Description: rados/multimon/{clusters/21 mon_election/classic msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} tasks/mon_clock_with_skews}

Log: http://qa-proxy.ceph.com/teuthology/rzarzynski-2022-08-01_13:21:59-rados-main-distro-default-smithi/6954588/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=8a12f320d0b24dedbbd11b623e1b4e91

Failure Reason:

Command failed on smithi086 with status 1: 'sudo yum -y install ceph-radosgw'
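
The run failed inside the install task, before any Ceph daemons were started, so the clock-skew scenario this job exists to exercise never ran (note the short 0:08:42 duration below). Given flavor: crimson, one plausible but unconfirmed cause is that no ceph-radosgw package is published for this flavor/sha1 on rhel 8.5. A quick check from the target node (hypothetical debugging step, assuming shell access to smithi086):

    # Is the package visible in any enabled repo at all?
    sudo yum info ceph-radosgw
    # Which repos did teuthology configure on this node?
    sudo yum -v repolist enabled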

  • log_href: http://qa-proxy.ceph.com/teuthology/rzarzynski-2022-08-01_13:21:59-rados-main-distro-default-smithi/6954588/teuthology.log
  • archive_path: /home/teuthworker/archive/rzarzynski-2022-08-01_13:21:59-rados-main-distro-default-smithi/6954588
  • description: rados/multimon/{clusters/21 mon_election/classic msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} tasks/mon_clock_with_skews}
  • duration: 0:08:42
  • email:
  • failure_reason: Command failed on smithi086 with status 1: 'sudo yum -y install ceph-radosgw'
  • flavor: crimson
  • job_id: 6954588
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: rzarzynski-2022-08-01_13:21:59-rados-main-distro-default-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.5
  • overrides:
    • admin_socket:
      • branch: main
    • ceph:
      • conf:
        • global:
          • debug ms: 20
          • debug osd: 20
          • mon client directed command retry: 5
          • mon election default strategy: 1
          • ms bind msgr1: False
          • ms bind msgr2: True
          • ms inject socket failures: 5000
          • ms type: async
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon scrub interval: 300
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: stupid
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd debug verify cached snaps: True
          • osd debug verify missing on start: True
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op queue: debug_random
          • osd op queue cut off: debug_random
      • create_rbd_pool: False
      • env:
        • LSAN_OPTIONS: report_objects=1
      • flavor: crimson
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(OSD_SLOW_PING_TIME
      • pre-mgr-commands:
        • sudo ceph config set mgr mgr_pool false --force
      • sha1: f2f3273b01602572d42ff3c803807ab253e75aa5
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: crimson
        • sha1: f2f3273b01602572d42ff3c803807ab253e75aa5
      • flavor: crimson
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: main
      • sha1: 8e7f49c256f8f4423de0179cd5ade14f6f211bd5
  • owner: scheduled_rzarzynski@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.d', 'mon.g', 'mon.j', 'mon.m', 'mon.p', 'mon.s']
    • ['mon.b', 'mon.e', 'mon.h', 'mon.k', 'mon.n', 'mon.q', 'mon.t', 'mgr.x']
    • ['mon.c', 'mon.f', 'mon.i', 'mon.l', 'mon.o', 'mon.r', 'mon.u']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=8a12f320d0b24dedbbd11b623e1b4e91
  • status: fail
  • success: False
  • branch: main
  • seed:
  • sha1: f2f3273b01602572d42ff3c803807ab253e75aa5
  • subset:
  • suite:
  • suite_branch: main
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 8e7f49c256f8f4423de0179cd5ade14f6f211bd5
  • targets:
    • smithi039.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDsRFK+Rh0zCC23b6PHIrihhnOYENwziWeEN2pz2bY7aszvHFsr/caZmrdO1tmdku4wAaHi4pc9hWLMl9FZvWvz7pY4KGSoLcwIQ6DWPZ/bGge6wGHhWjDXfsO0s7lQKXGmewADFpC19kbDk73kaN8bb2GioN8+2HE0N98R+V4gaA43Nb5w0iMIibrqJTYL3k4qR5MmtTbNvrBriE60EGB/wCL7Q+/FRAuhf2nzD8El/0quG3UgpNLFxiXP7+iYC/jV0W3X3je1Zf5uqhtuWWJjThTCaA6wP11iIiO5JRxITvdScqFUy39gKMNP46oqDN2WrVVDWuEyDZ71qfB6yxMVD4+SkkyPLch6rUqiI4jqqdMd89M9LiMwr4x6t98Mbs13X9P/4OZ4G3ajf4Q5irtm3GE+JWQS+D8YvsTO+Fu+VYig9VT5SlwxfL9HlCPhXjc4Y0PgDp1DrWju2AYEvrwbxZudU5HUYtoqjBfGfFJv2gu25rfzATRUnigNqNiD9WM=
    • smithi086.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC5V5ys/N+0KbIWjJZCxqMMCs5QFigNBbKFj19oSdsg4AePIvmWJn15fMmo0GvzUiG33kEbOlzdfJca9V2EFZElCHDC8SLzyXoW9WkUJsE+jy/b0M71xRDQZghr9hQTzxrmUreF89wsqQSi3UDcMBFqFuDyCtetbypG1zjd8tPWFKsx8yv6fufGB9H5Xj5SwkD9cWWHPLLKDuGE7qPsnAf+3DOk3M3szc3o4Vy1Z5NX9orD3gRDY4LOw5ewvNnTaTViKUzu0YKoH5tfFup9De0Y8WOsIN4fMtDweTs9bGngvoRb9E5nGON5XVri7/mzKu6u4yC2XOLX7kiI8lyYdui+T0DJ0Th2fFF0ZaZxgW46Sz03+JcsrzbIr+uAMICEFJL3QeFJjVT7HiUfGw0Q3iEQT+sEAVnAuTmGDRibCXmGR6hJYkWxKqHfNT/5Hg2TOzkugmWdqWov6sH7FYNW7Qq4bMOXUoZUdmtTcNiouvJES1STuHPzF6irMdM6fLBf7Ns=
    • smithi157.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDCZsnUhDXx2gIOB8HaQHpXiBHC7krBwYJTxBSsoVr+aX5XjzBq7/32cEp/QggIEoyVpd8NTN3L4XlhBn9qY/8jt8XlgcRmYsW+nmQTf3TIEIN+tYYGocoUtNc23NuUbEdv7frTvW6kDVqYU6of9UEMB3b/6wiEOS4GtPy6T6AjCALqR/rmwsBNId4RWfil6sDFuVVN3GgNB7BKpmA7WZZ7TMtVV5j1RPbmkkfD+i/jVsqfL4GRKqluqL4AaOUquJ9eLRQnvxf4jdiJUOd/9S9q4rba3jluJhfll9Zni487kshkKDFBi9GYk0hEcuBBKUd+UkfSBXuxp5Ef1KlhGbSzi88H+g6k2dw5Dt1d+C8zmfJaMiStJHWRhvnFo7PMqK3tJ5SU4ZYqYNXtE5C6MckyQDkdtNTtx6kiFet/+bTmU6Fv43voVbW5NfQyVCyWE9oc7ToXYOiHgE6/eV1L0CeMafMRbhqzZSPsZzjLjb8RkeHt3+kAHAZ8X2IOzhDZO+s=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • exec:
      • mon.b:
        • sudo systemctl stop chronyd.service || true
        • sudo systemctl stop systemd-timesync.service || true
        • sudo systemctl stop ntpd.service || true
        • sudo systemctl stop ntp.service || true
        • date -u -s @$(expr $(date -u +%s) + 2)
    • ceph:
      • log-ignorelist:
        • .*clock.*skew.*
        • clocks not synchronized
        • overall HEALTH_
        • \(MON_CLOCK_SKEW\)
        • \(MGR_DOWN\)
        • \(MON_DOWN\)
        • \(PG_
        • \(SLOW_OPS\)
        • No standby daemons available
        • slow request
      • wait-for-healthy: False
    • mon_clock_skew_check:
      • expect-skew: True
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2022-08-01 13:22:44
  • started: 2022-08-01 13:23:13
  • updated: 2022-08-01 13:39:12
  • status_class: danger
  • runtime: 0:15:59
  • wait_time: 0:07:17
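
For reference, had the install succeeded, the exec task above would have induced the skew under test: it stops every time-sync daemon on mon.b's host (chronyd, systemd-timesync, ntpd, ntp), then steps that host's clock two seconds ahead, and mon_clock_skew_check with expect-skew: True asserts that the monitors actually report the skew (hence the clock/skew entries in the ceph task's log-ignorelist). A minimal shell sketch of what the clock-step command does, equivalent to the task's expr form:

    # Read the current UTC time as seconds since the Unix epoch.
    now=$(date -u +%s)
    # Step the clock 2 seconds ahead. The "@" prefix tells GNU date
    # to parse its argument as epoch seconds. sudo shown for a plain
    # shell; the teuthology exec task runs with its own privileges.
    sudo date -u -s "@$((now + 2))"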