Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2024-05-21_01:07:55-rados-wip-yuri7-testing-2024-05-20-1227-distro-default-smithi/7717752/teuthology.log

Failure Reason:

Error reimaging machines: Failed to power on smithi172
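
For reference, smithi nodes in the Sepia lab are power-controlled over IPMI, so this failure points at the machine's BMC rather than at the test itself. A minimal manual check, assuming the usual Sepia BMC hostname pattern and placeholder credentials:

    # Assumed BMC naming convention; substitute real credentials.
    ipmitool -I lanplus -H smithi172.ipmi.sepia.ceph.com -U <user> -P <pass> chassis power status
    ipmitool -I lanplus -H smithi172.ipmi.sepia.ceph.com -U <user> -P <pass> chassis power on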

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2024-05-21_01:07:55-rados-wip-yuri7-testing-2024-05-20-1227-distro-default-smithi/7717752/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2024-05-21_01:07:55-rados-wip-yuri7-testing-2024-05-20-1227-distro-default-smithi/7717752
  • description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
  • duration:
  • email: yweinste@redhat.com
  • failure_reason: Error reimaging machines: Failed to power on smithi172
  • flavor:
  • job_id: 7717752
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2024-05-21_01:07:55-rados-wip-yuri7-testing-2024-05-20-1227-distro-default-smithi
  • nuke_on_error:
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: wip-yuri7-testing-2024-05-20-1227
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd shutdown pgref assert: True
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • OSD_DOWN
      • log-only-match:
        • CEPHADM_
      • sha1: 9ef9bcfdb32ec4999d7ca025815290e8395c6ae7
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: 9ef9bcfdb32ec4999d7ca025815290e8395c6ae7
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: wip-yuri7-testing-2024-05-20-1227
      • sha1: 9ef9bcfdb32ec4999d7ca025815290e8395c6ae7
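
For context, the ceph conf overrides above only raise daemon debug verbosity (teuthology injects them into the generated ceph.conf). As an illustrative sketch, the equivalent settings on a live cluster would be:

    # Illustrative only; teuthology applies these via ceph.conf, not `ceph config`.
    ceph config set mgr debug_mgr 20
    ceph config set mgr debug_ms 1
    ceph config set mon debug_mon 20
    ceph config set mon debug_ms 1
    ceph config set mon debug_paxos 20
    ceph config set osd debug_osd 20
    ceph config set osd debug_ms 1
    ceph config set osd osd_shutdown_pgref_assert true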
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['host.a', 'client.0']
    • ['host.b', 'client.1']
  • sentry_event:
  • status: dead
  • success:
  • branch: wip-yuri7-testing-2024-05-20-1227
  • seed: 4736
  • sha1: 9ef9bcfdb32ec4999d7ca025815290e8395c6ae7
  • subset: 111/120000
  • suite: rados
  • suite_branch: wip-yuri7-testing-2024-05-20-1227
  • suite_path: /home/teuthworker/src/github.com_ceph_ceph-c_9ef9bcfdb32ec4999d7ca025815290e8395c6ae7/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/ceph/ceph-ci.git
  • suite_sha1: 9ef9bcfdb32ec4999d7ca025815290e8395c6ae7
  • targets:
    • smithi055.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC5W5zVhZquUtJaRxtnyde1fTz0VrNS7Xr0GaNoubJv3JZzrE3tspQujnR+/hRsd8XvIPJKW70rAJAHJ/p6JA+s=
    • smithi172.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAuEMV99a5HOI3ySoO1ETZgmbJPnbYQwedT7k3pORlHP2iSiLQwoae1zicYzW7G8JYgcBE2EW4vhLAuO8INkj1E=
  • tasks:
    • pexec:
      • all:
        • sudo dnf install runc -y
        • sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
        • sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
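
The pexec step above swaps podman's OCI runtime from crun to runc on every host; a quick sanity check that the switch took effect (a sketch; the template path follows the `podman info` output structure):

    # Confirm podman now resolves runc as its OCI runtime.
    podman info --format '{{.Host.OCIRuntime.Name}}'   # expected: runc
    podman info --format '{{.Host.OCIRuntime.Path}}'   # e.g. /usr/bin/runc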
    • nvme_loop:
    • cephadm:
      • roleless: True
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph orch ls | grep '^osd.all-available-devices '
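
The nvme_loop task above backs the test with loopback NVMe devices so that `ceph orch device ls` has claimable disks. Roughly, it relies on the kernel nvmet configfs plumbing sketched below (illustrative only; the backing file, loop device, and NQN are made-up names, not the task's actual code):

    # Minimal nvme-loop target sketch; all names here are assumptions.
    truncate -s 10G /root/nvme-backing.img
    losetup /dev/loop0 /root/nvme-backing.img
    modprobe nvmet
    modprobe nvme-loop
    mkdir /sys/kernel/config/nvmet/subsystems/testnqn
    echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
    mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
    echo /dev/loop0 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
    echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/enable
    mkdir /sys/kernel/config/nvmet/ports/1
    echo loop > /sys/kernel/config/nvmet/ports/1/addr_trtype
    ln -s /sys/kernel/config/nvmet/subsystems/testnqn /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
    nvme connect -t loop -n testnqn   # namespace then appears as /dev/nvme*n1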
    • cephadm.shell:
      • host.a:
        • set -e
          set -x
          ceph orch ps
          HOST=$(hostname -s)
          OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
          echo "host $HOST, osd $OSD"
          ceph orch daemon stop $OSD
          while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
          ceph auth export $OSD > k
          ceph orch daemon rm $OSD --force
          ceph orch ps --refresh
          while ceph orch ps | grep $OSD ; do sleep 5 ; done
          ceph auth add $OSD -i k
          ceph cephadm osd activate $HOST
          while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
    • cephadm.healthy:
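
Spelled out, the second cephadm.shell task above is the actual rmdir-reactivate exercise: stop one OSD daemon, remove it without touching its on-disk data, then restore its key and have cephadm re-activate it. The same commands as the task, with comments added for readability (no behavior change):

    set -e
    set -x
    ceph orch ps
    HOST=$(hostname -s)
    # Pick the first OSD daemon that cephadm reports on this host.
    OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
    echo "host $HOST, osd $OSD"
    # Stop the daemon and wait until the orchestrator stops listing it as running.
    ceph orch daemon stop $OSD
    while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
    # Save the OSD's auth key, then remove the daemon (its data dir stays on disk).
    ceph auth export $OSD > k
    ceph orch daemon rm $OSD --force
    ceph orch ps --refresh
    while ceph orch ps | grep $OSD ; do sleep 5 ; done
    # Restore the key, ask cephadm to re-activate dormant OSDs on the host,
    # and wait for the daemon to come back up.
    ceph auth add $OSD -i k
    ceph cephadm osd activate $HOST
    while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done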
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority: 99
  • user: yuriw
  • queue:
  • posted: 2024-05-21 01:14:13
  • started: 2024-05-21 20:30:41
  • updated: 2024-05-21 20:33:25
  • status_class: danger
  • runtime: 0:02:44