Description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2024-02-10_17:31:55-rados-wip-yuri6-testing-2024-02-09-1422-distro-default-smithi/7555768/teuthology.log

Failure Reason:

hit max job timeout

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2024-02-10_17:31:55-rados-wip-yuri6-testing-2024-02-09-1422-distro-default-smithi/7555768/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2024-02-10_17:31:55-rados-wip-yuri6-testing-2024-02-09-1422-distro-default-smithi/7555768
  • description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
  • duration:
  • email: yweinste@redhat.com
  • failure_reason: hit max job timeout
  • flavor:
  • job_id: 7555768
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2024-02-10_17:31:55-rados-wip-yuri6-testing-2024-02-09-1422-distro-default-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: wip-yuri6-testing-2024-02-09-1422
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
          • osd shutdown pgref assert: True
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 14aa01b918f09661ae41e4c41d1de99d6ac08228
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: 14aa01b918f09661ae41e4c41d1de99d6ac08228
    • workunit:
      • branch: wip-yuri6-testing-2024-02-09-1422
      • sha1: 14aa01b918f09661ae41e4c41d1de99d6ac08228
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['host.a', 'client.0']
    • ['host.b', 'client.1']
  • sentry_event:
  • status: dead
  • success:
  • branch: wip-yuri6-testing-2024-02-09-1422
  • seed:
  • sha1: 14aa01b918f09661ae41e4c41d1de99d6ac08228
  • subset:
  • suite:
  • suite_branch: wip-yuri6-testing-2024-02-09-1422
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 14aa01b918f09661ae41e4c41d1de99d6ac08228
  • targets:
    • smithi037.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBA1MhCJ9g/k30vZ8gma5ct3VU1bH1LftNsSiiL6P4Nkvifx3EoYfw+dpuMu0eoK+OCrPU2Y21PgWvcGeb9qWJWE=
    • smithi165.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO8eyshgkEjNWHpjULwZc9wV3HFqTFZlm8rY9esfgvVvMluogOQ1pytESEHs13i8fnOeXRfQhw6u9Ubhlzx16AA=
  • tasks:
    • nvme_loop: (exposes the test disks as NVMe loopback devices; an illustrative sketch follows this record)
    • cephadm:
      • roleless: True
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph orch ls | grep '^osd.all-available-devices '
    • cephadm.shell:
      • host.a:
        • set -e
          set -x
          ceph orch ps
          HOST=$(hostname -s)
          OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
          echo "host $HOST, osd $OSD"
          # stop the first OSD on this host and wait for it to leave 'running'
          ceph orch daemon stop $OSD
          while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
          # keep the OSD's key, then remove the daemon and its directory (the 'rmdir' step)
          ceph auth export $OSD > k
          ceph orch daemon rm $OSD --force
          ceph orch ps --refresh
          while ceph orch ps | grep $OSD ; do sleep 5 ; done
          # restore the key and have cephadm rescan the host and reactivate the OSD
          ceph auth add $OSD -i k
          ceph cephadm osd activate $HOST
          # NB: unbounded wait; if the OSD never returns, the job runs until the suite
          # timeout (a bounded variant is sketched after this record)
          while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
    • cephadm.healthy:
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2024-02-10 17:34:05
  • started: 2024-02-10 19:34:24
  • updated: 2024-02-11 07:44:32
  • status_class: danger
  • runtime: 12:10:08
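
For reference, the nvme_loop task above gives the cluster disk-like devices by exporting loop devices through the kernel's NVMe loopback target, so that `ceph orch device ls` reports them as usable raw devices. The following is a generic illustration of that mechanism, not the task's actual implementation; the backing file path, the NQN `testnqn`, and the size are hypothetical names chosen for the example.

    # Illustrative only: a hand-rolled NVMe loopback device (assumes nvme-cli
    # and the nvmet/nvme-loop kernel modules; all names here are hypothetical).
    modprobe nvmet
    modprobe nvme-loop
    truncate -s 10G /tmp/backing.img
    DEV=$(losetup -f --show /tmp/backing.img)
    cd /sys/kernel/config/nvmet
    mkdir subsystems/testnqn
    echo 1 > subsystems/testnqn/attr_allow_any_host
    mkdir subsystems/testnqn/namespaces/1
    echo "$DEV" > subsystems/testnqn/namespaces/1/device_path
    echo 1 > subsystems/testnqn/namespaces/1/enable
    mkdir ports/1
    echo loop > ports/1/addr_trtype
    ln -s /sys/kernel/config/nvmet/subsystems/testnqn ports/1/subsystems/testnqn
    # connect from the same host; the namespace appears as /dev/nvmeXn1
    nvme connect -t loop -n testnqn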
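
The failure here is `hit max job timeout`: the job ran for 12:10:08 before being marked dead, which is consistent with the reactivation script parking in one of its unbounded `while ... ; do sleep 5 ; done` loops, most likely the final wait for the OSD to return to `running`. A minimal sketch of a bounded variant of that last wait, assuming the same 5-second poll interval and an arbitrary 120-try (roughly 10-minute) budget:

    # Hypothetical bounded replacement for the script's final wait loop.
    tries=0
    until ceph orch ps | grep "$OSD" | grep -q running ; do
        tries=$((tries + 1))
        if [ "$tries" -ge 120 ] ; then
            echo "osd $OSD did not return to running; failing fast" >&2
            exit 1
        fi
        sleep 5
    done

Failing fast this way would surface the real error near the end of the teuthology log instead of holding the smithi nodes until the suite-wide timeout fires.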