Status | Job ID | Links | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
fail 7064546 2022-10-13 00:30:05 2022-10-15 07:45:28 2022-10-15 08:04:27 0:18:59 0:11:30 0:07:29 smithi main centos 8.stream orch:cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi061 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 9b7575e4-4c5e-11ed-8437-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 7064547 2022-10-13 00:30:06 2022-10-15 07:45:29 2022-10-15 07:56:05 0:10:36 0:03:22 0:07:14 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}} 1
Failure Reason:

Command failed on smithi046 with status 1: 'TESTDIR=/home/ubuntu/cephtest bash -s'

fail 7064548 2022-10-13 00:30:07 2022-10-15 07:45:29 2022-10-15 08:01:19 0:15:50 0:08:49 0:07:01 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi087 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064549 2022-10-13 00:30:08 2022-10-15 07:45:29 2022-10-15 08:04:22 0:18:53 0:13:14 0:05:39 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
pass 7064550 2022-10-13 00:30:09 2022-10-15 07:45:30 2022-10-15 08:22:13 0:36:43 0:29:40 0:07:03 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7064551 2022-10-13 00:30:10 2022-10-15 07:45:30 2022-10-15 08:28:38 0:43:08 0:36:25 0:06:43 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 7064552 2022-10-13 00:30:12 2022-10-15 07:45:41 2022-10-15 08:00:48 0:15:07 0:09:11 0:05:56 smithi main rhel 8.6 orch:cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.6_container_tools_rhel8} 2-node-mgr agent/off orchestrator_cli} 2
Failure Reason:

Command failed on smithi063 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064553 2022-10-13 00:30:13 2022-10-15 07:45:51 2022-10-15 08:00:47 0:14:56 0:07:01 0:07:55 smithi main centos 8.stream orch:cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} workloads/cephadm_iscsi} 3
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fb96f6be-4c5e-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064554 2022-10-13 00:30:14 2022-10-15 07:47:01 2022-10-15 08:00:09 0:13:08 0:06:25 0:06:43 smithi main centos 8.stream orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_8.stream_container_tools} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi125 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d3e27f94-4c5e-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064555 2022-10-13 00:30:15 2022-10-15 07:47:22 2022-10-15 08:39:08 0:51:46 0:43:30 0:08:16 smithi main centos 8.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
fail 7064556 2022-10-13 00:30:16 2022-10-15 07:48:02 2022-10-15 08:04:13 0:16:11 0:09:15 0:06:56 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi066 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064557 2022-10-13 00:30:17 2022-10-15 07:48:13 2022-10-15 08:03:05 0:14:52 0:08:50 0:06:02 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/off mon_election/classic task/test_nfs} 1
Failure Reason:

Command failed on smithi190 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064558 2022-10-13 00:30:18 2022-10-15 07:48:13 2022-10-15 08:13:05 0:24:52 0:19:08 0:05:44 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/mirror 3-final} 2
fail 7064559 2022-10-13 00:30:19 2022-10-15 07:48:13 2022-10-15 08:07:17 0:19:04 0:12:51 0:06:13 smithi main rhel 8.6 orch:cephadm/smoke/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi104 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ab7d957e-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

fail 7064560 2022-10-13 00:30:20 2022-10-15 07:48:14 2022-10-15 08:03:31 0:15:17 0:09:10 0:06:07 smithi main rhel 8.6 orch:cephadm/thrash/{0-distro/rhel_8.6_container_tools_rhel8 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi130 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064561 2022-10-13 00:30:21 2022-10-15 07:48:34 2022-10-15 08:09:49 0:21:15 0:11:16 0:09:59 smithi main ubuntu 20.04 orch:cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c638de96-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064562 2022-10-13 00:30:22 2022-10-15 07:48:44 2022-10-15 08:10:07 0:21:23 0:10:50 0:10:33 smithi main ubuntu 20.04 orch:cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Command failed on smithi167 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b9a3521a-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064563 2022-10-13 00:30:23 2022-10-15 07:48:45 2022-10-15 08:13:30 0:24:45 0:19:10 0:05:35 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
fail 7064564 2022-10-13 00:30:24 2022-10-15 07:48:45 2022-10-15 08:06:38 0:17:53 0:11:14 0:06:39 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi081 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ab2f9f90-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064565 2022-10-13 00:30:26 2022-10-15 07:49:25 2022-10-15 08:05:55 0:16:30 0:11:23 0:05:07 smithi main centos 8.stream orch:cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
pass 7064566 2022-10-13 00:30:27 2022-10-15 07:49:36 2022-10-15 08:26:30 0:36:54 0:30:10 0:06:44 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
fail 7064567 2022-10-13 00:30:28 2022-10-15 07:49:36 2022-10-15 08:09:07 0:19:31 0:12:27 0:07:04 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/off mon_election/classic task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi085 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d038b7b8-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064568 2022-10-13 00:30:29 2022-10-15 07:50:07 2022-10-15 08:07:05 0:16:58 0:11:04 0:05:54 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi078 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b3bcf73e-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064569 2022-10-13 00:30:30 2022-10-15 07:50:07 2022-10-15 08:19:55 0:29:48 0:20:31 0:09:17 smithi main ubuntu 20.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
fail 7064570 2022-10-13 00:30:31 2022-10-15 07:50:17 2022-10-15 08:10:28 0:20:11 0:10:08 0:10:03 smithi main ubuntu 20.04 orch:cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi006 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid bce24d82-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

fail 7064571 2022-10-13 00:30:32 2022-10-15 07:50:48 2022-10-15 08:10:11 0:19:23 0:09:07 0:10:16 smithi main ubuntu 20.04 orch:cephadm/thrash/{0-distro/ubuntu_20.04 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi134 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fc45cd8c-4c5f-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064572 2022-10-13 00:30:33 2022-10-15 07:50:48 2022-10-15 08:06:16 0:15:28 0:08:47 0:06:41 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_3.0 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi090 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064573 2022-10-13 00:30:34 2022-10-15 07:50:49 2022-10-15 08:07:43 0:16:54 0:09:33 0:07:21 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/on mon_election/connectivity task/test_adoption} 1
pass 7064574 2022-10-13 00:30:35 2022-10-15 07:50:49 2022-10-15 08:15:08 0:24:19 0:15:59 0:08:20 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
fail 7064575 2022-10-13 00:30:36 2022-10-15 07:51:39 2022-10-15 08:06:48 0:15:09 0:08:52 0:06:17 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi142 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064576 2022-10-13 00:30:37 2022-10-15 07:52:00 2022-10-15 08:37:02 0:45:02 0:30:32 0:14:30 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7064577 2022-10-13 00:30:38 2022-10-15 08:00:12 2022-10-15 08:44:19 0:44:07 0:32:53 0:11:14 smithi main ubuntu 20.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
fail 7064578 2022-10-13 00:30:39 2022-10-15 08:00:53 2022-10-15 08:15:50 0:14:57 0:08:18 0:06:39 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/off mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed on smithi063 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064579 2022-10-13 00:30:40 2022-10-15 08:00:53 2022-10-15 08:23:46 0:22:53 0:16:30 0:06:23 smithi main rhel 8.6 orch:cephadm/osds/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi026 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 542c17f8-4c61-11ed-8437-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 7064580 2022-10-13 00:30:41 2022-10-15 08:00:53 2022-10-15 08:21:01 0:20:08 0:09:24 0:10:44 smithi main ubuntu 20.04 orch:cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 97364abe-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064581 2022-10-13 00:30:42 2022-10-15 08:00:54 2022-10-15 08:22:59 0:22:05 0:15:13 0:06:52 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
pass 7064582 2022-10-13 00:30:43 2022-10-15 08:01:24 2022-10-15 08:18:12 0:16:48 0:09:28 0:07:20 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/on mon_election/connectivity task/test_cephadm_repos} 1
pass 7064583 2022-10-13 00:30:44 2022-10-15 08:03:15 2022-10-15 08:44:31 0:41:16 0:33:35 0:07:41 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 7064584 2022-10-13 00:30:45 2022-10-15 08:03:36 2022-10-15 08:20:11 0:16:35 0:07:42 0:08:53 smithi main centos 8.stream orch:cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi035 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6121f6da-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

fail 7064585 2022-10-13 00:30:46 2022-10-15 08:04:16 2022-10-15 08:23:32 0:19:16 0:12:03 0:07:13 smithi main centos 8.stream orch:cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid aff2eb98-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064586 2022-10-13 00:30:47 2022-10-15 08:04:26 2022-10-15 08:23:34 0:19:08 0:11:58 0:07:10 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi061 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b34fff10-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064587 2022-10-13 00:30:48 2022-10-15 08:04:37 2022-10-15 08:30:30 0:25:53 0:18:37 0:07:16 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs 3-final} 2
fail 7064588 2022-10-13 00:30:50 2022-10-15 08:05:57 2022-10-15 08:23:39 0:17:42 0:09:56 0:07:46 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/ubuntu_20.04 agent/off mon_election/classic task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}} 1
Failure Reason:

Command failed on smithi090 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f75ac816-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064589 2022-10-13 00:30:51 2022-10-15 08:06:18 2022-10-15 08:44:06 0:37:48 0:30:32 0:07:16 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
fail 7064590 2022-10-13 00:30:52 2022-10-15 08:06:48 2022-10-15 08:25:41 0:18:53 0:11:21 0:07:32 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi093 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fcd37414-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064591 2022-10-13 00:30:53 2022-10-15 08:06:59 2022-10-15 08:30:07 0:23:08 0:15:54 0:07:14 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
fail 7064592 2022-10-13 00:30:54 2022-10-15 08:07:09 2022-10-15 08:22:31 0:15:22 0:08:15 0:07:07 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi104 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064593 2022-10-13 00:30:55 2022-10-15 08:07:25 2022-10-15 08:23:43 0:16:18 0:10:53 0:05:25 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/on mon_election/connectivity task/test_nfs} 1
Failure Reason:

Command failed on smithi053 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fcc5b4dc-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064594 2022-10-13 00:30:56 2022-10-15 08:07:25 2022-10-15 08:32:08 0:24:43 0:15:47 0:08:56 smithi main rhel 8.6 orch:cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
fail 7064595 2022-10-13 00:30:57 2022-10-15 08:09:16 2022-10-15 08:23:57 0:14:41 0:07:51 0:06:50 smithi main centos 8.stream orch:cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi116 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e139b1f0-4c61-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

fail 7064596 2022-10-13 00:30:59 2022-10-15 08:09:16 2022-10-15 08:24:22 0:15:06 0:09:05 0:06:01 smithi main centos 8.stream orch:cephadm/thrash/{0-distro/centos_8.stream_container_tools_crun 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi085 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 56c09466-4c62-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064597 2022-10-13 00:31:00 2022-10-15 08:09:17 2022-10-15 08:24:40 0:15:23 0:08:30 0:06:53 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi132 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064598 2022-10-13 00:31:01 2022-10-15 08:09:57 2022-10-15 08:41:46 0:31:49 0:21:49 0:10:00 smithi main ubuntu 20.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
fail 7064599 2022-10-13 00:31:02 2022-10-15 08:10:18 2022-10-15 08:25:15 0:14:57 0:08:12 0:06:45 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/off mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi167 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6c6dadbc-4c62-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064600 2022-10-13 00:31:03 2022-10-15 08:10:18 2022-10-15 08:45:52 0:35:34 0:28:51 0:06:43 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
fail 7064601 2022-10-13 00:31:04 2022-10-15 08:10:39 2022-10-15 08:26:29 0:15:50 0:08:12 0:07:38 smithi main rhel 8.6 orch:cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.6_container_tools_rhel8} 2-node-mgr agent/on orchestrator_cli} 2
Failure Reason:

Command failed on smithi112 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064602 2022-10-13 00:31:05 2022-10-15 08:11:29 2022-10-15 08:28:06 0:16:37 0:10:43 0:05:54 smithi main rhel 8.6 orch:cephadm/smoke-singlehost/{0-random-distro$/{rhel_8.6_container_tools_rhel8} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c57b9932-4c62-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064603 2022-10-13 00:31:06 2022-10-15 08:11:40 2022-10-15 09:02:20 0:50:40 0:43:26 0:07:14 smithi main centos 8.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
fail 7064604 2022-10-13 00:31:07 2022-10-15 08:13:11 2022-10-15 08:32:53 0:19:42 0:08:42 0:11:00 smithi main ubuntu 20.04 orch:cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi047 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1f1f6c2a-4c63-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064605 2022-10-13 00:31:08 2022-10-15 08:13:31 2022-10-15 08:34:45 0:21:14 0:13:08 0:08:06 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/rgw 3-final} 2
fail 7064606 2022-10-13 00:31:10 2022-10-15 08:15:12 2022-10-15 08:31:38 0:16:26 0:08:22 0:08:04 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi188 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064607 2022-10-13 00:31:11 2022-10-15 08:16:33 2022-10-15 08:33:51 0:17:18 0:09:00 0:08:18 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi157 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a2d79f56-4c63-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064608 2022-10-13 00:31:12 2022-10-15 08:18:13 2022-10-15 08:41:28 0:23:15 0:14:52 0:08:23 smithi main rhel 8.6 orch:cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/repave-all} 2
fail 7064609 2022-10-13 00:31:13 2022-10-15 08:20:04 2022-10-15 08:40:13 0:20:09 0:11:35 0:08:34 smithi main rhel 8.6 orch:cephadm/smoke/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi035 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 35feaf4a-4c64-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

pass 7064610 2022-10-13 00:31:14 2022-10-15 08:20:14 2022-10-15 08:38:53 0:18:39 0:10:53 0:07:46 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/basic 3-final} 2
fail 7064611 2022-10-13 00:31:15 2022-10-15 08:21:05 2022-10-15 08:37:05 0:16:00 0:08:29 0:07:31 smithi main rhel 8.6 orch:cephadm/thrash/{0-distro/rhel_8.6_container_tools_3.0 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi106 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064612 2022-10-13 00:31:16 2022-10-15 08:22:16 2022-10-15 08:39:42 0:17:26 0:09:25 0:08:01 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi104 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 383bbdde-4c64-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064613 2022-10-13 00:31:17 2022-10-15 08:22:36 2022-10-15 08:37:56 0:15:20 0:08:29 0:06:51 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/off mon_election/classic task/test_adoption} 1
Failure Reason:

Command failed on smithi087 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064614 2022-10-13 00:31:18 2022-10-15 08:23:06 2022-10-15 09:00:30 0:37:24 0:31:13 0:06:11 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
fail 7064615 2022-10-13 00:31:20 2022-10-15 08:23:37 2022-10-15 08:38:31 0:14:54 0:08:26 0:06:28 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_3.0 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064616 2022-10-13 00:31:21 2022-10-15 08:23:37 2022-10-15 08:52:01 0:28:24 0:16:18 0:12:06 smithi main ubuntu 20.04 orch:cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_cephadm} 1
pass 7064617 2022-10-13 00:31:22 2022-10-15 08:23:38 2022-10-15 08:46:41 0:23:03 0:16:46 0:06:17 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
pass 7064618 2022-10-13 00:31:23 2022-10-15 08:23:48 2022-10-15 09:01:21 0:37:33 0:29:46 0:07:47 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 7064619 2022-10-13 00:31:24 2022-10-15 08:23:48 2022-10-15 08:38:50 0:15:02 0:08:41 0:06:21 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi145 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064620 2022-10-13 00:31:25 2022-10-15 08:23:59 2022-10-15 08:35:22 0:11:23 0:04:52 0:06:31 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/off mon_election/classic task/test_cephadm_repos} 1
pass 7064621 2022-10-13 00:31:26 2022-10-15 08:24:29 2022-10-15 08:47:33 0:23:04 0:16:21 0:06:43 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
fail 7064622 2022-10-13 00:31:28 2022-10-15 08:24:50 2022-10-15 08:42:11 0:17:21 0:11:14 0:06:07 smithi main rhel 8.6 orch:cephadm/smoke/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi083 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c58fd5da-4c64-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

fail 7064623 2022-10-13 00:31:29 2022-10-15 08:25:10 2022-10-15 08:40:18 0:15:08 0:08:35 0:06:33 smithi main rhel 8.6 orch:cephadm/thrash/{0-distro/rhel_8.6_container_tools_rhel8 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi085 with status 1: 'sudo yum -y install ceph-radosgw'

fail 7064624 2022-10-13 00:31:30 2022-10-15 08:25:20 2022-10-15 08:44:55 0:19:35 0:09:02 0:10:33 smithi main ubuntu 20.04 orch:cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi093 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d0ee7bfc-4c64-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064625 2022-10-13 00:31:31 2022-10-15 08:25:51 2022-10-15 09:01:33 0:35:42 0:29:19 0:06:23 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7064626 2022-10-13 00:31:32 2022-10-15 08:26:31 2022-10-15 09:07:42 0:41:11 0:31:47 0:09:24 smithi main ubuntu 20.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
fail 7064627 2022-10-13 00:31:33 2022-10-15 08:26:32 2022-10-15 08:54:12 0:27:40 0:15:00 0:12:40 smithi main ubuntu 20.04 orch:cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi033 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fc097940-4c64-11ed-8437-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 7064628 2022-10-13 00:31:34 2022-10-15 08:28:42 2022-10-15 08:43:12 0:14:30 0:08:42 0:05:48 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/on mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}} 1
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fd4b15e8-4c64-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064629 2022-10-13 00:31:35 2022-10-15 08:28:43 2022-10-15 08:45:20 0:16:37 0:08:58 0:07:39 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi078 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4131bb7c-4c65-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

pass 7064630 2022-10-13 00:31:36 2022-10-15 08:30:13 2022-10-15 08:58:03 0:27:50 0:17:31 0:10:19 smithi main ubuntu 20.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
fail 7064631 2022-10-13 00:31:37 2022-10-15 08:30:34 2022-10-15 08:46:39 0:16:05 0:09:00 0:07:05 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi102 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6dedbd50-4c65-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'

fail 7064632 2022-10-13 00:31:39 2022-10-15 08:31:44 2022-10-15 08:46:48 0:15:04 0:08:15 0:06:49 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/off mon_election/classic task/test_nfs} 1
Failure Reason:

Command failed on smithi063 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064633 2022-10-13 00:31:40 2022-10-15 08:31:45 2022-10-15 08:50:22 0:18:37 0:11:50 0:06:47 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/mirror 3-final} 2
fail 7064634 2022-10-13 00:31:41 2022-10-15 08:31:45 2022-10-15 08:49:47 0:18:02 0:06:59 0:11:03 smithi main ubuntu 20.04 orch:cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi121 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7ce12d92-4c65-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/nvme4n1'

dead 7064635 2022-10-13 00:31:42 2022-10-15 08:32:16 2022-10-15 08:58:08 0:25:52 0:15:29 0:10:23 smithi main ubuntu 20.04 orch:cephadm/thrash/{0-distro/ubuntu_20.04 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root} 2
Failure Reason:

{'smithi047.front.sepia.ceph.com': {'_ansible_no_log': False, 'attempts': 24, 'changed': False, 'invocation': {'module_args': {'allow_unauthenticated': False, 'autoclean': False, 'autoremove': False, 'cache_valid_time': 0, 'deb': None, 'default_release': None, 'dpkg_options': 'force-confdef,force-confold', 'force': False, 'force_apt_get': False, 'install_recommends': None, 'only_upgrade': False, 'package': None, 'policy_rc_d': None, 'purge': False, 'state': 'present', 'update_cache': True, 'update_cache_retries': 5, 'update_cache_retry_max_delay': 12, 'upgrade': None}}, 'msg': 'Failed to update apt cache: unknown reason'}}

fail 7064636 2022-10-13 00:31:43 2022-10-15 08:32:56 2022-10-15 08:49:32 0:16:36 0:08:02 0:08:34 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi190 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064637 2022-10-13 00:31:44 2022-10-15 08:33:57 2022-10-15 09:12:24 0:38:27 0:30:50 0:07:37 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
fail 7064638 2022-10-13 00:31:45 2022-10-15 08:34:47 2022-10-15 08:49:14 0:14:27 0:08:29 0:05:58 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/on mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Command failed on smithi032 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064639 2022-10-13 00:31:46 2022-10-15 08:34:48 2022-10-15 08:55:50 0:21:02 0:13:00 0:08:02 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
fail 7064640 2022-10-13 00:31:47 2022-10-15 08:36:59 2022-10-15 08:51:51 0:14:52 0:08:22 0:06:30 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi106 with status 1: 'sudo yum -y install ceph-radosgw'

pass 7064641 2022-10-13 00:31:48 2022-10-15 08:37:09 2022-10-15 08:54:00 0:16:51 0:11:18 0:05:33 smithi main centos 8.stream orch:cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
fail 7064642 2022-10-13 00:31:50 2022-10-15 08:37:09 2022-10-15 09:01:20 0:24:11 0:11:44 0:12:27 smithi main ubuntu 20.04 orch:cephadm/workunits/{0-distro/ubuntu_20.04 agent/off mon_election/classic task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c365098ecad370669563ee5c63d97f1160ffad4d shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 04c2b5c2-4c67-11ed-8437-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'