Status | Job ID | Links | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
fail 7903113 2024-09-13 05:01:07 2024-09-13 05:03:21 2024-09-13 05:21:47 0:18:26 0:08:20 0:10:06 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi144 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a0a9655e-718f-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903114 2024-09-13 05:01:09 2024-09-13 05:04:42 2024-09-13 05:24:27 0:19:45 0:09:54 0:09:51 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain} 3
Failure Reason:

Command failed on smithi145 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e34a8208-718f-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

pass 7903115 2024-09-13 05:01:10 2024-09-13 05:05:13 2024-09-13 05:49:08 0:43:55 0:32:21 0:11:34 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.0} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7903116 2024-09-13 05:01:12 2024-09-13 05:05:43 2024-09-13 05:49:04 0:43:21 0:33:20 0:10:01 smithi main centos 9.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 7903117 2024-09-13 05:01:13 2024-09-13 05:06:14 2024-09-13 05:23:50 0:17:36 0:08:35 0:09:01 smithi main centos 9.stream orch:cephadm/nfs/{cluster/{1-node} conf/{client mds mgr mon osd} overrides/{ignore_mgr_down ignorelist_health pg_health} supported-random-distros$/{centos_latest} tasks/nfs} 1
Failure Reason:

Command failed on smithi150 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fa7f2c80-718f-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903118 2024-09-13 05:01:14 2024-09-13 05:06:44 2024-09-13 05:24:10 0:17:26 0:08:28 0:08:58 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi203 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 10b73b28-7190-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

pass 7903119 2024-09-13 05:01:16 2024-09-13 05:07:04 2024-09-13 05:32:11 0:25:07 0:12:09 0:12:58 smithi main centos 9.stream orch:cephadm/orchestrator_cli/{0-random-distro$/{centos_9.stream_runc} 2-node-mgr agent/off orchestrator_cli} 2
dead 7903120 2024-09-13 05:01:17 2024-09-13 05:08:55 2024-09-13 05:49:10 0:40:15 smithi main ubuntu 22.04 orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
fail 7903121 2024-09-13 05:01:19 2024-09-13 05:11:36 2024-09-13 05:30:14 0:18:38 0:06:44 0:11:54 smithi main centos 9.stream orch:cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
Failure Reason:

Command failed on smithi136 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 94ae227a-7190-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

pass 7903122 2024-09-13 05:01:20 2024-09-13 05:13:07 2024-09-13 05:38:44 0:25:37 0:14:47 0:10:50 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
fail 7903123 2024-09-13 05:01:21 2024-09-13 05:13:47 2024-09-13 05:31:09 0:17:22 0:06:20 0:11:02 smithi main centos 9.stream orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream_runc} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi178 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c6b3c0ae-7190-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903124 2024-09-13 05:01:23 2024-09-13 05:13:48 2024-09-13 05:32:27 0:18:39 0:07:48 0:10:51 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/classic start} 3
Failure Reason:

Command failed on smithi060 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f61de1c6-7190-11ef-bceb-c7b262605968 -- lvm zap /dev/nvme1n1'

fail 7903125 2024-09-13 05:01:24 2024-09-13 05:15:29 2024-09-13 05:35:28 0:19:59 0:08:48 0:11:11 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi167 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6ee8aa0a-7191-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

dead 7903126 2024-09-13 05:01:25 2024-09-13 05:15:59 2024-09-13 05:49:38 0:33:39 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
fail 7903127 2024-09-13 05:01:27 2024-09-13 05:16:40 2024-09-13 05:36:48 0:20:08 0:07:50 0:12:18 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_ctdb_res_ips} 4
Failure Reason:

Command failed on smithi039 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7b467836-7191-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903128 2024-09-13 05:01:28 2024-09-13 05:17:00 2024-09-13 05:36:07 0:19:07 0:09:24 0:09:43 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi052 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7fa3ec42-7191-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

dead 7903129 2024-09-13 05:01:29 2024-09-13 05:17:11 2024-09-13 05:49:10 0:31:59 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}} 1
pass 7903130 2024-09-13 05:01:31 2024-09-13 05:17:11 2024-09-13 05:42:46 0:25:35 0:15:03 0:10:32 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
dead 7903131 2024-09-13 05:01:32 2024-09-13 05:17:31 2024-09-13 05:50:28 0:32:57 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} 2
fail 7903132 2024-09-13 05:01:33 2024-09-13 05:18:52 2024-09-13 05:42:12 0:23:20 0:11:46 0:11:34 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi012 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b0833b9c-7191-11ef-bceb-c7b262605968 -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 7903133 2024-09-13 05:01:35 2024-09-13 05:21:13 2024-09-13 05:44:48 0:23:35 0:12:45 0:10:50 smithi main ubuntu 22.04 orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_domain} 2
Failure Reason:

failed to connect to AD DC SMB share

fail 7903134 2024-09-13 05:01:36 2024-09-13 05:21:23 2024-09-13 05:43:41 0:22:18 0:11:59 0:10:19 smithi main ubuntu 22.04 orch:cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi144 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5c7f4fb2-7192-11ef-bceb-c7b262605968 -- lvm zap /dev/nvme4n1'

fail 7903135 2024-09-13 05:01:37 2024-09-13 05:22:04 2024-09-13 05:41:40 0:19:36 0:09:07 0:10:29 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi016 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4347f68e-7192-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903136 2024-09-13 05:01:39 2024-09-13 05:22:35 2024-09-13 05:46:41 0:24:06 0:13:42 0:10:24 smithi main ubuntu 22.04 orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid bf551932-7192-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903137 2024-09-13 05:01:40 2024-09-13 05:23:15 2024-09-13 05:49:12 0:25:57 0:16:00 0:09:57 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi037 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid db7ed238-7192-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

dead 7903138 2024-09-13 05:01:42 2024-09-13 05:23:36 2024-09-13 05:50:36 0:27:00 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7903139 2024-09-13 05:01:43 2024-09-13 05:23:36 2024-09-13 05:46:27 0:22:51 0:13:26 0:09:25 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs 3-final} 2
dead 7903140 2024-09-13 05:01:44 2024-09-13 05:23:36 2024-09-13 06:10:23 0:46:47 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
Failure Reason:

Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds

fail 7903141 2024-09-13 05:01:45 2024-09-13 05:24:07 2024-09-13 05:48:35 0:24:28 0:11:48 0:12:40 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi123 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f38f82d2-7192-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

pass 7903142 2024-09-13 05:01:47 2024-09-13 05:25:28 2024-09-13 05:49:29 0:24:01 0:12:42 0:11:19 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
fail 7903143 2024-09-13 05:01:48 2024-09-13 05:26:28 2024-09-13 05:44:02 0:17:34 0:06:07 0:11:27 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_res_basic} 2
Failure Reason:

Command failed on smithi086 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b1819786-7192-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903144 2024-09-13 05:01:50 2024-09-13 05:26:29 2024-09-13 05:47:00 0:20:31 0:09:27 0:11:04 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi043 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 04895fd6-7193-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903145 2024-09-13 05:01:51 2024-09-13 05:27:39 2024-09-13 05:50:00 0:22:21 0:09:50 0:12:31 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite} 3
Failure Reason:

Command failed on smithi083 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3d68501e-7193-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

dead 7903146 2024-09-13 05:01:52 2024-09-13 05:28:30 2024-09-13 05:50:24 0:21:54 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nvmeof 3-final} 2
dead 7903147 2024-09-13 05:01:53 2024-09-13 05:28:51 2024-09-13 05:49:42 0:20:51 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
dead 7903148 2024-09-13 05:01:55 2024-09-13 05:30:31 2024-09-13 05:50:05 0:19:34 smithi main ubuntu 22.04 orch:cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
dead 7903149 2024-09-13 05:01:56 2024-09-13 05:30:42 2024-09-13 05:50:42 0:20:00 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.0} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
dead 7903150 2024-09-13 05:01:57 2024-09-13 05:49:36 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
dead 7903151 2024-09-13 05:01:59 2024-09-13 05:32:23 2024-09-13 05:49:44 0:17:21 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 3
dead 7903152 2024-09-13 05:02:00 2024-09-13 05:32:43 2024-09-13 05:49:39 0:16:56 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
dead 7903153 2024-09-13 05:02:01 2024-09-13 05:35:44 2024-09-13 05:49:20 0:13:36 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_res_dom} 2
dead 7903154 2024-09-13 05:02:03 2024-09-13 05:36:25 2024-09-13 05:49:41 0:13:16 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rotate-keys} 2
dead 7903155 2024-09-13 05:02:04 2024-09-13 05:36:25 2024-09-13 05:50:55 0:14:30 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_set_mon_crush_locations} 3
dead 7903156 2024-09-13 05:02:06 2024-09-13 05:36:56 2024-09-13 05:49:54 0:12:58 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw 3-final} 2
dead 7903157 2024-09-13 05:02:07 2024-09-13 05:36:56 2024-09-13 05:50:58 0:14:02 smithi main ubuntu 22.04 orch:cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/classic task/test_adoption} 1
dead 7903158 2024-09-13 05:02:08 2024-09-13 05:37:07 2024-09-13 05:50:11 0:13:04 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/deploy-raw} 2
dead 7903159 2024-09-13 05:02:10 2024-09-13 05:37:07 2024-09-13 05:49:25 0:12:18 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_basic} 2
dead 7903160 2024-09-13 05:02:11 2024-09-13 05:38:39 2024-09-13 05:50:00 0:11:21 smithi main centos 9.stream orch:cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
dead 7903161 2024-09-13 05:02:12 2024-09-13 05:38:59 2024-09-13 05:50:33 0:11:34 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/basic 3-final} 2
dead 7903162 2024-09-13 05:02:14 2024-09-13 05:40:50 2024-09-13 05:50:18 0:09:28 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
dead 7903163 2024-09-13 05:02:15 2024-09-13 05:41:31 2024-09-13 05:49:24 0:07:53 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_ca_signed_key} 2
dead 7903164 2024-09-13 05:02:16 2024-09-13 05:41:41 2024-09-13 05:49:08 0:07:27 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
dead 7903165 2024-09-13 05:02:18 2024-09-13 05:41:52 2024-09-13 05:49:18 0:07:26 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
dead 7903166 2024-09-13 05:02:19 2024-09-13 05:42:22 2024-09-13 05:50:29 0:08:07 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
fail 7903167 2024-09-13 05:02:21 2024-09-13 05:43:03 2024-09-13 06:07:09 0:24:06 0:12:36 0:11:30 smithi main ubuntu 22.04 orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi169 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 97498484-7195-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903168 2024-09-13 05:02:22 2024-09-13 05:43:23 2024-09-13 06:07:39 0:24:16 0:13:46 0:10:30 smithi main ubuntu 22.04 orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi144 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b8adf614-7195-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

pass 7903169 2024-09-13 05:02:23 2024-09-13 05:44:04 2024-09-13 06:14:38 0:30:34 0:20:11 0:10:23 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_cephadm} 1
pass 7903170 2024-09-13 05:02:25 2024-09-13 05:44:14 2024-09-13 06:07:11 0:22:57 0:12:53 0:10:04 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
pass 7903171 2024-09-13 05:02:26 2024-09-13 05:44:25 2024-09-13 06:14:07 0:29:42 0:19:08 0:10:34 smithi main ubuntu 22.04 orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/repave-all} 2
fail 7903172 2024-09-13 05:02:28 2024-09-13 05:44:35 2024-09-13 06:05:35 0:21:00 0:09:11 0:11:49 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi064 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 9c437d32-7195-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

pass 7903173 2024-09-13 05:02:29 2024-09-13 05:46:36 2024-09-13 06:18:28 0:31:52 0:21:23 0:10:29 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
fail 7903174 2024-09-13 05:02:30 2024-09-13 05:46:57 2024-09-13 06:04:35 0:17:38 0:06:22 0:11:16 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_basic} 2
Failure Reason:

Command failed on smithi043 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 8609b46e-7195-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'

fail 7903175 2024-09-13 05:02:32 2024-09-13 05:47:17 2024-09-13 06:07:23 0:20:06 0:09:24 0:10:42 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi098 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:9d608eb13dd49ee7b4b1637dbcf4f78b31ddd3d3 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ebcfa538-7195-11ef-bceb-c7b262605968 -- lvm zap /dev/vg_nvme/lv_4'