Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 7076656 2022-10-20 14:54:14 2022-10-22 06:58:44 2022-10-22 07:14:24 0:15:40 0:08:43 0:06:57 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Command failed on smithi201 with status 5: 'sudo systemctl stop ceph-07ffb8a8-51d9-11ed-8438-001a4aab830c@mon.a'

fail 7076657 2022-10-20 14:54:15 2022-10-22 06:59:34 2022-10-22 07:11:10 0:11:36 0:04:23 0:07:13 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi154 with status 5: 'sudo systemctl stop ceph-7a22a590-51d8-11ed-8438-001a4aab830c@mon.smithi154'

fail 7076658 2022-10-20 14:54:16 2022-10-22 06:59:34 2022-10-22 07:28:14 0:28:40 0:20:14 0:08:26 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 509ee5de-51d9-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076659 2022-10-20 14:54:18 2022-10-22 07:01:05 2022-10-22 07:17:43 0:16:38 0:05:52 0:10:46 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-521f1424-51d9-11ed-8438-001a4aab830c@mon.a'

fail 7076660 2022-10-20 14:54:19 2022-10-22 07:04:26 2022-10-22 07:20:32 0:16:06 0:04:42 0:11:24 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi008 with status 5: 'sudo systemctl stop ceph-b90b5350-51d9-11ed-8438-001a4aab830c@mon.a'

fail 7076661 2022-10-20 14:54:20 2022-10-22 07:05:06 2022-10-22 07:26:24 0:21:18 0:09:44 0:11:34 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi035 with status 5: 'sudo systemctl stop ceph-8f0f8d04-51da-11ed-8438-001a4aab830c@mon.a'

fail 7076662 2022-10-20 14:54:21 2022-10-22 07:08:07 2022-10-22 07:25:38 0:17:31 0:04:46 0:12:45 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-6f4dc7ce-51da-11ed-8438-001a4aab830c@mon.smithi040'

fail 7076663 2022-10-20 14:54:23 2022-10-22 07:10:18 2022-10-22 07:24:24 0:14:06 0:06:20 0:07:46 smithi main centos 8.stream orch/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi154 with status 5: 'sudo systemctl stop ceph-5dc910ee-51da-11ed-8438-001a4aab830c@mon.smithi154'

fail 7076664 2022-10-20 14:54:24 2022-10-22 07:11:18 2022-10-22 07:27:10 0:15:52 0:09:12 0:06:40 smithi main centos 8.stream orch/cephadm/dashboard/{0-distro/centos_8.stream_container_tools task/test_e2e} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-e1c196f0-51da-11ed-8438-001a4aab830c@mon.a'

fail 7076665 2022-10-20 14:54:25 2022-10-22 07:12:29 2022-10-22 07:40:43 0:28:14 0:20:55 0:07:19 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi036 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 17d373b2-51db-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076666 2022-10-20 14:54:26 2022-10-22 07:13:29 2022-10-22 07:41:27 0:27:58 0:21:11 0:06:47 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cdab373e-51da-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

pass 7076667 2022-10-20 14:54:27 2022-10-22 07:14:30 2022-10-22 07:37:50 0:23:20 0:16:55 0:06:25 smithi main rhel 8.4 orch/cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.4_container_tools_3.0} 2-node-mgr orchestrator_cli} 2
pass 7076668 2022-10-20 14:54:28 2022-10-22 07:14:50 2022-10-22 07:52:28 0:37:38 0:29:39 0:07:59 smithi main rhel 8.4 orch/cephadm/rbd_iscsi/{base/install cluster/{fixed-3 openstack} pool/datapool supported-random-distro$/{rhel_8} workloads/ceph_iscsi} 3
fail 7076669 2022-10-20 14:54:30 2022-10-22 07:17:21 2022-10-22 07:30:51 0:13:30 0:06:21 0:07:09 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi167 with status 5: 'sudo systemctl stop ceph-452698f8-51db-11ed-8438-001a4aab830c@mon.a'

fail 7076670 2022-10-20 14:54:31 2022-10-22 07:17:41 2022-10-22 07:30:42 0:13:01 0:06:19 0:06:42 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi170 with status 5: 'sudo systemctl stop ceph-3efd0264-51db-11ed-8438-001a4aab830c@mon.smithi170'

fail 7076671 2022-10-20 14:54:32 2022-10-22 07:17:51 2022-10-22 07:30:38 0:12:47 0:06:12 0:06:35 smithi main centos 8.stream orch/cephadm/smoke-singlehost/{0-distro$/{centos_8.stream_container_tools} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-393860e4-51db-11ed-8438-001a4aab830c@mon.smithi074'

fail 7076672 2022-10-20 14:54:33 2022-10-22 07:17:52 2022-10-22 07:34:46 0:16:54 0:09:09 0:07:45 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi103 with status 5: 'sudo systemctl stop ceph-ef13e2a8-51db-11ed-8438-001a4aab830c@mon.a'

fail 7076673 2022-10-20 14:54:34 2022-10-22 07:19:22 2022-10-22 07:50:01 0:30:39 0:19:12 0:11:27 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi008 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e18c9b52-51db-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)"\''

fail 7076674 2022-10-20 14:54:36 2022-10-22 07:20:33 2022-10-22 07:38:59 0:18:26 0:10:50 0:07:36 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi191 with status 5: 'sudo systemctl stop ceph-6e0b9b00-51dc-11ed-8438-001a4aab830c@mon.a'

pass 7076675 2022-10-20 14:54:37 2022-10-22 07:21:53 2022-10-22 07:39:56 0:18:03 0:11:09 0:06:54 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_adoption} 1
fail 7076676 2022-10-20 14:54:38 2022-10-22 07:21:54 2022-10-22 07:36:13 0:14:19 0:06:05 0:08:14 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi087 with status 5: 'sudo systemctl stop ceph-f00beb6a-51db-11ed-8438-001a4aab830c@mon.a'

fail 7076677 2022-10-20 14:54:39 2022-10-22 07:22:44 2022-10-22 07:39:41 0:16:57 0:07:41 0:09:16 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi154 with status 5: 'sudo systemctl stop ceph-5c580f7e-51dc-11ed-8438-001a4aab830c@mon.smithi154'

fail 7076678 2022-10-20 14:54:41 2022-10-22 07:24:35 2022-10-22 07:41:25 0:16:50 0:09:55 0:06:55 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi046 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7076679 2022-10-20 14:54:42 2022-10-22 07:24:35 2022-10-22 07:40:37 0:16:02 0:07:50 0:08:12 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-82f68444-51dc-11ed-8438-001a4aab830c@mon.a'

fail 7076680 2022-10-20 14:54:43 2022-10-22 07:25:46 2022-10-22 07:40:49 0:15:03 0:06:10 0:08:53 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi066 with status 5: 'sudo systemctl stop ceph-97b14b30-51dc-11ed-8438-001a4aab830c@mon.a'

fail 7076681 2022-10-20 14:54:44 2022-10-22 07:26:26 2022-10-22 07:41:24 0:14:58 0:07:58 0:07:00 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi035 with status 5: 'sudo systemctl stop ceph-a3e700de-51dc-11ed-8438-001a4aab830c@mon.smithi035'

fail 7076682 2022-10-20 14:54:46 2022-10-22 07:26:27 2022-10-22 07:54:14 0:27:47 0:20:44 0:07:03 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f6352c76-51dc-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076683 2022-10-20 14:54:47 2022-10-22 07:27:07 2022-10-22 07:39:59 0:12:52 0:07:23 0:05:29 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-a6099f52-51dc-11ed-8438-001a4aab830c@mon.smithi043'

fail 7076684 2022-10-20 14:54:48 2022-10-22 07:27:17 2022-10-22 07:43:15 0:15:58 0:09:06 0:06:52 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi079 with status 5: 'sudo systemctl stop ceph-1c76ab9e-51dd-11ed-8438-001a4aab830c@mon.a'

fail 7076685 2022-10-20 14:54:49 2022-10-22 07:28:18 2022-10-22 07:43:59 0:15:41 0:06:52 0:08:49 smithi main ubuntu 18.04 orch/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/calico rook/1.6.2} 3
Failure Reason:

Command failed on smithi074 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

fail 7076686 2022-10-20 14:54:50 2022-10-22 07:30:49 2022-10-22 07:48:01 0:17:12 0:10:21 0:06:51 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi167 with status 5: 'sudo systemctl stop ceph-a12914b2-51dd-11ed-8438-001a4aab830c@mon.a'

fail 7076687 2022-10-20 14:54:52 2022-10-22 07:30:59 2022-10-22 07:44:02 0:13:03 0:07:02 0:06:01 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi107 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 7076688 2022-10-20 14:54:53 2022-10-22 07:30:59 2022-10-22 07:45:13 0:14:14 0:07:17 0:06:57 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi057 with status 5: 'sudo systemctl stop ceph-5cd55096-51dd-11ed-8438-001a4aab830c@mon.a'

fail 7076689 2022-10-20 14:54:54 2022-10-22 07:31:50 2022-10-22 07:46:25 0:14:35 0:04:29 0:10:06 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi103 with status 5: 'sudo systemctl stop ceph-6b0ea8ec-51dd-11ed-8438-001a4aab830c@mon.smithi103'

fail 7076690 2022-10-20 14:54:55 2022-10-22 07:34:51 2022-10-22 07:49:18 0:14:27 0:06:17 0:08:10 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/octopus backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-ca23d7e4-51dd-11ed-8438-001a4aab830c@mon.a'

fail 7076691 2022-10-20 14:54:56 2022-10-22 07:36:01 2022-10-22 08:05:03 0:29:02 0:21:35 0:07:27 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi018 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 36fa3688-51de-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076692 2022-10-20 14:54:58 2022-10-22 07:36:12 2022-10-22 07:59:29 0:23:17 0:16:42 0:06:35 smithi main centos 8.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi160 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid debd04a0-51dd-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076693 2022-10-20 14:54:59 2022-10-22 07:36:22 2022-10-22 07:51:06 0:14:44 0:08:54 0:05:50 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_nfs} 1
Failure Reason:

Command failed on smithi087 with status 5: 'sudo systemctl stop ceph-2e5a7506-51de-11ed-8438-001a4aab830c@mon.a'

fail 7076694 2022-10-20 14:55:00 2022-10-22 07:36:22 2022-10-22 07:53:19 0:16:57 0:05:00 0:11:57 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi085 with status 5: 'sudo systemctl stop ceph-55200836-51de-11ed-8438-001a4aab830c@mon.smithi085'

fail 7076695 2022-10-20 14:55:01 2022-10-22 07:37:53 2022-10-22 07:51:50 0:13:57 0:07:20 0:06:37 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi191 with status 5: 'sudo systemctl stop ceph-4be3dc70-51de-11ed-8438-001a4aab830c@mon.smithi191'

fail 7076696 2022-10-20 14:55:02 2022-10-22 07:39:04 2022-10-22 07:51:45 0:12:41 0:04:48 0:07:53 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi154 with status 5: 'sudo systemctl stop ceph-34796366-51de-11ed-8438-001a4aab830c@mon.a'

fail 7076697 2022-10-20 14:55:03 2022-10-22 07:39:44 2022-10-22 08:02:50 0:23:06 0:16:51 0:06:15 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi043 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6108f662-51de-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076698 2022-10-20 14:55:05 2022-10-22 07:40:04 2022-10-22 07:53:50 0:13:46 0:06:44 0:07:02 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi036 with status 5: 'sudo systemctl stop ceph-88d1e78a-51de-11ed-8438-001a4aab830c@mon.smithi036'

fail 7076699 2022-10-20 14:55:06 2022-10-22 07:40:45 2022-10-22 07:57:35 0:16:50 0:09:27 0:07:23 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-e2724050-51de-11ed-8438-001a4aab830c@mon.a'

fail 7076700 2022-10-20 14:55:07 2022-10-22 07:40:45 2022-10-22 07:54:18 0:13:33 0:06:37 0:06:56 smithi main ubuntu 18.04 orch/cephadm/with-work/{0-distro/ubuntu_18.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi145 with status 5: 'sudo systemctl stop ceph-8ad5657a-51de-11ed-8438-001a4aab830c@mon.a'

fail 7076701 2022-10-20 14:55:08 2022-10-22 07:40:55 2022-10-22 07:58:58 0:18:03 0:09:28 0:08:35 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi066 with status 5: 'sudo systemctl stop ceph-1612a1c0-51df-11ed-8438-001a4aab830c@mon.a'

fail 7076702 2022-10-20 14:55:09 2022-10-22 07:40:56 2022-10-22 07:54:39 0:13:43 0:06:10 0:07:33 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/luminous-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi035 with status 5: 'sudo systemctl stop ceph-85d0c204-51de-11ed-8438-001a4aab830c@mon.a'

fail 7076703 2022-10-20 14:55:11 2022-10-22 07:41:26 2022-10-22 08:08:15 0:26:49 0:20:41 0:06:08 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid faa7be0c-51de-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076704 2022-10-20 14:55:12 2022-10-22 07:41:37 2022-10-22 07:57:45 0:16:08 0:04:56 0:11:12 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi078 with status 5: 'sudo systemctl stop ceph-f5b5964e-51de-11ed-8438-001a4aab830c@mon.a'

fail 7076705 2022-10-20 14:55:13 2022-10-22 07:42:17 2022-10-22 07:58:17 0:16:00 0:08:13 0:07:47 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi012 with status 5: 'sudo systemctl stop ceph-09078716-51df-11ed-8438-001a4aab830c@mon.smithi012'

fail 7076706 2022-10-20 14:55:14 2022-10-22 07:42:17 2022-10-22 08:01:07 0:18:50 0:10:06 0:08:44 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-74da3f74-51df-11ed-8438-001a4aab830c@mon.a'

fail 7076707 2022-10-20 14:55:15 2022-10-22 07:44:08 2022-10-22 07:56:45 0:12:37 0:04:55 0:07:42 smithi main ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi159 with status 5: 'sudo systemctl stop ceph-eb711974-51de-11ed-8438-001a4aab830c@mon.smithi159'

fail 7076708 2022-10-20 14:55:17 2022-10-22 07:45:19 2022-10-22 07:58:58 0:13:39 0:06:05 0:07:34 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi057 with status 5: 'sudo systemctl stop ceph-1d337a42-51df-11ed-8438-001a4aab830c@mon.a'

fail 7076709 2022-10-20 14:55:18 2022-10-22 07:45:19 2022-10-22 07:59:44 0:14:25 0:07:21 0:07:04 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi103 with status 5: 'sudo systemctl stop ceph-67fcd712-51df-11ed-8438-001a4aab830c@mon.smithi103'

fail 7076710 2022-10-20 14:55:19 2022-10-22 07:46:30 2022-10-22 08:04:06 0:17:36 0:05:28 0:12:08 smithi main orch/cephadm/dashboard/{0-distro/ignorelist_health task/test_e2e} 2
Failure Reason:

Failed to fetch package version from https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a

fail 7076711 2022-10-20 14:55:20 2022-10-22 07:47:20 2022-10-22 08:15:08 0:27:48 0:20:20 0:07:28 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi167 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e6662b80-51df-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076712 2022-10-20 14:55:21 2022-10-22 07:48:11 2022-10-22 08:02:19 0:14:08 0:06:40 0:07:28 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-b5c457f4-51df-11ed-8438-001a4aab830c@mon.a'

fail 7076713 2022-10-20 14:55:23 2022-10-22 07:49:21 2022-10-22 08:00:14 0:10:53 0:04:30 0:06:23 smithi main ubuntu 18.04 orch/cephadm/smoke-singlehost/{0-distro$/{ubuntu_18.04} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-5c766322-51df-11ed-8438-001a4aab830c@mon.smithi017'

fail 7076714 2022-10-20 14:55:24 2022-10-22 07:49:22 2022-10-22 08:07:05 0:17:43 0:09:40 0:08:03 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi008 with status 5: 'sudo systemctl stop ceph-3e83de20-51e0-11ed-8438-001a4aab830c@mon.a'

fail 7076715 2022-10-20 14:55:25 2022-10-22 07:50:02 2022-10-22 08:24:37 0:34:35 0:23:19 0:11:16 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.9 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3faee038-51e0-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7076716 2022-10-20 14:55:26 2022-10-22 07:51:13 2022-10-22 08:09:07 0:17:54 0:07:11 0:10:43 smithi main ubuntu 20.04 orch/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi191 with status 5: 'sudo systemctl stop ceph-96a8b21a-51e0-11ed-8438-001a4aab830c@mon.a'

pass 7076717 2022-10-20 14:55:27 2022-10-22 07:51:53 2022-10-22 08:09:01 0:17:08 0:11:13 0:05:55 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_adoption} 1
fail 7076718 2022-10-20 14:55:29 2022-10-22 07:51:54 2022-10-22 08:03:55 0:12:01 0:05:12 0:06:49 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi084 with status 5: 'sudo systemctl stop ceph-f63df5d8-51df-11ed-8438-001a4aab830c@mon.smithi084'

fail 7076719 2022-10-20 14:55:30 2022-10-22 07:52:34 2022-10-22 08:06:23 0:13:49 0:06:33 0:07:16 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/mimic-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi045 with status 5: 'sudo systemctl stop ceph-37493416-51e0-11ed-8438-001a4aab830c@mon.a'

fail 7076720 2022-10-20 14:55:31 2022-10-22 07:53:05 2022-10-22 08:10:02 0:16:57 0:10:14 0:06:43 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi080 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7076721 2022-10-20 14:55:32 2022-10-22 07:53:05 2022-10-22 08:04:42 0:11:37 0:05:17 0:06:20 smithi main ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi085 with status 5: 'sudo systemctl stop ceph-155b1a2c-51e0-11ed-8438-001a4aab830c@mon.smithi085'

fail 7076722 2022-10-20 14:55:33 2022-10-22 07:53:25 2022-10-22 08:08:53 0:15:28 0:08:23 0:07:05 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi059 with status 5: 'sudo systemctl stop ceph-89f7321c-51e0-11ed-8438-001a4aab830c@mon.a'

fail 7076723 2022-10-20 14:55:35 2022-10-22 07:53:56 2022-10-22 08:09:25 0:15:29 0:05:22 0:10:07 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi006 with status 5: 'sudo systemctl stop ceph-a6d7277a-51e0-11ed-8438-001a4aab830c@mon.smithi006'

fail 7076724 2022-10-20 14:55:36 2022-10-22 07:53:56 2022-10-22 08:22:56 0:29:00 0:21:45 0:07:15 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi036 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ce763000-51e0-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076725 2022-10-20 14:55:37 2022-10-22 07:53:57 2022-10-22 08:21:13 0:27:16 0:20:40 0:06:36 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7262f9a6-51e0-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076726 2022-10-20 14:55:38 2022-10-22 07:54:17 2022-10-22 08:07:31 0:13:14 0:07:07 0:06:07 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi145 with status 5: 'sudo systemctl stop ceph-7fc22aae-51e0-11ed-8438-001a4aab830c@mon.smithi145'

fail 7076727 2022-10-20 14:55:39 2022-10-22 07:54:28 2022-10-22 08:11:35 0:17:07 0:10:05 0:07:02 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi046 with status 5: 'sudo systemctl stop ceph-ed7b672c-51e0-11ed-8438-001a4aab830c@mon.a'

fail 7076728 2022-10-20 14:55:41 2022-10-22 07:54:48 2022-10-22 08:13:24 0:18:36 0:09:42 0:08:54 smithi main centos 8.stream orch/cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi097 with status 5: 'sudo systemctl stop ceph-20924284-51e1-11ed-8438-001a4aab830c@mon.a'

fail 7076729 2022-10-20 14:55:42 2022-10-22 07:56:09 2022-10-22 08:10:36 0:14:27 0:07:51 0:06:36 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi035 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 7076730 2022-10-20 14:55:43 2022-10-22 07:56:09 2022-10-22 08:10:59 0:14:50 0:06:25 0:08:25 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/mimic backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-d7327852-51e0-11ed-8438-001a4aab830c@mon.a'

fail 7076731 2022-10-20 14:55:44 2022-10-22 07:57:40 2022-10-22 08:12:40 0:15:00 0:07:43 0:07:17 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi078 with status 5: 'sudo systemctl stop ceph-fbc7f0e8-51e0-11ed-8438-001a4aab830c@mon.a'

fail 7076732 2022-10-20 14:55:46 2022-10-22 07:57:50 2022-10-22 08:14:09 0:16:19 0:08:22 0:07:57 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi012 with status 5: 'sudo systemctl stop ceph-46687ea6-51e1-11ed-8438-001a4aab830c@mon.smithi012'

fail 7076733 2022-10-20 14:55:47 2022-10-22 07:58:20 2022-10-22 08:29:07 0:30:47 0:22:06 0:08:41 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi066 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a341eb3a-51e1-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076734 2022-10-20 14:55:48 2022-10-22 07:59:01 2022-10-22 08:28:21 0:29:20 0:19:07 0:10:13 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi057 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5684dcd0-51e1-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076735 2022-10-20 14:55:49 2022-10-22 07:59:01 2022-10-22 08:16:00 0:16:59 0:09:24 0:07:35 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_nfs} 1
Failure Reason:

Command failed on smithi159 with status 5: 'sudo systemctl stop ceph-73be664a-51e1-11ed-8438-001a4aab830c@mon.a'

fail 7076736 2022-10-20 14:55:51 2022-10-22 07:59:02 2022-10-22 08:14:55 0:15:53 0:05:06 0:10:47 smithi main ubuntu 20.04 orch/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi160 with status 5: 'sudo systemctl stop ceph-5888a9b2-51e1-11ed-8438-001a4aab830c@mon.smithi160'

fail 7076737 2022-10-20 14:55:52 2022-10-22 07:59:32 2022-10-22 08:14:00 0:14:28 0:06:36 0:07:52 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-49c9ecb0-51e1-11ed-8438-001a4aab830c@mon.a'

fail 7076738 2022-10-20 14:55:53 2022-10-22 08:00:23 2022-10-22 08:12:23 0:12:00 0:05:03 0:06:57 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi120 with status 5: 'sudo systemctl stop ceph-1f6ffb44-51e1-11ed-8438-001a4aab830c@mon.a'

fail 7076739 2022-10-20 14:55:54 2022-10-22 08:01:13 2022-10-22 08:16:29 0:15:16 0:06:37 0:08:39 smithi main ubuntu 20.04 orch/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/master} 1
Failure Reason:

[Errno 2] Cannot find file on the remote 'ubuntu@smithi170.front.sepia.ceph.com': 'rook/cluster/examples/kubernetes/ceph/operator.yaml'

fail 7076740 2022-10-20 14:55:55 2022-10-22 08:01:14 2022-10-22 08:14:13 0:12:59 0:07:29 0:05:30 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-71f10f02-51e1-11ed-8438-001a4aab830c@mon.smithi074'

fail 7076741 2022-10-20 14:55:57 2022-10-22 08:01:15 2022-10-22 08:19:19 0:18:04 0:09:41 0:08:23 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-f48d9eb2-51e1-11ed-8438-001a4aab830c@mon.a'

fail 7076742 2022-10-20 14:55:58 2022-10-22 08:02:25 2022-10-22 08:19:39 0:17:14 0:11:29 0:05:45 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-33d47140-51e2-11ed-8438-001a4aab830c@mon.a'

fail 7076743 2022-10-20 14:55:59 2022-10-22 08:02:56 2022-10-22 08:18:48 0:15:52 0:09:09 0:06:43 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Command failed on smithi116 with status 5: 'sudo systemctl stop ceph-166be96c-51e2-11ed-8438-001a4aab830c@mon.a'

fail 7076744 2022-10-20 14:56:00 2022-10-22 08:03:56 2022-10-22 08:15:26 0:11:30 0:05:00 0:06:30 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi081 with status 5: 'sudo systemctl stop ceph-8ac991e8-51e1-11ed-8438-001a4aab830c@mon.smithi081'

fail 7076745 2022-10-20 14:56:01 2022-10-22 08:04:17 2022-10-22 08:31:47 0:27:30 0:20:57 0:06:33 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi085 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3eb35b76-51e2-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076746 2022-10-20 14:56:03 2022-10-22 08:04:47 2022-10-22 08:19:14 0:14:27 0:06:21 0:08:06 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-fbf4cc02-51e1-11ed-8438-001a4aab830c@mon.a'

fail 7076747 2022-10-20 14:56:04 2022-10-22 08:05:08 2022-10-22 08:20:48 0:15:40 0:05:07 0:10:33 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi083 with status 5: 'sudo systemctl stop ceph-363246ce-51e2-11ed-8438-001a4aab830c@mon.a'

fail 7076748 2022-10-20 14:56:05 2022-10-22 08:05:29 2022-10-22 08:24:20 0:18:51 0:10:13 0:08:38 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi008 with status 5: 'sudo systemctl stop ceph-b89be8d6-51e2-11ed-8438-001a4aab830c@mon.a'

fail 7076749 2022-10-20 14:56:06 2022-10-22 08:07:09 2022-10-22 08:20:46 0:13:37 0:06:45 0:06:52 smithi main centos 8.stream orch/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi145 with status 5: 'sudo systemctl stop ceph-4d01ecd8-51e2-11ed-8438-001a4aab830c@mon.smithi145'

fail 7076750 2022-10-20 14:56:07 2022-10-22 08:07:41 2022-10-22 08:23:27 0:15:46 0:05:21 0:10:25 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi071 with status 5: 'sudo systemctl stop ceph-99804c58-51e2-11ed-8438-001a4aab830c@mon.smithi071'

fail 7076751 2022-10-20 14:56:09 2022-10-22 08:08:21 2022-10-22 08:26:08 0:17:47 0:09:38 0:08:09 smithi main centos 8.stream orch/cephadm/dashboard/{0-distro/centos_8.stream_container_tools task/test_e2e} 2
Failure Reason:

Command failed on smithi143 with status 5: 'sudo systemctl stop ceph-e602254c-51e2-11ed-8438-001a4aab830c@mon.a'

fail 7076752 2022-10-20 14:56:10 2022-10-22 08:08:42 2022-10-22 08:38:23 0:29:41 0:21:35 0:08:06 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f03bac18-51e2-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076753 2022-10-20 14:56:11 2022-10-22 08:08:42 2022-10-22 08:38:09 0:29:27 0:21:39 0:07:48 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi099 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 808d88e6-51e2-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

pass 7076754 2022-10-20 14:56:12 2022-10-22 08:09:03 2022-10-22 08:32:30 0:23:27 0:13:26 0:10:01 smithi main ubuntu 20.04 orch/cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_20.04} 2-node-mgr orchestrator_cli} 2
pass 7076755 2022-10-20 14:56:13 2022-10-22 08:09:14 2022-10-22 08:46:43 0:37:29 0:30:21 0:07:08 smithi main rhel 8.4 orch/cephadm/rbd_iscsi/{base/install cluster/{fixed-3 openstack} pool/datapool supported-random-distro$/{rhel_8} workloads/ceph_iscsi} 3
fail 7076756 2022-10-20 14:56:15 2022-10-22 08:09:34 2022-10-22 08:23:28 0:13:54 0:07:01 0:06:53 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi035 with status 5: 'sudo systemctl stop ceph-b6a63748-51e2-11ed-8438-001a4aab830c@mon.a'

fail 7076757 2022-10-20 14:56:16 2022-10-22 08:10:45 2022-10-22 08:24:03 0:13:18 0:07:01 0:06:17 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-cc115356-51e2-11ed-8438-001a4aab830c@mon.smithi040'

fail 7076758 2022-10-20 14:56:17 2022-10-22 08:11:05 2022-10-22 08:23:59 0:12:54 0:07:28 0:05:26 smithi main rhel 8.4 orch/cephadm/smoke-singlehost/{0-distro$/{rhel_8.4_container_tools_rhel8} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi142 with status 5: 'sudo systemctl stop ceph-cf9b53a0-51e2-11ed-8438-001a4aab830c@mon.smithi142'

fail 7076759 2022-10-20 14:56:18 2022-10-22 08:11:06 2022-10-22 08:28:29 0:17:23 0:10:07 0:07:16 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi046 with status 5: 'sudo systemctl stop ceph-4bf461c6-51e3-11ed-8438-001a4aab830c@mon.a'

fail 7076760 2022-10-20 14:56:19 2022-10-22 08:11:36 2022-10-22 08:35:19 0:23:43 0:17:03 0:06:40 smithi main centos 8.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi120 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f5ae782e-51e2-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076761 2022-10-20 14:56:21 2022-10-22 08:12:27 2022-10-22 08:29:54 0:17:27 0:11:07 0:06:20 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi078 with status 5: 'sudo systemctl stop ceph-9721bf04-51e3-11ed-8438-001a4aab830c@mon.a'

pass 7076762 2022-10-20 14:56:22 2022-10-22 08:12:47 2022-10-22 08:30:34 0:17:47 0:11:20 0:06:27 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_adoption} 1
fail 7076763 2022-10-20 14:56:23 2022-10-22 08:13:28 2022-10-22 08:27:44 0:14:16 0:06:45 0:07:31 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-39edf8e8-51e3-11ed-8438-001a4aab830c@mon.a'

fail 7076764 2022-10-20 14:56:24 2022-10-22 08:14:08 2022-10-22 08:30:10 0:16:02 0:08:24 0:07:38 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi012 with status 5: 'sudo systemctl stop ceph-84053810-51e3-11ed-8438-001a4aab830c@mon.smithi012'

fail 7076765 2022-10-20 14:56:25 2022-10-22 08:14:19 2022-10-22 08:29:17 0:14:58 0:08:10 0:06:48 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-5bf72e96-51e3-11ed-8438-001a4aab830c@mon.smithi074'

fail 7076766 2022-10-20 14:56:27 2022-10-22 08:14:19 2022-10-22 08:31:11 0:16:52 0:10:13 0:06:39 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi121 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7076767 2022-10-20 14:56:28 2022-10-22 08:14:20 2022-10-22 08:30:06 0:15:46 0:08:09 0:07:37 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi160 with status 5: 'sudo systemctl stop ceph-7874aa12-51e3-11ed-8438-001a4aab830c@mon.a'

fail 7076768 2022-10-20 14:56:29 2022-10-22 08:15:00 2022-10-22 08:28:56 0:13:56 0:06:46 0:07:10 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/octopus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi081 with status 5: 'sudo systemctl stop ceph-6686832a-51e3-11ed-8438-001a4aab830c@mon.a'

fail 7076769 2022-10-20 14:56:30 2022-10-22 08:15:31 2022-10-22 08:45:04 0:29:33 0:21:44 0:07:49 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi159 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d6f2888e-51e3-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076770 2022-10-20 14:56:32 2022-10-22 08:16:01 2022-10-22 08:31:56 0:15:55 0:07:31 0:08:24 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi061 with status 5: 'sudo systemctl stop ceph-ecfa3474-51e3-11ed-8438-001a4aab830c@mon.smithi061'

fail 7076771 2022-10-20 14:56:33 2022-10-22 08:18:42 2022-10-22 08:36:31 0:17:49 0:09:25 0:08:24 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi090 with status 5: 'sudo systemctl stop ceph-5116c558-51e4-11ed-8438-001a4aab830c@mon.a'

fail 7076772 2022-10-20 14:56:34 2022-10-22 08:18:42 2022-10-22 08:32:33 0:13:51 0:06:38 0:07:13 smithi main ubuntu 18.04 orch/cephadm/with-work/{0-distro/ubuntu_18.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi116 with status 5: 'sudo systemctl stop ceph-e3c349a4-51e3-11ed-8438-001a4aab830c@mon.a'

fail 7076773 2022-10-20 14:56:35 2022-10-22 08:19:23 2022-10-22 08:33:32 0:14:09 0:07:33 0:06:36 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi037 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 7076774 2022-10-20 14:56:37 2022-10-22 08:19:23 2022-10-22 08:32:22 0:12:59 0:07:35 0:05:24 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-fd799916-51e3-11ed-8438-001a4aab830c@mon.a'

fail 7076775 2022-10-20 14:56:38 2022-10-22 08:19:24 2022-10-22 08:30:40 0:11:16 0:04:54 0:06:22 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-a8371168-51e3-11ed-8438-001a4aab830c@mon.smithi043'

fail 7076776 2022-10-20 14:56:39 2022-10-22 08:19:44 2022-10-22 08:34:22 0:14:38 0:06:06 0:08:32 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/luminous-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi083 with status 5: 'sudo systemctl stop ceph-0fa5cede-51e4-11ed-8438-001a4aab830c@mon.a'

fail 7076777 2022-10-20 14:56:40 2022-10-22 08:20:55 2022-10-22 08:34:45 0:13:50 0:07:32 0:06:18 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi027 with status 5: 'sudo systemctl stop ceph-5275090a-51e4-11ed-8438-001a4aab830c@mon.smithi027'

fail 7076778 2022-10-20 14:56:42 2022-10-22 08:20:55 2022-10-22 08:48:15 0:27:20 0:21:05 0:06:15 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 894e1ab6-51e4-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076779 2022-10-20 14:56:43 2022-10-22 08:21:16 2022-10-22 08:54:32 0:33:16 0:22:33 0:10:43 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi036 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.9 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a788053c-51e4-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7076780 2022-10-20 14:56:44 2022-10-22 08:23:07 2022-10-22 08:40:24 0:17:17 0:09:16 0:08:01 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_nfs} 1
Failure Reason:

Command failed on smithi080 with status 5: 'sudo systemctl stop ceph-d7c389a6-51e4-11ed-8438-001a4aab830c@mon.a'

fail 7076781 2022-10-20 14:56:45 2022-10-22 08:23:37 2022-10-22 08:38:41 0:15:04 0:05:09 0:09:55 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi035 with status 5: 'sudo systemctl stop ceph-b392f58a-51e4-11ed-8438-001a4aab830c@mon.smithi035'

fail 7076782 2022-10-20 14:56:47 2022-10-22 08:23:37 2022-10-22 08:35:25 0:11:48 0:04:59 0:06:49 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-54f2e8f0-51e4-11ed-8438-001a4aab830c@mon.a'

fail 7076783 2022-10-20 14:56:48 2022-10-22 08:24:08 2022-10-22 08:37:54 0:13:46 0:06:13 0:07:33 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/osd-delay rados thrashers/default thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi045 with status 5: 'sudo systemctl stop ceph-91d20d64-51e4-11ed-8438-001a4aab830c@mon.a'

fail 7076784 2022-10-20 14:56:49 2022-10-22 08:24:28 2022-10-22 08:47:36 0:23:08 0:17:01 0:06:07 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi008 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ad90bffa-51e4-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076785 2022-10-20 14:56:51 2022-10-22 08:24:29 2022-10-22 08:37:10 0:12:41 0:06:45 0:05:56 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi071 with status 5: 'sudo systemctl stop ceph-975766d0-51e4-11ed-8438-001a4aab830c@mon.smithi071'

fail 7076786 2022-10-20 14:56:52 2022-10-22 08:24:29 2022-10-22 08:41:33 0:17:04 0:09:34 0:07:30 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi079 with status 5: 'sudo systemctl stop ceph-0ad52732-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076787 2022-10-20 14:56:53 2022-10-22 08:24:39 2022-10-22 08:42:26 0:17:47 0:06:54 0:10:53 smithi main ubuntu 20.04 orch/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi102 with status 5: 'sudo systemctl stop ceph-356a70d8-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076788 2022-10-20 14:56:54 2022-10-22 08:24:50 2022-10-22 08:41:12 0:16:22 0:09:42 0:06:40 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi026 with status 5: 'sudo systemctl stop ceph-03ad81de-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076789 2022-10-20 14:56:56 2022-10-22 08:24:50 2022-10-22 08:53:27 0:28:37 0:20:42 0:07:55 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi143 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 48834348-51e5-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076790 2022-10-20 14:56:57 2022-10-22 08:26:11 2022-10-22 08:43:09 0:16:58 0:07:38 0:09:20 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi097 with status 5: 'sudo systemctl stop ceph-3a6f0d50-51e5-11ed-8438-001a4aab830c@mon.smithi097'

fail 7076791 2022-10-20 14:56:58 2022-10-22 08:27:51 2022-10-22 08:43:35 0:15:44 0:05:24 0:10:20 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi057 with status 5: 'sudo systemctl stop ceph-6c3cbe72-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076792 2022-10-20 14:56:59 2022-10-22 08:28:22 2022-10-22 08:43:30 0:15:08 0:08:19 0:06:49 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi046 with status 5: 'sudo systemctl stop ceph-5dc154e8-51e5-11ed-8438-001a4aab830c@mon.smithi046'

fail 7076793 2022-10-20 14:57:01 2022-10-22 08:28:32 2022-10-22 08:47:10 0:18:38 0:10:30 0:08:08 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi066 with status 5: 'sudo systemctl stop ceph-f3ad8a94-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076794 2022-10-20 14:57:02 2022-10-22 08:29:13 2022-10-22 08:42:01 0:12:48 0:06:35 0:06:13 smithi main ubuntu 18.04 orch/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/1.6.2} 1
Failure Reason:

Command failed on smithi017 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

fail 7076795 2022-10-20 14:57:03 2022-10-22 08:29:13 2022-10-22 08:43:22 0:14:09 0:06:25 0:07:44 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/mimic-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/mapgap thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi078 with status 5: 'sudo systemctl stop ceph-5cc3537a-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076796 2022-10-20 14:57:04 2022-10-22 08:30:04 2022-10-22 08:46:06 0:16:02 0:07:59 0:08:03 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi012 with status 5: 'sudo systemctl stop ceph-b15c2e3e-51e5-11ed-8438-001a4aab830c@mon.smithi012'

fail 7076797 2022-10-20 14:57:06 2022-10-22 08:30:14 2022-10-22 08:45:59 0:15:45 0:05:40 0:10:05 smithi main orch/cephadm/dashboard/{0-distro/ignorelist_health task/test_e2e} 2
Failure Reason:

Failed to fetch package version from https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a

fail 7076798 2022-10-20 14:57:07 2022-10-22 08:30:15 2022-10-22 08:59:52 0:29:37 0:21:39 0:07:58 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi074 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid eedb12f2-51e5-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076799 2022-10-20 14:57:08 2022-10-22 08:30:35 2022-10-22 08:43:24 0:12:49 0:07:05 0:05:44 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-823deb38-51e5-11ed-8438-001a4aab830c@mon.a'

fail 7076800 2022-10-20 14:57:10 2022-10-22 08:30:46 2022-10-22 08:45:55 0:15:09 0:07:56 0:07:13 smithi main rhel 8.4 orch/cephadm/smoke-singlehost/{0-distro$/{rhel_8.4_container_tools_3.0} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi121 with status 5: 'sudo systemctl stop ceph-a77d0226-51e5-11ed-8438-001a4aab830c@mon.smithi121'

fail 7076801 2022-10-20 14:57:11 2022-10-22 08:31:16 2022-10-22 08:48:50 0:17:34 0:09:43 0:07:51 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi085 with status 5: 'sudo systemctl stop ceph-14ceea60-51e6-11ed-8438-001a4aab830c@mon.a'

fail 7076802 2022-10-20 14:57:12 2022-10-22 08:31:56 2022-10-22 09:01:30 0:29:34 0:19:16 0:10:18 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi061 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f7ed7ea2-51e5-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076803 2022-10-20 14:57:13 2022-10-22 08:32:07 2022-10-22 08:49:21 0:17:14 0:09:58 0:07:16 smithi main centos 8.stream orch/cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-30d030ca-51e6-11ed-8438-001a4aab830c@mon.a'

pass 7076804 2022-10-20 14:57:15 2022-10-22 08:32:27 2022-10-22 08:49:21 0:16:54 0:11:24 0:05:30 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_adoption} 1
fail 7076805 2022-10-20 14:57:16 2022-10-22 08:32:38 2022-10-22 08:43:56 0:11:18 0:05:20 0:05:58 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi116 with status 5: 'sudo systemctl stop ceph-929de154-51e5-11ed-8438-001a4aab830c@mon.smithi116'

fail 7076806 2022-10-20 14:57:17 2022-10-22 08:32:38 2022-10-22 08:47:56 0:15:18 0:06:35 0:08:43 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/mimic backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi083 with status 5: 'sudo systemctl stop ceph-0693af4e-51e6-11ed-8438-001a4aab830c@mon.a'

fail 7076807 2022-10-20 14:57:19 2022-10-22 08:34:29 2022-10-22 08:45:47 0:11:18 0:05:19 0:05:59 smithi main ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-d45e8058-51e5-11ed-8438-001a4aab830c@mon.smithi037'

fail 7076808 2022-10-20 14:57:20 2022-10-22 08:34:29 2022-10-22 08:51:37 0:17:08 0:10:16 0:06:52 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi145 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7076809 2022-10-20 14:57:21 2022-10-22 08:34:50 2022-10-22 08:50:49 0:15:59 0:08:18 0:07:41 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi163 with status 5: 'sudo systemctl stop ceph-625d55f0-51e6-11ed-8438-001a4aab830c@mon.a'

fail 7076810 2022-10-20 14:57:23 2022-10-22 08:35:00 2022-10-22 08:51:11 0:16:11 0:05:05 0:11:06 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi027 with status 5: 'sudo systemctl stop ceph-939e342c-51e6-11ed-8438-001a4aab830c@mon.smithi027'

fail 7076811 2022-10-20 14:57:24 2022-10-22 08:35:00 2022-10-22 09:04:16 0:29:16 0:21:15 0:08:01 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi120 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 8993e300-51e6-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076812 2022-10-20 14:57:26 2022-10-22 08:35:21 2022-10-22 08:50:39 0:15:18 0:06:39 0:08:39 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/none thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi090 with status 5: 'sudo systemctl stop ceph-6a479faa-51e6-11ed-8438-001a4aab830c@mon.a'

fail 7076813 2022-10-20 14:57:27 2022-10-22 08:36:41 2022-10-22 09:04:01 0:27:20 0:20:32 0:06:48 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi071 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 635c1b26-51e6-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076814 2022-10-20 14:57:28 2022-10-22 08:37:12 2022-10-22 08:51:00 0:13:48 0:06:51 0:06:57 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi130 with status 5: 'sudo systemctl stop ceph-8989ea3a-51e6-11ed-8438-001a4aab830c@mon.smithi130'

fail 7076815 2022-10-20 14:57:29 2022-10-22 08:38:02 2022-10-22 08:54:33 0:16:31 0:10:04 0:06:27 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-eec7b256-51e6-11ed-8438-001a4aab830c@mon.a'

fail 7076816 2022-10-20 14:57:31 2022-10-22 08:38:03 2022-10-22 08:55:21 0:17:18 0:11:17 0:06:01 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi099 with status 5: 'sudo systemctl stop ceph-298e4a58-51e7-11ed-8438-001a4aab830c@mon.a'

fail 7076817 2022-10-20 14:57:32 2022-10-22 08:38:13 2022-10-22 08:54:09 0:15:56 0:07:28 0:08:28 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi157 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 7076818 2022-10-20 14:57:33 2022-10-22 08:38:34 2022-10-22 08:53:17 0:14:43 0:07:49 0:06:54 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi035 with status 5: 'sudo systemctl stop ceph-ab334f50-51e6-11ed-8438-001a4aab830c@mon.a'

fail 7076819 2022-10-20 14:57:34 2022-10-22 08:38:44 2022-10-22 08:55:48 0:17:04 0:05:27 0:11:37 smithi main ubuntu 20.04 orch/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi003 with status 5: 'sudo systemctl stop ceph-20baa8ae-51e7-11ed-8438-001a4aab830c@mon.smithi003'

fail 7076820 2022-10-20 14:57:36 2022-10-22 08:40:35 2022-10-22 08:56:23 0:15:48 0:08:03 0:07:45 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi079 with status 5: 'sudo systemctl stop ceph-21c8ebd4-51e7-11ed-8438-001a4aab830c@mon.smithi079'

fail 7076821 2022-10-20 14:57:37 2022-10-22 08:41:35 2022-10-22 09:10:35 0:29:00 0:22:33 0:06:27 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi017 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 73dd70e8-51e7-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7076822 2022-10-20 14:57:38 2022-10-22 08:42:06 2022-10-22 09:05:44 0:23:38 0:16:45 0:06:53 smithi main centos 8.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi102 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 359459f0-51e7-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)"\''

fail 7076823 2022-10-20 14:57:39 2022-10-22 08:42:37 2022-10-22 09:00:33 0:17:56 0:09:29 0:08:27 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_nfs} 1
Failure Reason:

Command failed on smithi103 with status 5: 'sudo systemctl stop ceph-afb2e814-51e7-11ed-8438-001a4aab830c@mon.a'

fail 7076824 2022-10-20 14:57:41 2022-10-22 08:43:17 2022-10-22 08:56:35 0:13:18 0:06:52 0:06:26 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/pggrow thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-456a5852-51e7-11ed-8438-001a4aab830c@mon.a'

fail 7076825 2022-10-20 14:57:42 2022-10-22 08:43:27 2022-10-22 08:54:46 0:11:19 0:05:15 0:06:04 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi078 with status 5: 'sudo systemctl stop ceph-127af2f8-51e7-11ed-8438-001a4aab830c@mon.a'

fail 7076826 2022-10-20 14:57:43 2022-10-22 08:43:28 2022-10-22 08:58:31 0:15:03 0:07:50 0:07:13 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi057 with status 5: 'sudo systemctl stop ceph-66fade4c-51e7-11ed-8438-001a4aab830c@mon.smithi057'

fail 7076827 2022-10-20 14:57:44 2022-10-22 08:43:38 2022-10-22 09:00:32 0:16:54 0:10:02 0:06:52 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi046 with status 5: 'sudo systemctl stop ceph-c35c8320-51e7-11ed-8438-001a4aab830c@mon.a'

fail 7076828 2022-10-20 14:57:46 2022-10-22 08:43:39 2022-10-22 09:00:54 0:17:15 0:10:58 0:06:17 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi116 with status 5: 'sudo systemctl stop ceph-e53f968a-51e7-11ed-8438-001a4aab830c@mon.a'