Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 7079773 2022-10-24 15:02:37 2022-10-24 19:39:13 2022-10-24 19:55:19 0:16:06 0:09:05 0:07:01 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Command failed on smithi130 with status 5: 'sudo systemctl stop ceph-b62a1358-53d5-11ed-8438-001a4aab830c@mon.a'
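
Most of the failures in this run share the signature above: 'systemctl stop' exits with status 5, which typically means the ceph-<fsid>@mon unit was never loaded on that host (for example because the mon daemon never came up before teardown tried to stop it). A minimal sketch of how to confirm that reading on a node, reusing the fsid from the log line above purely for illustration:

    # fsid copied from the failure above, shown only to illustrate the unit naming.
    fsid=b62a1358-53d5-11ed-8438-001a4aab830c
    unit="ceph-${fsid}@mon.a"

    # If the unit was never loaded, the stop below fails with exit status 5.
    sudo systemctl list-units --all "${unit}*"
    sudo systemctl stop "${unit}"; echo "stop exit status: $?"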

fail 7079775 2022-10-24 15:02:39 2022-10-24 19:40:45 2022-10-24 19:52:17 0:11:32 0:04:52 0:06:40 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi110 with status 5: 'sudo systemctl stop ceph-34406c98-53d5-11ed-8438-001a4aab830c@mon.smithi110'

fail 7079777 2022-10-24 15:02:40 2022-10-24 19:41:06 2022-10-24 20:10:45 0:29:39 0:21:45 0:07:54 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi062 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f882aa76-53d5-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''
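
The escaped command above is hard to read as logged; once the nested '"'"' single-quote wrapping is unwrapped, the script cephadm runs inside the container is simply the version check below (the sha1 is supplied by the harness via the outer '-e sha1=...' argument):

    # Effective command inside 'cephadm shell -- bash -c ...': fail (non-zero
    # jq/grep exit) unless the target sha1 appears among the cluster's
    # reported version strings.
    ceph versions | jq -e '.overall | keys' | grep $sha1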

fail 7079779 2022-10-24 15:02:41 2022-10-24 19:42:38 2022-10-24 19:57:00 0:14:22 0:06:36 0:07:46 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi003 with status 5: 'sudo systemctl stop ceph-d3a96d52-53d5-11ed-8438-001a4aab830c@mon.a'

fail 7079781 2022-10-24 15:02:42 2022-10-24 19:43:59 2022-10-24 19:59:27 0:15:28 0:05:31 0:09:57 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi063 with status 5: 'sudo systemctl stop ceph-3260a1bc-53d6-11ed-8438-001a4aab830c@mon.a'

fail 7079783 2022-10-24 15:02:44 2022-10-24 19:44:30 2022-10-24 20:02:28 0:17:58 0:10:42 0:07:16 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-a66bac3c-53d6-11ed-8438-001a4aab830c@mon.a'

fail 7079785 2022-10-24 15:02:45 2022-10-24 19:45:21 2022-10-24 20:01:09 0:15:48 0:05:35 0:10:13 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi120 with status 5: 'sudo systemctl stop ceph-70e306f0-53d6-11ed-8438-001a4aab830c@mon.smithi120'

fail 7079787 2022-10-24 15:02:46 2022-10-24 19:45:51 2022-10-24 20:00:55 0:15:04 0:07:12 0:07:52 smithi main centos 8.stream orch/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi049 with status 5: 'sudo systemctl stop ceph-8136c80c-53d6-11ed-8438-001a4aab830c@mon.smithi049'

fail 7079789 2022-10-24 15:02:48 2022-10-24 19:47:22 2022-10-24 20:04:17 0:16:55 0:10:06 0:06:49 smithi main centos 8.stream orch/cephadm/dashboard/{0-distro/centos_8.stream_container_tools task/test_e2e} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-d4992f26-53d6-11ed-8438-001a4aab830c@mon.a'

fail 7079791 2022-10-24 15:02:49 2022-10-24 19:47:23 2022-10-24 20:17:24 0:30:01 0:22:07 0:07:54 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f4d6797e-53d6-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079793 2022-10-24 15:02:50 2022-10-24 19:48:04 2022-10-24 20:18:18 0:30:14 0:21:29 0:08:45 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi066 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid aed52420-53d6-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

pass 7079795 2022-10-24 15:02:52 2022-10-24 19:48:15 2022-10-24 20:13:40 0:25:25 0:18:59 0:06:26 smithi main rhel 8.4 orch/cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.4_container_tools_rhel8} 2-node-mgr orchestrator_cli} 2
pass 7079797 2022-10-24 15:02:53 2022-10-24 19:48:46 2022-10-24 20:23:49 0:35:03 0:27:45 0:07:18 smithi main rhel 8.4 orch/cephadm/rbd_iscsi/{base/install cluster/{fixed-3 openstack} pool/datapool supported-random-distro$/{rhel_8} workloads/ceph_iscsi} 3
fail 7079799 2022-10-24 15:02:54 2022-10-24 19:50:37 2022-10-24 20:04:38 0:14:01 0:07:01 0:07:00 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi057 with status 5: 'sudo systemctl stop ceph-ffcc9a3e-53d6-11ed-8438-001a4aab830c@mon.a'

fail 7079801 2022-10-24 15:02:56 2022-10-24 19:52:48 2022-10-24 20:05:36 0:12:48 0:07:03 0:05:45 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi045 with status 5: 'sudo systemctl stop ceph-257f8534-53d7-11ed-8438-001a4aab830c@mon.smithi045'

fail 7079803 2022-10-24 15:02:57 2022-10-24 19:53:29 2022-10-24 20:08:56 0:15:27 0:07:54 0:07:33 smithi main rhel 8.4 orch/cephadm/smoke-singlehost/{0-distro$/{rhel_8.4_container_tools_3.0} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi159 with status 5: 'sudo systemctl stop ceph-66ab4e9e-53d7-11ed-8438-001a4aab830c@mon.smithi159'

fail 7079805 2022-10-24 15:02:58 2022-10-24 19:54:30 2022-10-24 20:11:39 0:17:09 0:09:54 0:07:15 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi036 with status 5: 'sudo systemctl stop ceph-d3e55f54-53d7-11ed-8438-001a4aab830c@mon.a'

fail 7079807 2022-10-24 15:02:59 2022-10-24 19:55:11 2022-10-24 20:25:26 0:30:15 0:19:41 0:10:34 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi106 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid dbcf6c6e-53d7-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)"\''
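
Unwrapped, the jq pipeline in the failed command picks the name of the standby mgr out of 'ceph mgr dump' and hands it to the orchestrator for redeployment; split into steps it is roughly:

    # Extract the standby mgr's name from the JSON dump ...
    standby=$(ceph mgr dump -f json | jq .standbys | jq '.[]' | jq -r .name)
    # ... then ask the orchestrator to redeploy that daemon; the exit status
    # of 22 above (EINVAL) suggests the ceph CLI rejected the request.
    ceph orch daemon redeploy "mgr.${standby}"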

fail 7079809 2022-10-24 15:03:01 2022-10-24 19:56:12 2022-10-24 20:15:06 0:18:54 0:11:49 0:07:05 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-3fa1d77c-53d8-11ed-8438-001a4aab830c@mon.a'

pass 7079811 2022-10-24 15:03:02 2022-10-24 19:56:53 2022-10-24 20:16:41 0:19:48 0:11:37 0:08:11 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_adoption} 1
fail 7079813 2022-10-24 15:03:03 2022-10-24 19:57:04 2022-10-24 20:11:55 0:14:51 0:06:57 0:07:54 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi006 with status 5: 'sudo systemctl stop ceph-f52bd72e-53d7-11ed-8438-001a4aab830c@mon.a'

fail 7079815 2022-10-24 15:03:05 2022-10-24 19:59:25 2022-10-24 20:14:28 0:15:03 0:08:27 0:06:36 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi063 with status 5: 'sudo systemctl stop ceph-3e04c1cc-53d8-11ed-8438-001a4aab830c@mon.smithi063'

fail 7079817 2022-10-24 15:03:06 2022-10-24 19:59:56 2022-10-24 20:16:50 0:16:54 0:10:27 0:06:27 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi121 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7079819 2022-10-24 15:03:07 2022-10-24 20:00:57 2022-10-24 20:16:21 0:15:24 0:08:30 0:06:54 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi120 with status 5: 'sudo systemctl stop ceph-8248b1b8-53d8-11ed-8438-001a4aab830c@mon.a'

fail 7079821 2022-10-24 15:03:09 2022-10-24 20:01:58 2022-10-24 20:15:17 0:13:19 0:07:08 0:06:11 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-74d00554-53d8-11ed-8438-001a4aab830c@mon.a'

fail 7079823 2022-10-24 15:03:10 2022-10-24 20:02:39 2022-10-24 20:17:28 0:14:49 0:08:36 0:06:13 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-af89912e-53d8-11ed-8438-001a4aab830c@mon.smithi040'

fail 7079825 2022-10-24 15:03:11 2022-10-24 20:02:50 2022-10-24 20:31:47 0:28:57 0:22:20 0:06:37 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi032 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fcf831ae-53d8-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079827 2022-10-24 15:03:13 2022-10-24 20:03:31 2022-10-24 20:18:47 0:15:16 0:07:56 0:07:20 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi079 with status 5: 'sudo systemctl stop ceph-c7ff7462-53d8-11ed-8438-001a4aab830c@mon.smithi079'

fail 7079829 2022-10-24 15:03:14 2022-10-24 20:03:52 2022-10-24 20:21:21 0:17:29 0:10:14 0:07:15 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-3ba5bc50-53d9-11ed-8438-001a4aab830c@mon.a'

fail 7079831 2022-10-24 15:03:15 2022-10-24 20:04:23 2022-10-24 20:20:48 0:16:25 0:07:53 0:08:32 smithi main ubuntu 18.04 orch/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/calico rook/1.6.2} 3
Failure Reason:

Command failed on smithi045 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

fail 7079833 2022-10-24 15:03:16 2022-10-24 20:06:34 2022-10-24 20:24:26 0:17:52 0:11:17 0:06:35 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi044 with status 5: 'sudo systemctl stop ceph-c3c2cdd0-53d9-11ed-8438-001a4aab830c@mon.a'

pass 7079836 2022-10-24 15:03:18 2022-10-24 20:07:57 2022-10-24 20:22:35 0:14:38 0:08:14 0:06:24 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos} 1
fail 7079839 2022-10-24 15:03:19 2022-10-24 20:09:49 2022-10-24 20:24:42 0:14:53 0:07:29 0:07:24 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi110 with status 5: 'sudo systemctl stop ceph-8e211132-53d9-11ed-8438-001a4aab830c@mon.a'

fail 7079842 2022-10-24 15:03:21 2022-10-24 20:10:00 2022-10-24 20:22:05 0:12:05 0:04:48 0:07:17 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi062 with status 5: 'sudo systemctl stop ceph-5c74cbec-53d9-11ed-8438-001a4aab830c@mon.smithi062'

fail 7079845 2022-10-24 15:03:22 2022-10-24 20:11:52 2022-10-24 20:25:18 0:13:26 0:06:34 0:06:52 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/octopus backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi006 with status 5: 'sudo systemctl stop ceph-c6669bf2-53d9-11ed-8438-001a4aab830c@mon.a'

fail 7079848 2022-10-24 15:03:23 2022-10-24 20:12:54 2022-10-24 20:43:07 0:30:13 0:21:30 0:08:43 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7febc46c-53da-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079851 2022-10-24 15:03:25 2022-10-24 20:15:16 2022-10-24 20:38:15 0:22:59 0:16:51 0:06:08 smithi main centos 8.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi080 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 406923ac-53da-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079854 2022-10-24 15:03:26 2022-10-24 20:16:07 2022-10-24 20:33:25 0:17:18 0:09:27 0:07:51 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_nfs} 1
Failure Reason:

Command failed on smithi154 with status 5: 'sudo systemctl stop ceph-cf4cd4d8-53da-11ed-8438-001a4aab830c@mon.a'

fail 7079857 2022-10-24 15:03:27 2022-10-24 20:16:58 2022-10-24 20:35:08 0:18:10 0:05:44 0:12:26 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi055 with status 5: 'sudo systemctl stop ceph-079453ac-53db-11ed-8438-001a4aab830c@mon.smithi055'

fail 7079860 2022-10-24 15:03:28 2022-10-24 20:18:20 2022-10-24 20:33:47 0:15:27 0:08:11 0:07:16 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi059 with status 5: 'sudo systemctl stop ceph-e935cf1c-53da-11ed-8438-001a4aab830c@mon.smithi059'

fail 7079863 2022-10-24 15:03:30 2022-10-24 20:19:01 2022-10-24 20:32:02 0:13:01 0:05:29 0:07:32 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi027 with status 5: 'sudo systemctl stop ceph-d79ed29e-53da-11ed-8438-001a4aab830c@mon.a'

fail 7079866 2022-10-24 15:03:31 2022-10-24 20:20:52 2022-10-24 20:44:01 0:23:09 0:17:12 0:05:57 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi045 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 270cbd46-53db-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079869 2022-10-24 15:03:32 2022-10-24 20:21:24 2022-10-24 20:36:14 0:14:50 0:07:28 0:07:22 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi053 with status 5: 'sudo systemctl stop ceph-3276e116-53db-11ed-8438-001a4aab830c@mon.smithi053'

fail 7079872 2022-10-24 15:03:33 2022-10-24 20:22:15 2022-10-24 20:39:49 0:17:34 0:10:25 0:07:09 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi057 with status 5: 'sudo systemctl stop ceph-d52432ec-53db-11ed-8438-001a4aab830c@mon.a'

fail 7079875 2022-10-24 15:03:35 2022-10-24 20:24:27 2022-10-24 20:37:57 0:13:30 0:07:37 0:05:53 smithi main ubuntu 18.04 orch/cephadm/with-work/{0-distro/ubuntu_18.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi046 with status 5: 'sudo systemctl stop ceph-b23e0b4a-53db-11ed-8438-001a4aab830c@mon.a'

fail 7079878 2022-10-24 15:03:36 2022-10-24 20:25:28 2022-10-24 20:42:41 0:17:13 0:09:36 0:07:37 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi153 with status 5: 'sudo systemctl stop ceph-20c25670-53dc-11ed-8438-001a4aab830c@mon.a'

fail 7079881 2022-10-24 15:03:37 2022-10-24 20:25:29 2022-10-24 20:42:14 0:16:45 0:06:20 0:10:25 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/luminous-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-1b54b39a-53dc-11ed-8438-001a4aab830c@mon.a'

fail 7079884 2022-10-24 15:03:39 2022-10-24 20:29:31 2022-10-24 20:56:47 0:27:16 0:20:22 0:06:54 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi071 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid aa8cb2ce-53dc-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079887 2022-10-24 15:03:40 2022-10-24 20:30:23 2022-10-24 20:46:26 0:16:03 0:05:22 0:10:41 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi047 with status 5: 'sudo systemctl stop ceph-bad0f8ca-53dc-11ed-8438-001a4aab830c@mon.a'

fail 7079890 2022-10-24 15:03:41 2022-10-24 20:32:05 2022-10-24 20:47:10 0:15:05 0:08:10 0:06:55 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi121 with status 5: 'sudo systemctl stop ceph-c66060ae-53dc-11ed-8438-001a4aab830c@mon.smithi121'

fail 7079893 2022-10-24 15:03:42 2022-10-24 20:33:07 2022-10-24 20:52:36 0:19:29 0:10:47 0:08:42 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi055 with status 5: 'sudo systemctl stop ceph-a9da4aa2-53dd-11ed-8438-001a4aab830c@mon.a'

fail 7079896 2022-10-24 15:03:44 2022-10-24 20:47:36 327 smithi main ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi053 with status 5: 'sudo systemctl stop ceph-04535dd0-53dd-11ed-8438-001a4aab830c@mon.smithi053'

fail 7079898 2022-10-24 15:03:45 2022-10-24 20:36:50 2022-10-24 20:51:28 0:14:38 0:06:45 0:07:53 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi087 with status 5: 'sudo systemctl stop ceph-747747f2-53dd-11ed-8438-001a4aab830c@mon.a'

fail 7079901 2022-10-24 15:03:46 2022-10-24 20:38:21 2022-10-24 20:53:45 0:15:24 0:07:52 0:07:32 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi003 with status 5: 'sudo systemctl stop ceph-a87c6b36-53dd-11ed-8438-001a4aab830c@mon.smithi003'

fail 7079904 2022-10-24 15:03:47 2022-10-24 20:38:52 2022-10-24 20:55:30 0:16:38 0:05:08 0:11:30 smithi main orch/cephadm/dashboard/{0-distro/ignorelist_health task/test_e2e} 2
Failure Reason:

Failed to fetch package version from https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a
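
Unlike the other failures, this job never reached a ceph command: the harness could not find a ready package build on shaman for this sha1 on ubuntu 22.04. The lookup can be repeated by hand against the same endpoint; an empty result list would indicate that no build exists for that combination:

    # Same query the harness issued; inspect the returned JSON list.
    curl -s 'https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a'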

fail 7079907 2022-10-24 15:03:49 2022-10-24 20:41:34 2022-10-24 21:09:36 0:28:02 0:21:07 0:06:55 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi044 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 77bb1f64-53de-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079910 2022-10-24 15:03:50 2022-10-24 20:43:16 2022-10-24 20:56:04 0:12:48 0:06:44 0:06:04 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-24abe6c8-53de-11ed-8438-001a4aab830c@mon.a'

fail 7079913 2022-10-24 15:03:51 2022-10-24 20:44:48 2022-10-24 20:59:10 0:14:22 0:07:09 0:07:13 smithi main rhel 8.4 orch/cephadm/smoke-singlehost/{0-distro$/{rhel_8.4_container_tools_rhel8} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi179 with status 5: 'sudo systemctl stop ceph-99117302-53de-11ed-8438-001a4aab830c@mon.smithi179'

fail 7079916 2022-10-24 15:03:52 2022-10-24 20:47:40 2022-10-24 21:05:42 0:18:02 0:09:45 0:08:17 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi032 with status 5: 'sudo systemctl stop ceph-5b4015c8-53df-11ed-8438-001a4aab830c@mon.a'

fail 7079919 2022-10-24 15:03:54 2022-10-24 20:49:21 2022-10-24 21:22:47 0:33:26 0:23:42 0:09:44 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi050 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.9 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 59f092ec-53df-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7079922 2022-10-24 15:03:55 2022-10-24 20:50:32 2022-10-24 21:07:47 0:17:15 0:08:02 0:09:13 smithi main ubuntu 20.04 orch/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi143 with status 5: 'sudo systemctl stop ceph-cfbd5096-53df-11ed-8438-001a4aab830c@mon.a'

pass 7079925 2022-10-24 15:03:56 2022-10-24 20:51:34 2022-10-24 21:10:17 0:18:43 0:11:48 0:06:55 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_adoption} 1
fail 7079928 2022-10-24 15:03:58 2022-10-24 20:52:45 2022-10-24 21:05:34 0:12:49 0:05:44 0:07:05 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi055 with status 5: 'sudo systemctl stop ceph-47f04c36-53df-11ed-8438-001a4aab830c@mon.smithi055'

fail 7079931 2022-10-24 15:03:59 2022-10-24 20:53:46 2022-10-24 21:07:17 0:13:31 0:07:40 0:05:51 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/mimic-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-cb73f3be-53df-11ed-8438-001a4aab830c@mon.a'

fail 7079934 2022-10-24 15:04:00 2022-10-24 20:54:48 2022-10-24 21:13:02 0:18:14 0:10:54 0:07:20 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi157 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7079937 2022-10-24 15:04:01 2022-10-24 20:55:39 2022-10-24 21:09:13 0:13:34 0:06:12 0:07:22 smithi main ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-dc00adda-53df-11ed-8438-001a4aab830c@mon.smithi018'

fail 7079940 2022-10-24 15:04:03 2022-10-24 20:56:40 2022-10-24 21:11:50 0:15:10 0:08:49 0:06:21 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi071 with status 5: 'sudo systemctl stop ceph-5087fb18-53e0-11ed-8438-001a4aab830c@mon.a'

fail 7079943 2022-10-24 15:04:04 2022-10-24 20:59:12 2022-10-24 21:14:29 0:15:17 0:05:08 0:10:09 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-abcf59ee-53e0-11ed-8438-001a4aab830c@mon.smithi040'

fail 7079946 2022-10-24 15:04:05 2022-10-24 21:00:34 2022-10-24 21:31:38 0:31:04 0:21:25 0:09:39 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi053 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4545dc60-53e1-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079949 2022-10-24 15:04:06 2022-10-24 21:05:07 2022-10-24 21:32:06 0:26:59 0:20:29 0:06:30 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 38a6dd9c-53e1-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079952 2022-10-24 15:04:08 2022-10-24 21:06:28 2022-10-24 21:20:10 0:13:42 0:06:46 0:06:56 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi079 with status 5: 'sudo systemctl stop ceph-82f63ad2-53e1-11ed-8438-001a4aab830c@mon.smithi079'

fail 7079955 2022-10-24 15:04:09 2022-10-24 21:07:50 2022-10-24 21:24:39 0:16:49 0:09:42 0:07:07 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi137 with status 5: 'sudo systemctl stop ceph-ff4353ae-53e1-11ed-8438-001a4aab830c@mon.a'

fail 7079958 2022-10-24 15:04:10 2022-10-24 21:09:41 2022-10-24 21:26:45 0:17:04 0:09:56 0:07:08 smithi main centos 8.stream orch/cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi006 with status 5: 'sudo systemctl stop ceph-533243f8-53e2-11ed-8438-001a4aab830c@mon.a'

pass 7079961 2022-10-24 15:04:11 2022-10-24 21:10:42 2022-10-24 21:25:18 0:14:36 0:08:21 0:06:15 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm_repos} 1
fail 7079964 2022-10-24 15:04:12 2022-10-24 21:11:14 2022-10-24 21:25:04 0:13:50 0:06:44 0:07:06 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/mimic backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi099 with status 5: 'sudo systemctl stop ceph-26387192-53e2-11ed-8438-001a4aab830c@mon.a'

fail 7079967 2022-10-24 15:04:14 2022-10-24 21:11:55 2022-10-24 21:29:18 0:17:23 0:07:41 0:09:42 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi153 with status 5: 'sudo systemctl stop ceph-97fc74b8-53e2-11ed-8438-001a4aab830c@mon.a'

fail 7079970 2022-10-24 15:04:15 2022-10-24 21:14:36 2022-10-24 21:30:03 0:15:27 0:08:04 0:07:23 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi164 with status 5: 'sudo systemctl stop ceph-c009d87e-53e2-11ed-8438-001a4aab830c@mon.smithi164'

fail 7079973 2022-10-24 15:04:16 2022-10-24 21:18:38 2022-10-24 21:46:35 0:27:57 0:21:10 0:06:47 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi049 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a46baf4c-53e3-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079976 2022-10-24 15:04:17 2022-10-24 21:20:10 2022-10-24 21:50:51 0:30:41 0:19:39 0:11:02 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi027 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d9465df2-53e3-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7079979 2022-10-24 15:04:19 2022-10-24 21:20:22 2022-10-24 21:37:14 0:16:52 0:09:33 0:07:19 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_nfs} 1
Failure Reason:

Command failed on smithi120 with status 5: 'sudo systemctl stop ceph-bd42ac0a-53e3-11ed-8438-001a4aab830c@mon.a'

fail 7079981 2022-10-24 15:04:20 2022-10-24 21:21:23 2022-10-24 21:36:56 0:15:33 0:05:35 0:09:58 smithi main ubuntu 20.04 orch/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-d0541e5a-53e3-11ed-8438-001a4aab830c@mon.smithi017'

fail 7079983 2022-10-24 15:04:21 2022-10-24 21:22:34 2022-10-24 21:36:09 0:13:35 0:06:40 0:06:55 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi143 with status 5: 'sudo systemctl stop ceph-afa93442-53e3-11ed-8438-001a4aab830c@mon.a'

fail 7079985 2022-10-24 15:04:22 2022-10-24 21:23:05 2022-10-24 21:35:24 0:12:19 0:05:21 0:06:58 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi121 with status 5: 'sudo systemctl stop ceph-acebb7f2-53e3-11ed-8438-001a4aab830c@mon.a'

fail 7079987 2022-10-24 15:04:23 2022-10-24 21:24:46 2022-10-24 21:41:58 0:17:12 0:06:48 0:10:24 smithi main ubuntu 20.04 orch/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/master} 1
Failure Reason:

[Errno 2] Cannot find file on the remote 'ubuntu@smithi149.front.sepia.ceph.com': 'rook/cluster/examples/kubernetes/ceph/operator.yaml'

fail 7079989 2022-10-24 15:04:25 2022-10-24 21:24:46 2022-10-24 21:39:42 0:14:56 0:07:59 0:06:57 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi008 with status 5: 'sudo systemctl stop ceph-1712a136-53e4-11ed-8438-001a4aab830c@mon.smithi008'

fail 7079991 2022-10-24 15:04:26 2022-10-24 21:25:07 2022-10-24 21:42:01 0:16:54 0:10:21 0:06:33 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi071 with status 5: 'sudo systemctl stop ceph-83da5d4a-53e4-11ed-8438-001a4aab830c@mon.a'

fail 7079993 2022-10-24 15:04:27 2022-10-24 21:26:48 2022-10-24 21:47:11 0:20:23 0:12:00 0:08:23 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi012 with status 5: 'sudo systemctl stop ceph-2371d8c4-53e5-11ed-8438-001a4aab830c@mon.a'

fail 7079995 2022-10-24 15:04:28 2022-10-24 21:27:59 2022-10-24 21:45:03 0:17:04 0:09:32 0:07:32 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Command failed on smithi093 with status 5: 'sudo systemctl stop ceph-d365433e-53e4-11ed-8438-001a4aab830c@mon.a'

fail 7079997 2022-10-24 15:04:30 2022-10-24 21:29:20 2022-10-24 21:40:44 0:11:24 0:05:04 0:06:20 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi003 with status 5: 'sudo systemctl stop ceph-61e5dd04-53e4-11ed-8438-001a4aab830c@mon.smithi003'

fail 7079999 2022-10-24 15:04:31 2022-10-24 21:29:51 2022-10-24 21:57:20 0:27:29 0:20:36 0:06:53 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi164 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1ef58160-53e5-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080001 2022-10-24 15:04:32 2022-10-24 21:31:32 2022-10-24 21:45:27 0:13:55 0:06:21 0:07:34 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi055 with status 5: 'sudo systemctl stop ceph-f0ba2e40-53e4-11ed-8438-001a4aab830c@mon.a'

fail 7080003 2022-10-24 15:04:33 2022-10-24 21:32:43 2022-10-24 21:48:01 0:15:18 0:05:04 0:10:14 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi053 with status 5: 'sudo systemctl stop ceph-4c194fc8-53e5-11ed-8438-001a4aab830c@mon.a'

fail 7080005 2022-10-24 15:04:35 2022-10-24 21:35:35 2022-10-24 21:52:59 0:17:24 0:10:30 0:06:54 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi005 with status 5: 'sudo systemctl stop ceph-0ffdfba0-53e6-11ed-8438-001a4aab830c@mon.a'

fail 7080007 2022-10-24 15:04:36 2022-10-24 21:36:16 2022-10-24 21:49:04 0:12:48 0:06:56 0:05:52 smithi main centos 8.stream orch/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi130 with status 5: 'sudo systemctl stop ceph-936ff8f4-53e5-11ed-8438-001a4aab830c@mon.smithi130'

fail 7080009 2022-10-24 15:04:37 2022-10-24 21:37:07 2022-10-24 21:52:36 0:15:29 0:05:28 0:10:01 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-fe6e9a70-53e5-11ed-8438-001a4aab830c@mon.smithi018'

fail 7080011 2022-10-24 15:04:38 2022-10-24 21:37:28 2022-10-24 21:55:14 0:17:46 0:09:58 0:07:48 smithi main centos 8.stream orch/cephadm/dashboard/{0-distro/centos_8.stream_container_tools task/test_e2e} 2
Failure Reason:

Command failed on smithi171 with status 5: 'sudo systemctl stop ceph-4ecff7b6-53e6-11ed-8438-001a4aab830c@mon.a'

fail 7080013 2022-10-24 15:04:40 2022-10-24 21:38:29 2022-10-24 22:07:25 0:28:56 0:21:59 0:06:57 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi036 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4db02d60-53e6-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080015 2022-10-24 15:04:41 2022-10-24 21:39:20 2022-10-24 22:06:30 0:27:10 0:21:11 0:05:59 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi159 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0daed392-53e6-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

pass 7080017 2022-10-24 15:04:42 2022-10-24 21:39:21 2022-10-24 21:58:27 0:19:06 0:13:28 0:05:38 smithi main ubuntu 18.04 orch/cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_18.04} 2-node-mgr orchestrator_cli} 2
pass 7080019 2022-10-24 15:04:43 2022-10-24 21:39:52 2022-10-24 22:13:45 0:33:53 0:26:54 0:06:59 smithi main rhel 8.4 orch/cephadm/rbd_iscsi/{base/install cluster/{fixed-3 openstack} pool/datapool supported-random-distro$/{rhel_8} workloads/ceph_iscsi} 3
fail 7080021 2022-10-24 15:04:45 2022-10-24 21:41:03 2022-10-24 21:55:06 0:14:03 0:06:49 0:07:14 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi071 with status 5: 'sudo systemctl stop ceph-66c1e3a2-53e6-11ed-8438-001a4aab830c@mon.a'

fail 7080023 2022-10-24 15:04:46 2022-10-24 21:43:44 2022-10-24 21:56:45 0:13:01 0:06:47 0:06:14 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi032 with status 5: 'sudo systemctl stop ceph-a26fc630-53e6-11ed-8438-001a4aab830c@mon.smithi032'

fail 7080025 2022-10-24 15:04:47 2022-10-24 21:45:05 2022-10-24 22:00:49 0:15:44 0:04:46 0:10:58 smithi main ubuntu 20.04 orch/cephadm/smoke-singlehost/{0-distro$/{ubuntu_20.04} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi138 with status 5: 'sudo systemctl stop ceph-0dd77bde-53e7-11ed-8438-001a4aab830c@mon.smithi138'

fail 7080027 2022-10-24 15:04:48 2022-10-24 21:45:35 2022-10-24 22:04:37 0:19:02 0:09:36 0:09:26 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi049 with status 5: 'sudo systemctl stop ceph-92e6f0ca-53e7-11ed-8438-001a4aab830c@mon.a'

fail 7080029 2022-10-24 15:04:50 2022-10-24 21:47:17 2022-10-24 22:10:27 0:23:10 0:16:40 0:06:30 smithi main centos 8.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1c0905ce-53e7-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080031 2022-10-24 15:04:51 2022-10-24 21:48:08 2022-10-24 22:05:51 0:17:43 0:10:47 0:06:56 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi130 with status 5: 'sudo systemctl stop ceph-dc6c1e8c-53e7-11ed-8438-001a4aab830c@mon.a'

pass 7080033 2022-10-24 15:04:52 2022-10-24 21:49:59 2022-10-24 22:06:57 0:16:58 0:11:19 0:05:39 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_adoption} 1
fail 7080035 2022-10-24 15:04:53 2022-10-24 21:51:00 2022-10-24 22:06:37 0:15:37 0:06:48 0:08:49 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi018 with status 5: 'sudo systemctl stop ceph-f638269e-53e7-11ed-8438-001a4aab830c@mon.a'

fail 7080037 2022-10-24 15:04:54 2022-10-24 21:53:01 2022-10-24 22:07:54 0:14:53 0:08:06 0:06:47 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi087 with status 5: 'sudo systemctl stop ceph-0b4dc5d4-53e8-11ed-8438-001a4aab830c@mon.smithi087'

fail 7080039 2022-10-24 15:04:56 2022-10-24 21:53:02 2022-10-24 22:07:40 0:14:38 0:08:14 0:06:24 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi005 with status 5: 'sudo systemctl stop ceph-06cf5c7a-53e8-11ed-8438-001a4aab830c@mon.smithi005'

fail 7080041 2022-10-24 15:04:57 2022-10-24 21:54:34 2022-10-24 22:11:49 0:17:15 0:10:21 0:06:54 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi154 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7080043 2022-10-24 15:04:58 2022-10-24 21:55:14 2022-10-24 22:10:17 0:15:03 0:08:14 0:06:49 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi171 with status 5: 'sudo systemctl stop ceph-64528a48-53e8-11ed-8438-001a4aab830c@mon.a'

fail 7080045 2022-10-24 15:04:59 2022-10-24 21:55:45 2022-10-24 22:09:44 0:13:59 0:07:00 0:06:59 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/octopus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi099 with status 5: 'sudo systemctl stop ceph-6c8be380-53e8-11ed-8438-001a4aab830c@mon.a'

fail 7080047 2022-10-24 15:05:00 2022-10-24 21:56:46 2022-10-24 22:25:30 0:28:44 0:21:45 0:06:59 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi040 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d07a84c8-53e8-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080049 2022-10-24 15:05:01 2022-10-24 21:57:27 2022-10-24 22:10:30 0:13:03 0:07:33 0:05:30 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi164 with status 5: 'sudo systemctl stop ceph-9e1f70f6-53e8-11ed-8438-001a4aab830c@mon.smithi164'

fail 7080051 2022-10-24 15:05:03 2022-10-24 21:58:28 2022-10-24 22:15:50 0:17:22 0:09:32 0:07:50 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi153 with status 5: 'sudo systemctl stop ceph-20bd2c56-53e9-11ed-8438-001a4aab830c@mon.a'

fail 7080053 2022-10-24 15:05:04 2022-10-24 22:00:59 2022-10-24 22:15:56 0:14:57 0:06:58 0:07:59 smithi main ubuntu 18.04 orch/cephadm/with-work/{0-distro/ubuntu_18.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi052 with status 5: 'sudo systemctl stop ceph-4a7fffb4-53e9-11ed-8438-001a4aab830c@mon.a'

pass 7080055 2022-10-24 15:05:05 2022-10-24 22:03:10 2022-10-24 22:19:03 0:15:53 0:07:54 0:07:59 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos} 1
fail 7080057 2022-10-24 15:05:06 2022-10-24 22:04:31 2022-10-24 22:19:47 0:15:16 0:07:44 0:07:32 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi061 with status 5: 'sudo systemctl stop ceph-a6f781ea-53e9-11ed-8438-001a4aab830c@mon.a'

fail 7080059 2022-10-24 15:05:07 2022-10-24 22:04:42 2022-10-24 22:17:16 0:12:34 0:05:12 0:07:22 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi130 with status 5: 'sudo systemctl stop ceph-8139dcc8-53e9-11ed-8438-001a4aab830c@mon.smithi130'

fail 7080061 2022-10-24 15:05:09 2022-10-24 22:06:23 2022-10-24 22:19:57 0:13:34 0:06:29 0:07:05 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/luminous-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi159 with status 5: 'sudo systemctl stop ceph-c73891ce-53e9-11ed-8438-001a4aab830c@mon.a'

fail 7080063 2022-10-24 15:05:10 2022-10-24 22:06:44 2022-10-24 22:21:11 0:14:27 0:08:00 0:06:27 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-e35d89e0-53e9-11ed-8438-001a4aab830c@mon.smithi017'

fail 7080065 2022-10-24 15:05:11 2022-10-24 22:07:35 2022-10-24 22:36:29 0:28:54 0:22:13 0:06:41 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 60b8cbfc-53ea-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080067 2022-10-24 15:05:12 2022-10-24 22:07:56 2022-10-24 22:42:47 0:34:51 0:23:16 0:11:35 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.9 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7d7cb96a-53ea-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7080069 2022-10-24 15:05:13 2022-10-24 22:09:47 2022-10-24 22:26:06 0:16:19 0:09:46 0:06:33 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_nfs} 1
Failure Reason:

Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-98d7a58a-53ea-11ed-8438-001a4aab830c@mon.a'

fail 7080071 2022-10-24 15:05:15 2022-10-24 22:09:48 2022-10-24 22:25:44 0:15:56 0:05:28 0:10:28 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi171 with status 5: 'sudo systemctl stop ceph-a09f3274-53ea-11ed-8438-001a4aab830c@mon.smithi171'

fail 7080073 2022-10-24 15:05:16 2022-10-24 22:10:21 2022-10-24 22:21:58 0:11:37 0:05:25 0:06:12 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi164 with status 5: 'sudo systemctl stop ceph-31506d48-53ea-11ed-8438-001a4aab830c@mon.a'

fail 7080074 2022-10-24 15:05:17 2022-10-24 22:10:32 2022-10-24 22:24:39 0:14:07 0:06:52 0:07:15 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/osd-delay rados thrashers/default thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi033 with status 5: 'sudo systemctl stop ceph-7da1e118-53ea-11ed-8438-001a4aab830c@mon.a'

fail 7080075 2022-10-24 15:05:18 2022-10-24 22:11:02 2022-10-24 22:36:11 0:25:09 0:17:22 0:07:47 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7f61e70a-53ea-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080076 2022-10-24 15:05:19 2022-10-24 22:11:03 2022-10-24 22:26:43 0:15:40 0:07:22 0:08:18 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi055 with status 5: 'sudo systemctl stop ceph-9f2342dc-53ea-11ed-8438-001a4aab830c@mon.smithi055'

fail 7080077 2022-10-24 15:05:20 2022-10-24 22:12:13 2022-10-24 22:30:19 0:18:06 0:10:05 0:08:01 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi053 with status 5: 'sudo systemctl stop ceph-3a7bff80-53eb-11ed-8438-001a4aab830c@mon.a'

fail 7080078 2022-10-24 15:05:22 2022-10-24 22:13:34 2022-10-24 22:31:22 0:17:48 0:07:00 0:10:48 smithi main ubuntu 20.04 orch/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi008 with status 5: 'sudo systemctl stop ceph-5a2c3c78-53eb-11ed-8438-001a4aab830c@mon.a'

fail 7080079 2022-10-24 15:05:23 2022-10-24 22:13:54 2022-10-24 22:30:31 0:16:37 0:09:56 0:06:41 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi003 with status 5: 'sudo systemctl stop ceph-3d8c21aa-53eb-11ed-8438-001a4aab830c@mon.a'

fail 7080080 2022-10-24 15:05:24 2022-10-24 22:13:55 2022-10-24 22:41:10 0:27:15 0:20:48 0:06:27 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi071 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4cbe2ff6-53eb-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080081 2022-10-24 15:05:25 2022-10-24 22:14:15 2022-10-24 22:31:12 0:16:57 0:07:46 0:09:11 smithi main rhel 8.4 orch/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi153 with status 5: 'sudo systemctl stop ceph-421bdc4c-53eb-11ed-8438-001a4aab830c@mon.smithi153'

fail 7080082 2022-10-24 15:05:26 2022-10-24 22:15:56 2022-10-24 22:31:19 0:15:23 0:05:21 0:10:02 smithi main ubuntu 20.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_20.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi052 with status 5: 'sudo systemctl stop ceph-5f25886a-53eb-11ed-8438-001a4aab830c@mon.a'

fail 7080083 2022-10-24 15:05:27 2022-10-24 22:16:06 2022-10-24 22:32:19 0:16:13 0:08:12 0:08:01 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi130 with status 5: 'sudo systemctl stop ceph-77bd2df6-53eb-11ed-8438-001a4aab830c@mon.smithi130'

fail 7080084 2022-10-24 15:05:29 2022-10-24 22:17:27 2022-10-24 22:35:57 0:18:30 0:10:31 0:07:59 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi006 with status 5: 'sudo systemctl stop ceph-1018ea7c-53ec-11ed-8438-001a4aab830c@mon.a'

fail 7080085 2022-10-24 15:05:30 2022-10-24 22:18:48 2022-10-24 22:32:11 0:13:23 0:06:39 0:06:44 smithi main ubuntu 18.04 orch/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/1.6.2} 1
Failure Reason:

Command failed on smithi154 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

fail 7080086 2022-10-24 15:05:31 2022-10-24 22:18:49 2022-10-24 22:33:12 0:14:23 0:06:26 0:07:57 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/mimic-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/mapgap thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi061 with status 5: 'sudo systemctl stop ceph-a011ea08-53eb-11ed-8438-001a4aab830c@mon.a'

fail 7080087 2022-10-24 15:05:32 2022-10-24 22:19:50 2022-10-24 22:34:58 0:15:08 0:07:49 0:07:19 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi161 with status 5: 'sudo systemctl stop ceph-ca23fc96-53eb-11ed-8438-001a4aab830c@mon.smithi161'

fail 7080088 2022-10-24 15:05:33 2022-10-24 22:20:00 2022-10-24 22:35:46 0:15:46 0:05:31 0:10:15 smithi main orch/cephadm/dashboard/{0-distro/ignorelist_health task/test_e2e} 2
Failure Reason:

Failed to fetch package version from https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a

fail 7080089 2022-10-24 15:05:34 2022-10-24 22:20:00 2022-10-24 22:50:17 0:30:17 0:21:46 0:08:31 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi047 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 57aa92b4-53ec-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080090 2022-10-24 15:05:36 2022-10-24 22:20:11 2022-10-24 22:34:43 0:14:32 0:06:58 0:07:34 smithi main centos 8.stream orch/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi116 with status 5: 'sudo systemctl stop ceph-f52f9dd2-53eb-11ed-8438-001a4aab830c@mon.a'

fail 7080091 2022-10-24 15:05:37 2022-10-24 22:21:02 2022-10-24 22:32:11 0:11:09 0:04:56 0:06:13 smithi main ubuntu 18.04 orch/cephadm/smoke-singlehost/{0-distro$/{ubuntu_18.04} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi080 with status 5: 'sudo systemctl stop ceph-8ffb0b68-53eb-11ed-8438-001a4aab830c@mon.smithi080'

fail 7080092 2022-10-24 15:05:38 2022-10-24 22:21:02 2022-10-24 22:37:43 0:16:41 0:10:02 0:06:39 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi017 with status 5: 'sudo systemctl stop ceph-412481bc-53ec-11ed-8438-001a4aab830c@mon.a'

fail 7080093 2022-10-24 15:05:39 2022-10-24 22:21:13 2022-10-24 22:51:34 0:30:21 0:19:41 0:10:40 smithi main ubuntu 20.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls mon_election/classic} 2
Failure Reason:

Command failed on smithi164 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3dff5b24-53ec-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080094 2022-10-24 15:05:40 2022-10-24 22:22:03 2022-10-24 22:41:17 0:19:14 0:09:43 0:09:31 smithi main centos 8.stream orch/cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi175 with status 5: 'sudo systemctl stop ceph-b4907ca0-53ec-11ed-8438-001a4aab830c@mon.a'

pass 7080095 2022-10-24 15:05:41 2022-10-24 22:24:24 2022-10-24 22:41:24 0:17:00 0:11:25 0:05:35 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_adoption} 1
fail 7080096 2022-10-24 15:05:42 2022-10-24 22:24:24 2022-10-24 22:36:19 0:11:55 0:05:17 0:06:38 smithi main ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi102 with status 5: 'sudo systemctl stop ceph-2dc9fda4-53ec-11ed-8438-001a4aab830c@mon.smithi102'

fail 7080097 2022-10-24 15:05:44 2022-10-24 22:24:45 2022-10-24 22:38:42 0:13:57 0:06:34 0:07:23 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/mimic backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi040 with status 5: 'sudo systemctl stop ceph-6903097e-53ec-11ed-8438-001a4aab830c@mon.a'

fail 7080098 2022-10-24 15:05:45 2022-10-24 22:25:35 2022-10-24 22:36:42 0:11:07 0:05:16 0:05:51 smithi main ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi046 with status 5: 'sudo systemctl stop ceph-3bc62eb4-53ec-11ed-8438-001a4aab830c@mon.smithi046'

fail 7080099 2022-10-24 15:05:46 2022-10-24 22:25:36 2022-10-24 22:42:24 0:16:48 0:10:14 0:06:34 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi149 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7080100 2022-10-24 15:05:47 2022-10-24 22:25:36 2022-10-24 22:40:49 0:15:13 0:08:10 0:07:03 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_3.0 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi171 with status 5: 'sudo systemctl stop ceph-a5fb1d9e-53ec-11ed-8438-001a4aab830c@mon.a'

fail 7080101 2022-10-24 15:05:48 2022-10-24 22:25:46 2022-10-24 22:41:07 0:15:21 0:05:12 0:10:09 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi033 with status 5: 'sudo systemctl stop ceph-b8ed51ce-53ec-11ed-8438-001a4aab830c@mon.smithi033'

fail 7080102 2022-10-24 15:05:49 2022-10-24 22:26:17 2022-10-24 22:55:17 0:29:00 0:21:47 0:07:13 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 017ce22e-53ed-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080103 2022-10-24 15:05:51 2022-10-24 22:26:47 2022-10-24 22:42:40 0:15:53 0:06:35 0:09:18 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/none thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-f6e4a4be-53ec-11ed-8438-001a4aab830c@mon.a'

fail 7080104 2022-10-24 15:05:52 2022-10-24 22:28:58 2022-10-24 22:56:41 0:27:43 0:20:46 0:06:57 smithi main centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi012 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 15663dd0-53ed-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080105 2022-10-24 15:05:53 2022-10-24 22:28:59 2022-10-24 22:45:22 0:16:23 0:07:21 0:09:02 smithi main centos 8.stream orch/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi053 with status 5: 'sudo systemctl stop ceph-39896958-53ed-11ed-8438-001a4aab830c@mon.smithi053'

fail 7080106 2022-10-24 15:05:54 2022-10-24 22:30:29 2022-10-24 22:47:34 0:17:05 0:10:03 0:07:02 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi003 with status 5: 'sudo systemctl stop ceph-a2339762-53ed-11ed-8438-001a4aab830c@mon.a'

fail 7080107 2022-10-24 15:05:55 2022-10-24 22:30:40 2022-10-24 22:50:26 0:19:46 0:12:03 0:07:43 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi187 with status 5: 'sudo systemctl stop ceph-fa3d0af6-53ed-11ed-8438-001a4aab830c@mon.a'

pass 7080108 2022-10-24 15:05:56 2022-10-24 22:31:20 2022-10-24 22:46:03 0:14:43 0:08:37 0:06:06 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm_repos} 1
fail 7080109 2022-10-24 15:05:58 2022-10-24 22:31:21 2022-10-24 22:45:57 0:14:36 0:08:00 0:06:36 smithi main rhel 8.4 orch/cephadm/smoke/{0-nvme-loop distro/rhel_8.4_container_tools_rhel8 fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi052 with status 5: 'sudo systemctl stop ceph-592f01a0-53ed-11ed-8438-001a4aab830c@mon.a'

fail 7080110 2022-10-24 15:05:59 2022-10-24 22:31:21 2022-10-24 22:46:55 0:15:34 0:05:55 0:09:39 smithi main ubuntu 20.04 orch/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi099 with status 5: 'sudo systemctl stop ceph-a57f7c74-53ed-11ed-8438-001a4aab830c@mon.smithi099'

fail 7080111 2022-10-24 15:06:00 2022-10-24 22:31:22 2022-10-24 22:46:31 0:15:09 0:08:33 0:06:36 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi008 with status 5: 'sudo systemctl stop ceph-81572b58-53ed-11ed-8438-001a4aab830c@mon.smithi008'

fail 7080112 2022-10-24 15:06:01 2022-10-24 22:31:32 2022-10-24 22:59:22 0:27:50 0:21:13 0:06:37 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
Failure Reason:

Command failed on smithi080 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid dc02da5c-53ed-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7080113 2022-10-24 15:06:02 2022-10-24 22:32:13 2022-10-24 22:55:10 0:22:57 0:16:58 0:05:59 smithi main centos 8.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls mon_election/connectivity} 2
Failure Reason:

Command failed on smithi130 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7558cc08-53ed-11ed-8438-001a4aab830c -e sha1=d88f759b6ee30a8b0d9b03e2bbb29f39d48f9f2a -- bash -c \'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)"\''

fail 7080114 2022-10-24 15:06:03 2022-10-24 22:32:24 2022-10-24 22:49:37 0:17:13 0:09:53 0:07:20 smithi main centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_nfs} 1
Failure Reason:

Command failed on smithi153 with status 5: 'sudo systemctl stop ceph-e54e5d34-53ed-11ed-8438-001a4aab830c@mon.a'

fail 7080115 2022-10-24 15:06:05 2022-10-24 22:32:24 2022-10-24 22:48:39 0:16:15 0:07:57 0:08:18 smithi main ubuntu 18.04 orch/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/pggrow thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi061 with status 5: 'sudo systemctl stop ceph-b6f92c02-53ed-11ed-8438-001a4aab830c@mon.a'

fail 7080116 2022-10-24 15:06:06 2022-10-24 22:33:15 2022-10-24 22:46:23 0:13:08 0:05:43 0:07:25 smithi main ubuntu 18.04 orch/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi087 with status 5: 'sudo systemctl stop ceph-5d89b1be-53ed-11ed-8438-001a4aab830c@mon.a'

fail 7080117 2022-10-24 15:06:07 2022-10-24 22:33:15 2022-10-24 22:48:59 0:15:44 0:08:09 0:07:35 smithi main rhel 8.4 orch/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi186 with status 5: 'sudo systemctl stop ceph-cb694ec4-53ed-11ed-8438-001a4aab830c@mon.smithi186'

fail 7080118 2022-10-24 15:06:08 2022-10-24 22:33:56 2022-10-24 22:51:12 0:17:16 0:10:54 0:06:22 smithi main centos 8.stream orch/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi045 with status 5: 'sudo systemctl stop ceph-307783ee-53ee-11ed-8438-001a4aab830c@mon.a'

fail 7080119 2022-10-24 15:06:09 2022-10-24 22:33:56 2022-10-24 22:52:32 0:18:36 0:11:28 0:07:08 smithi main rhel 8.4 orch/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi116 with status 5: 'sudo systemctl stop ceph-7a79bd86-53ee-11ed-8438-001a4aab830c@mon.a'