Status | Job ID | Links | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
fail 6548644 2021-12-06 22:05:18 2021-12-06 22:22:54 2021-12-06 22:51:04 0:28:10 0:17:00 0:11:10 smithi master ubuntu 20.04 rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi168 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e6aebcde-56e4-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548645 2021-12-06 22:05:19 2021-12-06 22:22:54 2021-12-06 23:16:21 0:53:27 0:46:51 0:06:36 smithi master rhel 8.4 rados/cephadm/with-work/{0-distro/rhel_8.4_container_tools_rhel8 fixed-2 mode/root mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
pass 6548646 2021-12-06 22:05:20 2021-12-06 22:22:54 2021-12-06 23:42:32 1:19:38 1:13:16 0:06:22 smithi master centos 8.stream rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-hybrid} tasks/dashboard} 2
pass 6548647 2021-12-06 22:05:21 2021-12-06 22:23:05 2021-12-06 23:03:26 0:40:21 0:30:17 0:10:04 smithi master ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/1.7.0} 1
pass 6548648 2021-12-06 22:05:22 2021-12-06 22:23:15 2021-12-06 22:54:46 0:31:31 0:24:30 0:07:01 smithi master centos 8.stream rados/cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 6548649 2021-12-06 22:05:23 2021-12-06 22:23:35 2021-12-06 22:48:56 0:25:21 0:15:24 0:09:57 smithi master centos 8.2 rados/cephadm/smoke/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi002 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 343ef9e6-56e5-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548650 2021-12-06 22:05:24 2021-12-06 22:23:56 2021-12-06 22:50:20 0:26:24 0:15:57 0:10:27 smithi master centos 8.3 rados/cephadm/smoke/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi006 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 8dc5972c-56e5-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548651 2021-12-06 22:05:25 2021-12-06 22:24:56 2021-12-06 22:44:42 0:19:46 0:09:32 0:10:14 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/pacific backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi027 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'

fail 6548652 2021-12-06 22:05:26 2021-12-06 22:25:17 2021-12-07 05:51:31 7:26:14 7:15:49 0:10:25 smithi master centos 8.3 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind} 2
Failure Reason:

Command failed (workunit test rados/test.sh) on smithi071 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 ALLOW_TIMEOUTS=1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'

fail 6548653 2021-12-06 22:05:27 2021-12-06 22:25:37 2021-12-06 22:43:13 0:17:36 0:11:14 0:06:22 smithi master centos 8.stream rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi098 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c2a04cea-56e4-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548654 2021-12-06 22:05:28 2021-12-06 22:25:47 2021-12-06 23:10:44 0:44:57 0:37:11 0:07:46 smithi master rhel 8.4 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{default} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/snaps-few-objects-localized} 2
pass 6548655 2021-12-06 22:05:29 2021-12-06 22:27:18 2021-12-06 22:54:43 0:27:25 0:20:00 0:07:25 smithi master rhel 8.4 rados/singleton-nomsgr/{all/export-after-evict mon_election/classic rados supported-random-distro$/{rhel_8}} 1
pass 6548656 2021-12-06 22:05:30 2021-12-06 22:27:28 2021-12-06 22:48:49 0:21:21 0:11:46 0:09:35 smithi master centos 8.3 rados/singleton/{all/test-crash mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8}} 1
pass 6548657 2021-12-06 22:05:32 2021-12-06 22:27:39 2021-12-06 23:06:21 0:38:42 0:26:17 0:12:25 smithi master ubuntu 20.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/snaps-few-objects} 2
fail 6548658 2021-12-06 22:05:33 2021-12-06 22:28:09 2021-12-06 22:45:30 0:17:21 0:11:11 0:06:10 smithi master centos 8.stream rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi005 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 16a90232-56e5-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548659 2021-12-06 22:05:34 2021-12-06 22:28:19 2021-12-06 22:53:54 0:25:35 0:19:20 0:06:15 smithi master rhel 8.4 rados/cephadm/smoke/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi063 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1d49470e-56e6-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548660 2021-12-06 22:05:35 2021-12-06 22:28:40 2021-12-06 22:50:40 0:22:00 0:14:59 0:07:01 smithi master centos 8.stream rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-comp-zlib} tasks/e2e} 2
Failure Reason:

Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi040 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_dashboard_e2e.sh'

fail 6548661 2021-12-06 22:05:36 2021-12-06 22:29:40 2021-12-06 22:47:25 0:17:45 0:06:07 0:11:38 smithi master ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/flannel rook/master} 3
Failure Reason:

[Errno 2] Cannot find file on the remote 'ubuntu@smithi066.front.sepia.ceph.com': 'rook/cluster/examples/kubernetes/ceph/operator.yaml'

fail 6548662 2021-12-06 22:05:37 2021-12-06 22:30:20 2021-12-06 22:49:50 0:19:30 0:09:18 0:10:12 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/osd-delay rados thrashers/default thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi060 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'

pass 6548663 2021-12-06 22:05:38 2021-12-06 22:30:41 2021-12-07 01:17:04 2:46:23 2:35:27 0:10:56 smithi master ubuntu 20.04 rados/objectstore/{backends/filestore-idempotent-aio-journal supported-random-distro$/{ubuntu_latest}} 1
pass 6548664 2021-12-06 22:05:39 2021-12-06 22:30:41 2021-12-06 22:57:48 0:27:07 0:18:14 0:08:53 smithi master centos 8.2 rados/cephadm/smoke-roleless/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
fail 6548665 2021-12-06 22:05:40 2021-12-06 22:30:52 2021-12-06 22:58:27 0:27:35 0:19:58 0:07:37 smithi master rhel 8.4 rados/cephadm/smoke/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi079 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 63f67140-56e6-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548666 2021-12-06 22:05:41 2021-12-06 22:31:12 2021-12-06 23:11:26 0:40:14 0:28:15 0:11:59 smithi master ubuntu 20.04 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-zlib rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
fail 6548667 2021-12-06 22:05:42 2021-12-06 22:32:42 2021-12-06 22:59:19 0:26:37 0:14:06 0:12:31 smithi master rados/cephadm/workunits/{agent/off mon_election/classic task/test_orch_cli} 1
Failure Reason:

Test failure: test_yaml (tasks.cephadm_cases.test_cli.TestCephadmCLI)

pass 6548668 2021-12-06 22:05:43 2021-12-06 22:33:23 2021-12-06 23:14:40 0:41:17 0:32:13 0:09:04 smithi master centos 8.3 rados/cephadm/thrash/{0-distro/centos_8.3_container_tools_3.0 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
pass 6548669 2021-12-06 22:05:44 2021-12-06 22:33:23 2021-12-06 23:08:41 0:35:18 0:23:48 0:11:30 smithi master centos 8.3 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/cache-pool-snaps} 2
fail 6548670 2021-12-06 22:05:45 2021-12-06 22:35:14 2021-12-06 22:56:41 0:21:27 0:09:28 0:11:59 smithi master ubuntu 20.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr mon_election/connectivity random-objectstore$/{bluestore-comp-zlib} supported-random-distro$/{ubuntu_latest} tasks/prometheus} 2
Failure Reason:

Test failure: test_standby (tasks.mgr.test_prometheus.TestPrometheus)

fail 6548671 2021-12-06 22:05:46 2021-12-06 22:36:45 2021-12-06 23:05:15 0:28:30 0:17:04 0:11:26 smithi master ubuntu 20.04 rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi070 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e127031e-56e6-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

dead 6548672 2021-12-06 22:05:47 2021-12-06 22:37:05 2021-12-07 10:50:40 12:13:35 smithi master centos 8.stream rados/cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

hit max job timeout

fail 6548673 2021-12-06 22:05:48 2021-12-06 22:40:36 2021-12-06 23:01:48 0:21:12 0:09:20 0:11:52 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v2only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/fastclose rados thrashers/mapgap thrashosds-health workloads/rbd_cls} 3
Failure Reason:

Command failed on smithi104 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'

pass 6548674 2021-12-06 22:05:49 2021-12-06 22:42:36 2021-12-06 23:18:40 0:36:04 0:25:58 0:10:06 smithi master centos 8.3 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zlib rados tasks/rados_api_tests validater/lockdep} 2
fail 6548675 2021-12-06 22:05:50 2021-12-06 22:42:57 2021-12-06 23:08:20 0:25:23 0:14:53 0:10:30 smithi master centos 8.2 rados/cephadm/smoke/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi098 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e0fc7b48-56e7-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548676 2021-12-06 22:05:51 2021-12-06 22:43:17 2021-12-06 23:08:39 0:25:22 0:15:58 0:09:24 smithi master centos 8.3 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/fastclose objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_8} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
fail 6548677 2021-12-06 22:05:52 2021-12-06 22:43:28 2021-12-06 23:10:38 0:27:10 0:15:37 0:11:33 smithi master centos 8.3 rados/cephadm/smoke/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi027 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6b9af0fe-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548678 2021-12-06 22:05:53 2021-12-06 22:44:48 2021-12-06 23:02:36 0:17:48 0:11:04 0:06:44 smithi master centos 8.stream rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi005 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 791dc5cc-56e7-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548679 2021-12-06 22:05:54 2021-12-06 22:45:38 2021-12-06 23:23:31 0:37:53 0:29:58 0:07:55 smithi master centos 8.stream rados/cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 6548680 2021-12-06 22:05:56 2021-12-06 22:46:09 2021-12-06 23:06:15 0:20:06 0:09:30 0:10:36 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

Command failed on smithi026 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'

fail 6548681 2021-12-06 22:05:57 2021-12-06 22:46:59 2021-12-06 23:05:01 0:18:02 0:10:29 0:07:33 smithi master centos 8.stream rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi094 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ccb3c1fa-56e7-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548682 2021-12-06 22:05:57 2021-12-06 22:47:30 2021-12-06 23:10:57 0:23:27 0:12:53 0:10:34 smithi master centos 8.3 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-hybrid rados tasks/mon_recovery validater/lockdep} 2
fail 6548683 2021-12-06 22:05:59 2021-12-06 22:47:30 2021-12-06 23:14:33 0:27:03 0:20:33 0:06:30 smithi master rhel 8.4 rados/cephadm/smoke/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi053 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b9addebe-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548684 2021-12-06 22:06:00 2021-12-06 22:47:30 2021-12-06 23:15:17 0:27:47 0:19:39 0:08:08 smithi master rhel 8.4 rados/cephadm/smoke/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi167 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c38aefb2-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548685 2021-12-06 22:06:01 2021-12-06 22:47:51 2021-12-06 23:07:38 0:19:47 0:08:51 0:10:56 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/octopus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/osd-delay rados thrashers/none thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

Command failed on smithi093 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'

pass 6548686 2021-12-06 22:06:02 2021-12-06 22:48:11 2021-12-06 23:31:25 0:43:14 0:33:29 0:09:45 smithi master centos 8.3 rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8}} 1
fail 6548687 2021-12-06 22:06:03 2021-12-06 22:48:11 2021-12-06 23:13:00 0:24:49 0:14:05 0:10:44 smithi master rados/cephadm/workunits/{agent/on mon_election/connectivity task/test_orch_cli} 1
Failure Reason:

Test failure: test_yaml (tasks.cephadm_cases.test_cli.TestCephadmCLI)

fail 6548688 2021-12-06 22:06:04 2021-12-06 22:48:52 2021-12-06 23:18:49 0:29:57 0:20:03 0:09:54 smithi master ubuntu 20.04 rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi068 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 8ca50fdc-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548689 2021-12-06 22:06:05 2021-12-06 22:48:52 2021-12-06 23:09:46 0:20:54 0:14:56 0:05:58 smithi master centos 8.stream rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-comp-lz4} tasks/e2e} 2
Failure Reason:

Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi002 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_dashboard_e2e.sh'

fail 6548690 2021-12-06 22:06:06 2021-12-06 22:49:02 2021-12-06 23:08:10 0:19:08 0:06:13 0:12:55 smithi master ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/calico rook/master} 3
Failure Reason:

[Errno 2] Cannot find file on the remote 'ubuntu@smithi060.front.sepia.ceph.com': 'rook/cluster/examples/kubernetes/ceph/operator.yaml'

fail 6548691 2021-12-06 22:06:07 2021-12-06 22:49:53 2021-12-06 23:15:24 0:25:31 0:15:10 0:10:21 smithi master centos 8.2 rados/cephadm/smoke/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi006 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid eb188fe4-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548692 2021-12-06 22:06:08 2021-12-06 22:50:23 2021-12-06 23:09:52 0:19:29 0:09:14 0:10:15 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/pacific backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/fastclose rados thrashers/pggrow thrashosds-health workloads/cache-snaps} 3
Failure Reason:

Command failed on smithi040 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'

fail 6548693 2021-12-06 22:06:09 2021-12-06 22:50:44 2021-12-06 23:16:28 0:25:44 0:15:19 0:10:25 smithi master centos 8.3 rados/cephadm/smoke/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi168 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 34c5a096-56e9-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548694 2021-12-06 22:06:10 2021-12-06 22:51:14 2021-12-06 23:08:50 0:17:36 0:10:03 0:07:33 smithi master centos 8.stream rados/mgr/{clusters/{2-node-mgr} debug/mgr mon_election/classic random-objectstore$/{bluestore-stupid} supported-random-distro$/{centos_8.stream} tasks/prometheus} 2
Failure Reason:

Test failure: test_standby (tasks.mgr.test_prometheus.TestPrometheus)

dead 6548695 2021-12-06 22:06:11 2021-12-06 22:51:54 2021-12-07 11:02:08 12:10:14 smithi master centos 8.stream rados/cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

hit max job timeout

pass 6548696 2021-12-06 22:06:12 2021-12-06 22:52:15 2021-12-06 23:13:53 0:21:38 0:08:59 0:12:39 smithi master ubuntu 20.04 rados/multimon/{clusters/3 mon_election/classic msgr-failures/few msgr/async no_pools objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest} tasks/mon_recovery} 2
fail 6548697 2021-12-06 22:06:13 2021-12-06 22:53:55 2021-12-07 00:57:26 2:03:31 1:53:06 0:10:25 smithi master centos 8.3 rados/standalone/{supported-random-distro$/{centos_8} workloads/scrub} 1
Failure Reason:

Command failed (workunit test scrub/osd-scrub-repair.sh) on smithi204 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-repair.sh'

fail 6548698 2021-12-06 22:06:14 2021-12-06 22:53:56 2021-12-06 23:13:31 0:19:35 0:11:31 0:08:04 smithi master centos 8.stream rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi043 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b6f1de5a-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

pass 6548699 2021-12-06 22:06:15 2021-12-06 22:54:16 2021-12-06 23:29:28 0:35:12 0:23:31 0:11:41 smithi master ubuntu 20.04 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/fastclose objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/minsize_recovery thrashosds-health workloads/ec-rados-plugin=jerasure-k=3-m=1} 2
fail 6548700 2021-12-06 22:06:16 2021-12-06 22:54:46 2021-12-06 23:11:57 0:17:11 0:10:55 0:06:16 smithi master centos 8.stream rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi078 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c491730e-56e8-11ec-8c2e-001a4aab830c -- bash -c 'ceph orch ls --format yaml'"

fail 6548701 2021-12-06 22:06:17 2021-12-06 22:54:47 2021-12-06 23:15:56 0:21:09 0:09:13 0:11:56 smithi master centos 8.3 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi013 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:a3b146a8fc1f45c2319c2c383e97c4c255eb1f00 pull'