Ceph Branch: wip-yuri10-testing-2022-10-19-0810
Suite Branch: wip-yuri10-testing-2022-10-19-0810
Teuthology Branch: main
Machine: smithi

Each job below is listed with its OS and test description, followed by the failure reason where one was reported.

OS: rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} thrashers/none thrashosds-health workloads/radosbench-high-concurrency}

OS: centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} tasks/rados_api_tests}

OS: centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-stupid rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}

OS: ubuntu 20.04
rados/singleton-nomsgr/{all/recovery-unfound-found mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}

OS: rhel 8.6
rados/singleton/{all/pg-autoscaler-progress-off mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8}}

OS: ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay objectstore/bluestore-hybrid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2}

OS: centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/redirect}

OS: ubuntu 20.04
rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}
Command failed on smithi081 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b40a3bed7bdeff6224dd522c5b540bbe0d11c858 pull'

OS: centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/filestore-xfs rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}

OS: ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/redirect_promote_tests}

OS: centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} tasks/rados_cls_all}

OS: rhel 8.6
rados/multimon/{clusters/21 mon_election/connectivity msgr-failures/many msgr/async no_pools objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} tasks/mon_recovery}

OS: ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/calico rook/1.7.2}
Command failed on smithi133 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

OS: centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/filestore-xfs rados tasks/rados_api_tests validater/lockdep}

OS: ubuntu 20.04
rados/singleton/{all/radostool mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest}}

OS: ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/set-chunks-read}

OS: centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/small-objects-balanced}

OS: centos 8.stream
rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rm-zap-wait}
Command failed on smithi130 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b40a3bed7bdeff6224dd522c5b540bbe0d11c858 pull'

OS: ubuntu 20.04
rados/objectstore/{backends/ceph_objectstore_tool supported-random-distro$/{ubuntu_latest}}

OS: centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}

OS: rhel 8.6
rados/singleton/{all/resolve_stuck_peering mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8}}

OS: ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}

OS: ubuntu 20.04
rados/singleton/{all/test_envlibrados_for_rocksdb mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest}}
Command failed (workunit test rados/test_envlibrados_for_rocksdb.sh) on smithi157 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=b40a3bed7bdeff6224dd522c5b540bbe0d11c858 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_envlibrados_for_rocksdb.sh'

OS: ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/flannel rook/master}
'wait for toolbox' reached maximum tries (100) after waiting for 500 seconds

OS: centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/on mon_election/connectivity task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi036 with status 125: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=b40a3bed7bdeff6224dd522c5b540bbe0d11c858 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

OS: ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/host rook/1.7.2}
Command failed on smithi138 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

OS: centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}
Command failed (workunit test cls/test_cls_2pc_queue.sh) on smithi032 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=b40a3bed7bdeff6224dd522c5b540bbe0d11c858 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_2pc_queue.sh'

OS: centos 8.stream
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}}
Command failed on smithi107 with status 1: 'TESTDIR=/home/ubuntu/cephtest bash -s'

OS: ubuntu 20.04
rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_orch_cli}
Command failed on smithi078 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b40a3bed7bdeff6224dd522c5b540bbe0d11c858 pull'

OS: centos 8.stream
rados/objectstore/{backends/objectstore-bluestore-a supported-random-distro$/{centos_8}}

OS: rhel 8.6
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2}
hit max job timeout

OS: ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/calico rook/master}
'wait for toolbox' reached maximum tries (100) after waiting for 500 seconds