Fields common to every job in this run:

Ceph Branch: master
Suite Branch: wip-crimson-bughunt-suite
Teuthology Branch: master
Machine: smithi

The ID, Status, and Nodes fields carry no values in this listing. Each entry below gives the remaining fields: OS, Description, and Failure Reason.
- OS: centos 8.3
  Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados tasks/mon_recovery validater/lockdep}
  Failure Reason: Command failed on smithi180 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd dump --format=json'
- OS: rhel 8.3
  Description: rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} thrashers/force-sync-many workloads/rados_mon_workunits}
  Failure Reason: Command failed on smithi117 with status 1: 'sudo yum -y install ceph-mgr-cephadm'
- OS: centos 8.3
  Description: rados/multimon/{clusters/6 mon_election/classic msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8} tasks/mon_clock_with_skews}
  Failure Reason: (none recorded)
- OS: rhel 8.3
  Description: rados/singleton/{all/ec-lost-unfound mon_election/classic msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}}
  Failure Reason: Command failed on smithi042 with status 1: 'sudo yum -y install ceph-mgr-cephadm'
- OS: centos 8.2
  Description: rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root}
  Failure Reason: Command failed on smithi097 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e57ad63cbb5b6a892b01173f2ef5f987ad9e901c-crimson -v bootstrap --fsid d20556c4-4be2-11ec-8c2d-001a4aab830c --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 172.21.15.97 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring'
- OS: centos 8.3
  Description: rados/singleton/{all/lost-unfound mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}}
  Failure Reason: hit max job timeout
- OS: centos 8.3
  Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} tasks/rados_stress_watch}
  Failure Reason: Command failed (workunit test rados/stress_watch.sh) on smithi065 with status 134: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6dd8f56c934524d2d7a9559b8a83bc15b690b8e3 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/stress_watch.sh'
- OS: centos 8.3
  Description: rados/multimon/{clusters/3 mon_election/connectivity msgr-failures/many msgr/async-v2only no_pools objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} tasks/mon_clock_with_skews}
  Failure Reason: (none recorded)
- OS: rhel 8.3
  Description: rados/cephadm/with-work/{0-distro/rhel_8.3_kubic_stable fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests}
  Failure Reason: Command failed on smithi179 with status 1: 'sudo yum -y install ceph-mgr-cephadm'
- OS: centos 8.2
  Description: rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v2only root}
  Failure Reason: Command failed on smithi131 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e57ad63cbb5b6a892b01173f2ef5f987ad9e901c-crimson -v bootstrap --fsid 5d14ae9a-4be3-11ec-8c2d-001a4aab830c --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 172.21.15.131 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring'
- OS: centos 8.3
  Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/small-objects-balanced}
  Failure Reason: hit max job timeout
- OS: centos 8.3
  Description: rados/singleton/{all/osd-recovery-incomplete mon_election/classic msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}}
  Failure Reason: wait_for_clean: failed before timeout expired
- OS: rhel 8.3
  Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} tasks/rados_workunit_loadgen_mix}
  Failure Reason: Command failed on smithi194 with status 1: 'sudo yum -y install ceph-radosgw ceph-test ceph ceph-base cephadm ceph-immutable-object-cache ceph-mgr ceph-mgr-dashboard ceph-mgr-diskprediction-local ceph-mgr-rook ceph-mgr-cephadm ceph-fuse librados-devel libcephfs2 libcephfs-devel librados2 librbd1 python3-rados python3-rgw python3-cephfs python3-rbd rbd-fuse rbd-mirror rbd-nbd sqlite-devel sqlite-devel sqlite-devel sqlite-devel'
- OS: centos 8.3
  Description: rados/singleton/{all/pg-autoscaler-progress-off mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8}}
  Failure Reason: Command failed (workunit test mon/pg_autoscaler.sh) on smithi086 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6dd8f56c934524d2d7a9559b8a83bc15b690b8e3 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/mon/pg_autoscaler.sh'
- OS: centos 8.3
  Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados tasks/mon_recovery validater/lockdep}
  Failure Reason: Command failed on smithi027 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd dump --format=json'
- OS: centos 8.2
  Description: rados/cephadm/with-work/{0-distro/centos_8.2_kubic_stable fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_python}
  Failure Reason: Command failed on smithi035 with status 1: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:e57ad63cbb5b6a892b01173f2ef5f987ad9e901c-crimson -v bootstrap --fsid 9b6e21b2-4be3-11ec-8c2d-001a4aab830c --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 172.21.15.35 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring'
- OS: centos 8.3
  Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} tasks/repair_test}
  Failure Reason: Command failed on smithi039 with status 11: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph pg scrub 3.0'
- OS: centos 8.3
  Description: rados/singleton-bluestore/{all/cephtool mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8}}
  Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi146 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6dd8f56c934524d2d7a9559b8a83bc15b690b8e3 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
- OS: rhel 8.3
  Description: rados/singleton/{all/test_envlibrados_for_rocksdb mon_election/classic msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}}
  Failure Reason: Command failed on smithi094 with status 1: 'sudo yum -y install ceph-mgr-cephadm'
- OS: rhel 8.3
  Description: rados/singleton/{all/thrash-rados/{thrash-rados thrashosds-health} mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}}
  Failure Reason: Command failed on smithi118 with status 1: 'sudo yum -y install ceph-mgr-cephadm'
- OS: centos 8.3
  Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/dedup-io-mixed}
  Failure Reason: hit max job timeout
- OS: centos 8.2
  Description: rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root}
  Failure Reason: Command failed on smithi058 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e57ad63cbb5b6a892b01173f2ef5f987ad9e901c-crimson -v bootstrap --fsid e421ace4-4be3-11ec-8c2d-001a4aab830c --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 172.21.15.58 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring'
- OS: rhel 8.3
  Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} tasks/rados_api_tests}
  Failure Reason: Command failed on smithi183 with status 1: 'sudo yum -y install ceph-radosgw ceph-test ceph ceph-base cephadm ceph-immutable-object-cache ceph-mgr ceph-mgr-dashboard ceph-mgr-diskprediction-local ceph-mgr-rook ceph-mgr-cephadm ceph-fuse librados-devel libcephfs2 libcephfs-devel librados2 librbd1 python3-rados python3-rgw python3-cephfs python3-rbd rbd-fuse rbd-mirror rbd-nbd sqlite-devel sqlite-devel sqlite-devel sqlite-devel'
- OS: rhel 8.3
  Description: rados/singleton/{all/admin-socket mon_election/classic msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8}}
  Failure Reason: Command failed on smithi085 with status 1: 'sudo yum -y install ceph-mgr-cephadm'