Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Job 1
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-comp-zlib supported-random-distro$/{ubuntu_20.04} workloads/rbd-mirror-snapshot-workunit-minimum}

Job 2
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description: rbd/nbd/{base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-stupid supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/rbd_nbd}

Job 3
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli/{base/install clusters/{fixed-1 openstack} conf/{disable-pool-app} features/layering msgr-failures/few objectstore/bluestore-comp-zstd pool/small-cache-pool supported-random-distro$/{ubuntu_20.04} workloads/rbd_cli_migration}
Failure Reason: hit max job timeout

Job 4
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/nbd/{base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-bitmap supported-random-distro$/{ubuntu_20.04} thrashers/cache thrashosds-health workloads/rbd_nbd_diff_continuous}
Failure Reason: Command failed (workunit test rbd/diff_continuous.sh) on smithi167 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e62d759a6de3042e046d7d9889d2209cbc674fbd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_DEVICE_TYPE=nbd adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/diff_continuous.sh'

Job 5
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description: rbd/qemu/{cache/writeback clusters/{fixed-3 openstack} conf/{disable-pool-app} features/defaults msgr-failures/few objectstore/bluestore-low-osd-mem-target pool/replicated-data-pool supported-random-distro$/{ubuntu_latest} workloads/qemu_xfstests}

Job 6
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description: rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-bitmap supported-random-distro$/{ubuntu_latest} workloads/rbd-mirror-workunit-policy-simple}
Failure Reason: "2023-10-17T17:53:51.943721+0000 mon.a (mon.0) 177 : cluster [WRN] Health check failed: Reduced data availability: 3 pgs inactive, 3 pgs peering (PG_AVAILABILITY)" in cluster log

Job 7
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: rbd/maintenance/{base/install clusters/{fixed-3 openstack} conf/{disable-pool-app} objectstore/bluestore-hybrid qemu/xfstests supported-random-distro$/{centos_latest} workloads/dynamic_features_no_cache}

Job 8
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-zlib 4-supported-random-distro$/{rhel_8} 5-pool/replicated-data-pool 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup conf/{disable-pool-app}}

Job 9
Ceph Branch: wip-yuri11-testing-2023-10-10-1226-reef
Suite Branch: wip-yuri11-testing-2023-10-10-1226-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description: rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-stupid policy/simple rbd-mirror/four-per-cluster supported-random-distro$/{rhel_8} workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock}
Failure Reason: Command failed (workunit test rbd/rbd_mirror_stress.sh) on smithi181 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.cluster1.mirror/client.mirror/tmp && cd -- /home/ubuntu/cephtest/mnt.cluster1.mirror/client.mirror/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e62d759a6de3042e046d7d9889d2209cbc674fbd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster cluster1" CEPH_ID="mirror" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.cluster1.client.mirror CEPH_ROOT=/home/ubuntu/cephtest/clone.cluster1.client.mirror CEPH_MNT=/home/ubuntu/cephtest/mnt.cluster1.mirror CEPH_ARGS=\'\' MIRROR_IMAGE_MODE=snapshot MIRROR_POOL_MODE=image RBD_IMAGE_FEATURES=layering,exclusive-lock RBD_MIRROR_INSTANCES=4 RBD_MIRROR_USE_EXISTING_CLUSTER=1 RBD_MIRROR_USE_RBD_MIRROR=1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.cluster1.client.mirror/qa/workunits/rbd/rbd_mirror_stress.sh'