Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description: rbd/cli/{base/install clusters/{fixed-1 openstack} features/layering msgr-failures/few objectstore/bluestore-comp-zstd pool/small-cache-pool supported-random-distro$/{rhel_8} workloads/rbd_cli_migration}
Failure Reason: hit max job timeout

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-bitmap policy/none rbd-mirror/four-per-cluster supported-random-distro$/{centos_latest} workloads/rbd-mirror-journal-workunit}

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description: rbd/nbd/{base/install cluster/{fixed-3 openstack} msgr-failures/few objectstore/bluestore-bitmap supported-random-distro$/{ubuntu_latest} thrashers/cache thrashosds-health workloads/rbd_nbd_diff_continuous}
Failure Reason: Command failed (workunit test rbd/diff_continuous.sh) on smithi158 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1bf364b918a7ab4708130a64bf96639942959f6d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_DEVICE_TYPE=nbd adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/diff_continuous.sh'

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: rbd/thrash/{base/install clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-stupid supported-random-distro$/{centos_latest} thrashers/cache thrashosds-health workloads/rbd_fsx_deep_copy}

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/librbd/{cache/none clusters/{fixed-3 openstack} config/copy-on-read min-compat-client/octopus msgr-failures/few objectstore/bluestore-comp-zlib pool/ec-data-pool supported-random-distro$/{centos_8} workloads/rbd_fio}

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description: rbd/singleton-bluestore/{all/issue-20295 objectstore/bluestore-bitmap openstack supported-random-distro$/{rhel_8}}

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: rbd/pwl-cache/tmpfs/{1-base/install 2-cluster/{fix-2 openstack} 3-supported-random-distro$/{centos_latest} 4-cache-path 5-cache-mode/ssd 6-cache-size/5G 7-workloads/qemu_xfstests}

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: rbd/singleton/{all/qemu-iotests-writeback objectstore/bluestore-comp-zstd openstack supported-random-distro$/{centos_latest}}
Failure Reason: Command failed (workunit test rbd/qemu-iotests.sh) on smithi060 with status 13: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1bf364b918a7ab4708130a64bf96639942959f6d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/qemu-iotests.sh'

Ceph Branch: wip-yuri10-testing-2023-07-21-0828-reef
Suite Branch: wip-yuri10-testing-2023-07-21-0828-reef
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-stupid policy/simple rbd-mirror/four-per-cluster supported-random-distro$/{centos_8} workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock}
Failure Reason: hit max job timeout