Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-comp-lz4 policy/none rbd-mirror/four-per-cluster supported-random-distro$/{rhel_8} workloads/rbd-mirror-snapshot-stress-workunit-fast-diff}

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-bitmap pool/none supported-random-distro$/{ubuntu_latest} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi178 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/singleton-bluestore/{all/issue-20295 objectstore/bluestore-bitmap openstack supported-random-distro$/{rhel_8}}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/filestore-xfs 4-supported-random-distro$/{centos_8} 5-pool/ec-data-pool 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T16:49:07.201233+0000 mon.a (mon.0) 272 : cluster [WRN] Health check failed: 5 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/librbd/{cache/none clusters/{fixed-3 openstack} config/permit-partial-discard min-compat-client/octopus msgr-failures/few objectstore/filestore-xfs pool/ec-data-pool supported-random-distro$/{ubuntu_latest} workloads/rbd_fio}
Failure Reason: "2023-05-21T15:45:06.852353+0000 mon.a (mon.0) 210 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/cli/{base/install clusters/{fixed-1 openstack} features/layering msgr-failures/few objectstore/filestore-xfs pool/small-cache-pool supported-random-distro$/{centos_8} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi027 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-bitmap 4-supported-random-distro$/{centos_8} 5-pool/none 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-comp-snappy pool/none supported-random-distro$/{ubuntu_latest} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi149 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/librbd/{cache/writearound clusters/{fixed-3 openstack} config/copy-on-read min-compat-client/octopus msgr-failures/few objectstore/bluestore-comp-lz4 pool/none supported-random-distro$/{rhel_8} workloads/c_api_tests_with_defaults}

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-lz4 4-supported-random-distro$/{rhel_8} 5-pool/replicated-data-pool 6-prepare/raw-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T16:48:50.790972+0000 mon.a (mon.0) 237 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/pwl-cache/home/{1-base/install 2-cluster/{fix-2 openstack} 3-supported-random-distro$/{centos_8} 4-cache-path 5-cache-mode/ssd 6-cache-size/8G 7-workloads/fio}
Failure Reason: "2023-05-21T17:24:18.800731+0000 mon.a (mon.0) 117 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/librbd/{cache/writeback clusters/{fixed-3 openstack} config/none min-compat-client/default msgr-failures/few objectstore/bluestore-comp-snappy pool/replicated-data-pool supported-random-distro$/{rhel_8} workloads/c_api_tests_with_journaling}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-snappy 4-supported-random-distro$/{rhel_8} 5-pool/ec-data-pool 6-prepare/qcow2-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T17:13:22.281748+0000 mon.a (mon.0) 251 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-comp-zstd pool/none supported-random-distro$/{rhel_8} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi191 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/immutable-object-cache/{clusters/{fix-2 openstack} pool/ceph_and_immutable_object_cache supported-random-distro$/{ubuntu_latest} workloads/fio_on_immutable_object_cache}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/pwl-cache/tmpfs/{1-base/install 2-cluster/{fix-2 openstack} 3-supported-random-distro$/{rhel_8} 4-cache-path 5-cache-mode/ssd 6-cache-size/5G 7-workloads/qemu_xfstests}
Failure Reason: Command failed on smithi182 with status 1: 'test -f /home/ubuntu/cephtest/archive/qemu/client.0/success'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-zlib 4-supported-random-distro$/{ubuntu_latest} 5-pool/none 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T17:35:03.296025+0000 mon.a (mon.0) 181 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-low-osd-mem-target pool/none supported-random-distro$/{ubuntu_latest} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi057 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/singleton-bluestore/{all/issue-20295 objectstore/bluestore-comp-snappy openstack supported-random-distro$/{centos_8}}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-zstd 4-supported-random-distro$/{centos_8} 5-pool/replicated-data-pool 6-prepare/raw-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli/{base/install clusters/{fixed-1 openstack} features/defaults msgr-failures/few objectstore/bluestore-comp-zlib pool/small-cache-pool supported-random-distro$/{ubuntu_latest} workloads/rbd_cli_migration}
Failure Reason: hit max job timeout

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-hybrid 4-supported-random-distro$/{centos_8} 5-pool/ec-data-pool 6-prepare/qcow2-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T17:05:16.474591+0000 mon.a (mon.0) 268 : cluster [WRN] Health check failed: 3 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/cli/{base/install clusters/{fixed-1 openstack} features/journaling msgr-failures/few objectstore/bluestore-comp-zstd pool/ec-data-pool supported-random-distro$/{rhel_8} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi158 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/filestore-xfs pool/none supported-random-distro$/{ubuntu_latest} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi110 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-low-osd-mem-target 4-supported-random-distro$/{ubuntu_latest} 5-pool/none 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T17:50:52.328588+0000 mon.a (mon.0) 191 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/librbd/{cache/writethrough clusters/{fixed-3 openstack} config/copy-on-read min-compat-client/octopus msgr-failures/few objectstore/bluestore-stupid pool/small-cache-pool supported-random-distro$/{rhel_8} workloads/rbd_fio}
Failure Reason: "2023-05-21T16:15:46.319349+0000 mon.a (mon.0) 316 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: rhel 8.4
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-stupid 4-supported-random-distro$/{rhel_8} 5-pool/replicated-data-pool 6-prepare/raw-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: "2023-05-21T17:13:17.522621+0000 mon.a (mon.0) 184 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-comp-lz4 pool/none supported-random-distro$/{ubuntu_latest} workloads/rbd_cli_generic}
Failure Reason: Command failed (workunit test rbd/cli_generic.sh) on smithi170 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=465d59c7325658c86eb5d8820da2d8fc49b7a1cd TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/cli_generic.sh'

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/singleton/{all/read-flags-writethrough objectstore/bluestore-low-osd-mem-target openstack supported-random-distro$/{centos_8}}

Ceph Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Suite Branch: wip-yuri4-testing-2023-05-18-0754-quincy
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/filestore-xfs 4-supported-random-distro$/{centos_8} 5-pool/ec-data-pool 6-prepare/qcow2-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
Failure Reason: Exiting scrub checking -- not all pgs scrubbed.