Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 7285455 2023-05-24 19:40:26 2023-05-24 20:13:19 2023-05-24 20:34:52 0:21:33 0:10:29 0:11:04 smithi main centos 9.stream rbd/singleton/{all/qemu-iotests-writeback objectstore/bluestore-comp-lz4 openstack supported-random-distro$/{centos_latest}} 1
Failure Reason:

Command failed (workunit test rbd/qemu-iotests.sh) on smithi157 with status 13: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/qemu-iotests.sh'

pass 7285456 2023-05-24 19:40:26 2023-05-24 20:13:19 2023-05-24 20:35:05 0:21:46 0:11:41 0:10:05 smithi main centos 9.stream rbd/basic/{base/install cachepool/none clusters/{fixed-1 openstack} msgr-failures/few objectstore/bluestore-comp-zlib supported-random-distro$/{centos_latest} tasks/rbd_cls_tests} 1
pass 7285457 2023-05-24 19:40:27 2023-05-24 20:14:10 2023-05-24 20:42:31 0:28:21 0:19:29 0:08:52 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-hybrid validator/memcheck workloads/fsx} 1
pass 7285458 2023-05-24 19:40:28 2023-05-24 20:14:10 2023-05-24 22:04:06 1:49:56 1:39:29 0:10:27 smithi main centos 9.stream rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-stupid qemu/xfstests supported-random-distro$/{centos_latest} workloads/dynamic_features_no_cache} 3
pass 7285459 2023-05-24 19:40:29 2023-05-24 20:14:51 2023-05-24 20:39:36 0:24:45 0:15:08 0:09:37 smithi main centos 9.stream rbd/cli/{base/install clusters/{fixed-1 openstack} features/defaults msgr-failures/few objectstore/bluestore-comp-snappy pool/none supported-random-distro$/{centos_latest} workloads/rbd_cli_import_export} 1
pass 7285460 2023-05-24 19:40:30 2023-05-24 20:14:51 2023-05-24 20:50:12 0:35:21 0:25:54 0:09:27 smithi main centos 9.stream rbd/singleton/{all/qos objectstore/bluestore-comp-zlib openstack supported-random-distro$/{centos_latest}} 1
pass 7285461 2023-05-24 19:40:31 2023-05-24 20:14:51 2023-05-24 21:26:44 1:11:53 1:01:25 0:10:28 smithi main centos 9.stream rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-zstd 4-supported-random-distro$/{centos_latest} 5-pool/none 6-prepare/qcow2-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup} 3
pass 7285462 2023-05-24 19:40:32 2023-05-24 20:15:12 2023-05-24 20:48:32 0:33:20 0:23:34 0:09:46 smithi main centos 9.stream rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-comp-zlib supported-random-distro$/{centos_latest} workloads/rbd-mirror-snapshot-workunit-minimum} 2
fail 7285463 2023-05-24 19:40:32 2023-05-24 20:15:22 2023-05-24 20:42:09 0:26:47 0:17:40 0:09:07 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-low-osd-mem-target validator/memcheck workloads/python_api_tests} 1
Failure Reason:

Command failed (workunit test rbd/test_librbd_python.sh) on smithi139 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_FEATURES=1 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd_python.sh'

pass 7285464 2023-05-24 19:40:33 2023-05-24 20:15:23 2023-05-24 20:48:26 0:33:03 0:20:55 0:12:08 smithi main centos 9.stream rbd/thrash/{base/install clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-hybrid supported-random-distro$/{centos_latest} thrashers/cache thrashosds-health workloads/rbd_fsx_cache_writethrough} 2
pass 7285465 2023-05-24 19:40:34 2023-05-24 20:17:24 2023-05-24 21:28:31 1:11:07 1:01:19 0:09:48 smithi main centos 9.stream rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-stupid policy/simple rbd-mirror/four-per-cluster supported-random-distro$/{centos_latest} workloads/rbd-mirror-journal-stress-workunit} 2
pass 7285466 2023-05-24 19:40:35 2023-05-24 21:32:35 3806 smithi main centos 9.stream rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-hybrid 4-supported-random-distro$/{centos_latest} 5-pool/replicated-data-pool 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup} 3
fail 7285467 2023-05-24 19:40:36 2023-05-24 20:45:53 1070 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-stupid validator/memcheck workloads/python_api_tests_with_defaults} 1
Failure Reason:

Command failed (workunit test rbd/test_librbd_python.sh) on smithi191 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_FEATURES=61 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd_python.sh'

pass 7285468 2023-05-24 19:40:37 2023-05-24 20:18:45 2023-05-24 20:41:49 0:23:04 0:13:13 0:09:51 smithi main centos 9.stream rbd/singleton/{all/rbd_tasks objectstore/bluestore-low-osd-mem-target openstack supported-random-distro$/{centos_latest}} 1
pass 7285469 2023-05-24 19:40:38 2023-05-24 20:18:46 2023-05-24 20:39:51 0:21:05 0:11:42 0:09:23 smithi main centos 9.stream rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-low-osd-mem-target pool/small-cache-pool supported-random-distro$/{centos_latest} workloads/rbd_cli_import_export} 1
pass 7285470 2023-05-24 19:40:38 2023-05-24 22:06:49 5814 smithi main centos 9.stream rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-comp-snappy qemu/xfstests supported-random-distro$/{centos_latest} workloads/dynamic_features_no_cache} 3
pass 7285471 2023-05-24 19:40:39 2023-05-24 20:19:37 2023-05-24 20:38:42 0:19:05 0:08:47 0:10:18 smithi main centos 9.stream rbd/singleton/{all/rbdmap_RBDMAPFILE objectstore/bluestore-stupid openstack supported-random-distro$/{centos_latest}} 1
pass 7285472 2023-05-24 19:40:40 2023-05-24 20:19:57 2023-05-24 20:56:05 0:36:08 0:25:33 0:10:35 smithi main centos 9.stream rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-low-osd-mem-target supported-random-distro$/{centos_latest} workloads/rbd-mirror-workunit-min-compat-client-octopus} 2
fail 7285473 2023-05-24 19:40:41 2023-05-24 20:20:58 2023-05-24 20:51:43 0:30:45 0:20:53 0:09:52 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-bitmap validator/memcheck workloads/python_api_tests_with_journaling} 1
Failure Reason:

Command failed (workunit test rbd/test_librbd_python.sh) on smithi195 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_FEATURES=125 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd_python.sh'

pass 7285474 2023-05-24 19:40:42 2023-05-24 20:20:58 2023-05-24 21:46:52 1:25:54 1:14:55 0:10:59 smithi main centos 9.stream rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-comp-zlib qemu/xfstests supported-random-distro$/{centos_latest} workloads/rebuild_object_map} 3
pass 7285475 2023-05-24 19:40:43 2023-05-24 20:21:08 2023-05-24 21:30:04 1:08:56 0:56:53 0:12:03 smithi main centos 9.stream rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-comp-lz4 policy/simple rbd-mirror/four-per-cluster supported-random-distro$/{centos_latest} workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock} 2
pass 7285476 2023-05-24 19:40:44 2023-05-24 20:23:19 2023-05-24 20:58:38 0:35:19 0:24:37 0:10:42 smithi main centos 9.stream rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-stupid supported-random-distro$/{centos_latest} workloads/rbd-mirror-workunit-policy-none} 2
pass 7285477 2023-05-24 19:40:44 2023-05-24 20:23:20 2023-05-24 20:52:16 0:28:56 0:19:16 0:09:40 smithi main centos 9.stream rbd/thrash/{base/install clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-comp-lz4 supported-random-distro$/{centos_latest} thrashers/cache thrashosds-health workloads/rbd_fsx_nocache} 2
pass 7285478 2023-05-24 19:40:45 2023-05-24 20:23:20 2023-05-24 21:37:40 1:14:20 1:03:33 0:10:47 smithi main centos 9.stream rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-stupid 4-supported-random-distro$/{centos_latest} 5-pool/none 6-prepare/qcow2-file 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup} 3
pass 7285479 2023-05-24 19:40:46 2023-05-24 20:24:41 2023-05-24 20:45:25 0:20:44 0:11:20 0:09:24 smithi main centos 9.stream rbd/basic/{base/install cachepool/none clusters/{fixed-1 openstack} msgr-failures/few objectstore/bluestore-stupid supported-random-distro$/{centos_latest} tasks/rbd_cls_tests} 1
fail 7285480 2023-05-24 19:40:47 2023-05-24 20:24:41 2023-05-24 21:27:30 1:02:49 0:53:24 0:09:25 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-comp-lz4 validator/memcheck workloads/rbd_mirror} 1
Failure Reason:

Command failed (workunit test rbd/test_rbd_mirror.sh) on smithi190 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_rbd_mirror.sh'

pass 7285481 2023-05-24 19:40:48 2023-05-24 20:24:51 2023-05-24 20:49:47 0:24:56 0:16:17 0:08:39 smithi main centos 9.stream rbd/cli/{base/install clusters/{fixed-1 openstack} features/journaling msgr-failures/few objectstore/bluestore-low-osd-mem-target pool/none supported-random-distro$/{centos_latest} workloads/rbd_cli_groups} 1
pass 7285482 2023-05-24 19:40:49 2023-05-24 20:24:52 2023-05-24 20:50:50 0:25:58 0:15:27 0:10:31 smithi main centos 9.stream rbd/qemu/{cache/writethrough clusters/{fixed-3 openstack} features/readbalance msgr-failures/few objectstore/bluestore-bitmap pool/ec-cache-pool supported-random-distro$/{centos_latest} workloads/qemu_fsstress} 3
pass 7285483 2023-05-24 19:40:50 2023-05-24 20:25:52 2023-05-24 20:46:41 0:20:49 0:12:03 0:08:46 smithi main centos 9.stream rbd/basic/{base/install cachepool/small clusters/{fixed-1 openstack} msgr-failures/few objectstore/bluestore-bitmap supported-random-distro$/{centos_latest} tasks/rbd_cls_tests} 1
pass 7285484 2023-05-24 19:40:50 2023-05-24 20:25:53 2023-05-24 20:48:38 0:22:45 0:12:44 0:10:01 smithi main centos 9.stream rbd/singleton/{all/admin_socket objectstore/bluestore-hybrid openstack supported-random-distro$/{centos_latest}} 1
fail 7285485 2023-05-24 19:40:51 2023-05-24 20:25:53 2023-05-24 21:59:08 1:33:15 1:23:55 0:09:20 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-comp-lz4 validator/memcheck workloads/c_api_tests} 1
Failure Reason:

Command failed (workunit test rbd/test_librbd.sh) on smithi161 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_FEATURES=1 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd.sh'

pass 7285486 2023-05-24 19:40:52 2023-05-24 20:25:53 2023-05-24 21:33:44 1:07:51 0:58:20 0:09:31 smithi main centos 9.stream rbd/pwl-cache/tmpfs/{1-base/install 2-cluster/{fix-2 openstack} 3-supported-random-distro$/{centos_latest} 4-cache-path 5-cache-mode/rwl 6-cache-size/1G 7-workloads/qemu_xfstests} 2
pass 7285487 2023-05-24 19:40:53 2023-05-24 20:26:04 2023-05-24 21:06:50 0:40:46 0:30:56 0:09:50 smithi main centos 9.stream rbd/librbd/{cache/writethrough clusters/{fixed-3 openstack} config/copy-on-read min-compat-client/default msgr-failures/few objectstore/bluestore-hybrid pool/small-cache-pool supported-random-distro$/{centos_latest} workloads/c_api_tests_with_journaling} 3
pass 7285488 2023-05-24 19:40:54 2023-05-24 20:26:04 2023-05-24 20:49:32 0:23:28 0:13:09 0:10:19 smithi main centos 9.stream rbd/basic/{base/install cachepool/none clusters/{fixed-1 openstack} msgr-failures/few objectstore/bluestore-comp-lz4 supported-random-distro$/{centos_latest} tasks/rbd_lock_and_fence} 1
fail 7285489 2023-05-24 19:40:55 2023-05-24 20:26:05 2023-05-24 22:05:34 1:39:29 1:28:53 0:10:36 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-comp-snappy validator/memcheck workloads/c_api_tests_with_defaults} 1
Failure Reason:

Command failed (workunit test rbd/test_librbd.sh) on smithi062 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_FEATURES=61 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd.sh'

pass 7285490 2023-05-24 19:40:56 2023-05-24 20:26:05 2023-05-24 21:53:49 1:27:44 1:17:12 0:10:32 smithi main centos 9.stream rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-stupid qemu/xfstests supported-random-distro$/{centos_latest} workloads/dynamic_features} 3
pass 7285491 2023-05-24 19:40:56 2023-05-24 20:26:26 2023-05-24 21:18:52 0:52:26 0:41:06 0:11:20 smithi main centos 9.stream rbd/mirror-thrash/{base/install clients/mirror cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-comp-zstd policy/none rbd-mirror/four-per-cluster supported-random-distro$/{centos_latest} workloads/rbd-mirror-fsx-workunit} 2
pass 7285492 2023-05-24 19:40:57 2023-05-24 20:26:46 2023-05-24 20:50:36 0:23:50 0:13:17 0:10:33 smithi main centos 9.stream rbd/nbd/{base/install cluster/{fixed-3 openstack} msgr-failures/few objectstore/bluestore-comp-zlib supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/rbd_nbd} 3
pass 7285493 2023-05-24 19:40:58 2023-05-24 20:26:57 2023-05-24 21:16:19 0:49:22 0:39:14 0:10:08 smithi main centos 9.stream rbd/thrash/{base/install clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-low-osd-mem-target supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/rbd_api_tests_journaling} 2
pass 7285494 2023-05-24 19:40:59 2023-05-24 20:26:57 2023-05-24 21:08:37 0:41:40 0:31:29 0:10:11 smithi main centos 9.stream rbd/immutable-object-cache/{clusters/{fix-2 openstack} pool/ceph_and_immutable_object_cache supported-random-distro$/{centos_latest} workloads/fio_on_immutable_object_cache} 2
pass 7285495 2023-05-24 19:41:00 2023-05-24 20:27:17 2023-05-24 22:11:25 1:44:08 1:32:24 0:11:44 smithi main centos 9.stream rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-bitmap qemu/xfstests supported-random-distro$/{centos_latest} workloads/dynamic_features_no_cache} 3
fail 7285496 2023-05-24 19:41:01 2023-05-24 20:29:28 2023-05-24 21:30:34 1:01:06 0:50:29 0:10:37 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-comp-zlib validator/memcheck workloads/c_api_tests_with_journaling} 1
Failure Reason:

Command failed (workunit test rbd/test_librbd.sh) on smithi121 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2c06d7419cb6b639b0e699e672a277d5e998ef86 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 RBD_FEATURES=125 VALGRIND=\'--tool=memcheck --leak-check=full\' adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd.sh'

pass 7285497 2023-05-24 19:41:01 2023-05-24 20:29:39 2023-05-24 21:51:11 1:21:32 1:08:03 0:13:29 smithi main centos 9.stream rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-comp-lz4 qemu/xfstests supported-random-distro$/{centos_latest} workloads/rebuild_object_map} 3
pass 7285498 2023-05-24 19:41:02 2023-05-24 20:31:39 2023-05-24 21:55:10 1:23:31 1:11:34 0:11:57 smithi main centos 9.stream rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/bluestore-comp-zlib 4-supported-random-distro$/{centos_latest} 5-pool/replicated-data-pool 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup} 3
pass 7285499 2023-05-24 19:41:03 2023-05-24 21:04:07 1321 smithi main centos 9.stream rbd/valgrind/{base/install centos_latest clusters/{fixed-1 openstack} objectstore/bluestore-comp-zstd validator/memcheck workloads/fsx} 1
pass 7285500 2023-05-24 19:41:04 2023-05-24 20:33:00 2023-05-24 20:59:03 0:26:03 0:15:15 0:10:48 smithi main centos 9.stream rbd/cli_v1/{base/install clusters/{fixed-1 openstack} features/format-1 msgr-failures/few objectstore/bluestore-hybrid pool/small-cache-pool supported-random-distro$/{centos_latest} workloads/rbd_cli_import_export} 1
pass 7285501 2023-05-24 19:41:05 2023-05-24 20:33:51 2023-05-24 21:24:17 0:50:26 0:38:35 0:11:51 smithi main centos 9.stream rbd/pwl-cache/home/{1-base/install 2-cluster/{fix-2 openstack} 3-supported-random-distro$/{centos_latest} 4-cache-path 5-cache-mode/rwl 6-cache-size/1G 7-workloads/recovery} 2