Fields: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/pjd}}
Command failed (workunit test suites/pjd.sh) on smithi018 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description:
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench}
Command failed (workunit test kernel_untar_build.sh) on smithi151 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 CEPH_MNT=/home/ubuntu/cephtest/mnt.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/kernel_untar_build.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/valgrind/{begin/{0-install 1-ceph 2-logrotate} centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/whitelist_health tasks/mirror}}
Failure Reason:
Test failure: test_cephfs_mirror_restart_sync_on_blocklist (tasks.cephfs.test_mirroring.TestMirroring)

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/basic}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore-ec/bluestore-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mds 2-workunit/fs/snaps}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/quota}
Command failed (workunit test fs/quota/quota.sh) on smithi191 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.3/client.3/tmp && cd -- /home/ubuntu/cephtest/mnt.3/client.3/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="3" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.3 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.3 CEPH_MNT=/home/ubuntu/cephtest/mnt.3 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.3/qa/workunits/fs/quota/quota.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/acls}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/fs/misc}}
Failure Reason:
error during scrub thrashing: rank damage found: {'backtrace'}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/alternate-pool}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/postgres}}
Failure Reason:
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/fsstress validater/lockdep}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/dbench}}
Command failed (workunit test suites/dbench.sh) on smithi008 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/damage}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/none objectstore-ec/bluestore-comp-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/fs/snaps}}
Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi031 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/data-scan}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/exports}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/dbench validater/valgrind}
Failure Reason:
saw valgrind issues

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/journal-repair}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsstress}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsx}}
Failure Reason:
error during scrub thrashing: rank damage found: {'backtrace'}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{centos_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/libcephfs/{frag test}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
Command failed (workunit test fs/snaps/snaptest-multiple-capsnaps.sh) on smithi040 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/metrics}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/iozone}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/multimds_misc}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/pool-perm}
Failure Reason:
Test failure: test_pool_perm (tasks.cephfs.test_pool_perm.TestPoolPerm)

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/1 tasks/fsstress validater/lockdep}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/none objectstore-ec/bluestore-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/fs/snaps}}
Command failed (workunit test fs/snaps/snaptest-multiple-capsnaps.sh) on smithi099 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/basic}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/pjd}}
Command failed (workunit test suites/pjd.sh) on smithi088 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/dbench validater/valgrind}

Ceph Branch: wip-vshankar-testing-20230307.030510
Suite Branch: wip-vshankar-testing-20230307.030510
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/quota}
Command failed (workunit test fs/quota/quota.sh) on smithi082 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=55559b38a6cb067750563a14060fd73cac29d57b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/quota/quota.sh'