Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/volumes/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} tasks/volumes/{overrides test/snapshot}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/no 6-workunit/suites/pjd}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/no 6-workunit/fs/misc}}
Failure Reason: error during scrub thrashing: rank damage found: {'backtrace'}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-quiesce/with-quiesce 6-workunit/suites/blogbench}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/no 6-workunit/suites/dbench}}
Failure Reason: "2024-05-10T10:53:14.227143+0000 mds.i (mds.0) 89 : cluster [WRN] Scrub error on inode 0x1000000020c (/volumes/qa/sv_0/ff042e15-a8a5-4cf9-ad0e-3b27fb2aed93/client.0/tmp/clients/client0/~dmtmp/EXCEL) see mds.i log and `damage ls` output for details" in cluster log

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-quiesce/with-quiesce 6-workunit/suites/ffsb}}
Failure Reason: Command failed (workunit test suites/ffsb.sh) on smithi144 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d39fa08867836ac070c47cb66c782fa559fd480b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/ffsb.sh'

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/no 6-workunit/suites/fsx}}
Failure Reason: "2024-05-10T10:43:55.975814+0000 mds.c (mds.1) 5 : cluster [WRN] Scrub error on inode 0x100000002bb (/volumes/qa/sv_0/18bfb460-85d5-4334-ae0a-f99cd839dfc2/client.0/tmp/xfstests-dev/ltp) see mds.c log and `damage ls` output for details" in cluster log

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/no 6-workunit/fs/test_o_trunc}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/no 6-workunit/suites/iozone}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/volumes/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} tasks/volumes/{overrides test/basic}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Failure Reason: reached maximum tries (51) after waiting for 300 seconds

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/functional/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} subvol_versions/create_subvol_version_v1 tasks/forward-scrub}
Failure Reason: "2024-05-10T10:39:18.036714+0000 mds.a (mds.0) 1 : cluster [ERR] dir 0x10000000000 object missing on disk; some files may be lost (/dir)" in cluster log

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/fs/misc}}
Failure Reason: error during quiesce thrashing: Error releasing set 'd40b173e': 1 (EPERM)

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/kernel_untar_build}}
Failure Reason: "2024-05-10T11:02:33.700777+0000 mds.b (mds.0) 54 : cluster [WRN] Scrub error on inode 0x10000006f7d (/volumes/qa/sv_0/7dbcc5d2-e99f-48aa-8164-b606cac50b59/client.0/tmp/t/linux-6.5.11/block/partitions) see mds.b log and `damage ls` output for details" in cluster log

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/postgres}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/suites/blogbench}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/dbench}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/fs/norstats}}
Failure Reason: error during quiesce thrashing: Error releasing set '25edc73': 1 (EPERM)

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/fs/test_o_trunc}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/iozone}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/fs/misc}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/kernel_untar_build}}
Failure Reason: error during quiesce thrashing: Error quiescing set '99199007': 110 (ETIMEDOUT)

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/volumes/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} tasks/volumes/{overrides test/clone}}

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/blogbench}}
Failure Reason: error during quiesce thrashing: Error releasing set 'b4baa38': 1 (EPERM)

Ceph Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Suite Branch: wip-63896-from-wip-rishabh-testing-20240501.193033
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/suites/dbench}}
Failure Reason: Command failed (workunit test suites/dbench.sh) on smithi027 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d39fa08867836ac070c47cb66c782fa559fd480b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh'