| ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} | |
| | | master | master | master | smithi | | | kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/auto-repair.yaml} | "2017-05-08 14:29:34.393891 mon.0 172.21.15.9:6789/0 460 : cluster [WRN] MDS health message (mds.0): MDS in read-only mode" in cluster log |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/backtrace.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-limits.yaml} | "2017-05-08 13:48:56.521192 mon.0 172.21.15.91:6789/0 601 : cluster [WRN] MDS health message (mds.0): Too many inodes in cache (163/100), 153 inodes in use by clients, 0 stray files" in cluster log |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml} | |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-recovery.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/config-commands.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/damage.yaml} | "2017-05-08 13:43:54.886051 mon.0 172.21.15.105:6789/0 347 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/data-scan.yaml} | Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan) |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml} | |
| | | master | master | master | smithi | | | kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/forward-scrub.yaml} | "2017-05-08 14:17:17.792743 mon.0 172.21.15.97:6789/0 417 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/journal-repair.yaml} | "2017-05-08 14:46:34.026608 mon.0 172.21.15.73:6789/0 359 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-flush.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml} | |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-full.yaml} | Test failure: test_full_different_file (tasks.cephfs.test_full.TestClusterFull) |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/pool-perm.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/sessionmap.yaml} | |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/strays.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/volume-client.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} | |
| | | master | master | master | smithi | | | kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/auto-repair.yaml} | "2017-05-08 15:25:31.065114 mon.0 172.21.15.150:6789/0 457 : cluster [WRN] MDS health message (mds.0): MDS in read-only mode" in cluster log |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/backtrace.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-limits.yaml} | "2017-05-08 14:08:08.630082 mon.0 172.21.15.64:6789/0 582 : cluster [WRN] MDS health message (mds.0): Too many inodes in cache (211/100), 202 inodes in use by clients, 0 stray files" in cluster log |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml} | |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-recovery.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/config-commands.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/damage.yaml} | "2017-05-08 14:42:43.190531 mon.0 172.21.15.70:6789/0 340 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/data-scan.yaml} | Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan) |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/failover.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml} | |
| | | master | master | master | smithi | | | kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/forward-scrub.yaml} | "2017-05-08 14:16:33.075852 mon.0 172.21.15.153:6789/0 398 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/journal-repair.yaml} | "2017-05-08 14:13:43.508310 mon.0 172.21.15.114:6789/0 362 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-flush.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml} | |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-full.yaml} | Test failure: test_full_different_file (tasks.cephfs.test_full.TestClusterFull) |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/pool-perm.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/sessionmap.yaml} | |
| | | master | master | master | smithi | | | kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/strays.yaml} | Test failure: test_purge_queue_op_rate (tasks.cephfs.test_strays.TestStrays) |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| | | master | master | master | smithi | | | kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/volume-client.yaml} | |
| | | master | master | master | smithi | | | kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml} | |