Every job in this run used Ceph branch luminous, suite branch luminous, and teuthology branch master, scheduled on smithi machines. A blank Failure Reason means the job recorded none.

| Description | Failure Reason |
| --- | --- |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/auto-repair.yaml whitelist_health.yaml} | "2017-09-22 10:43:10.493167 mon.a mon.0 172.21.15.19:6789/0 528 : cluster [WRN] Health check failed: 1 MDSs are read only (MDS_READ_ONLY)" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/backtrace.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-limits.yaml whitelist_health.yaml} | "2017-09-22 11:50:40.802644 mon.a mon.0 172.21.15.21:6789/0 650 : cluster [WRN] MDS health message (mds.0): MDS cache is too large (496kB/1GB); 202 inodes in use by clients, 0 stray files" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-recovery.yaml whitelist_health.yaml} | "2017-09-22 10:50:34.811055 mds.b mds.0 172.21.15.17:6810/437783263 1 : cluster [WRN] evicting unresponsive client smithi060: (4325), after waiting 45 seconds during MDS startup" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/config-commands.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/damage.yaml whitelist_health.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/data-scan.yaml whitelist_health.yaml} | Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml whitelist_health.yaml} | "2017-09-22 10:26:38.875174 mon.a mon.0 172.21.15.200:6789/0 459 : cluster [WRN] daemon mds.d is not responding, replacing it as rank 0 with standby daemon mds.b" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/forward-scrub.yaml whitelist_health.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | "2017-09-22 10:31:21.222054 mon.b mon.0 172.21.15.70:6789/0 235 : cluster [WRN] overall HEALTH_WARN 1/3 mons down, quorum b,c" in cluster log |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/journal-repair.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-flush.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-full.yaml whitelist_health.yaml} | Command failed on smithi156 with status 1: 'sudo cp /var/lib/ceph/osd/ceph-0/fsid /tmp/tmpSNQEa3' |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/pool-perm.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/sessionmap.yaml whitelist_health.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/strays.yaml whitelist_health.yaml} | Test failure: test_replicated_delete_speed (tasks.cephfs.test_strays.TestStrays) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/volume-client.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/auto-repair.yaml whitelist_health.yaml} | "2017-09-22 10:40:09.645444 mon.a mon.0 172.21.15.82:6789/0 510 : cluster [WRN] Health check failed: 1 MDSs are read only (MDS_READ_ONLY)" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/backtrace.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-limits.yaml whitelist_health.yaml} | "2017-09-22 11:37:00.947688 mon.a mon.0 172.21.15.115:6789/0 644 : cluster [WRN] MDS health message (mds.0): MDS cache is too large (496kB/1GB); 202 inodes in use by clients, 0 stray files" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-recovery.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/config-commands.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/damage.yaml whitelist_health.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/data-scan.yaml whitelist_health.yaml} | Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/failover.yaml whitelist_health.yaml} | "2017-09-22 11:17:43.340010 mon.a mon.0 172.21.15.114:6789/0 464 : cluster [WRN] daemon mds.d is not responding, replacing it as rank 0 with standby daemon mds.b" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml} | "2017-09-22 11:03:54.749232 mon.b mon.0 172.21.15.47:6789/0 169 : cluster [WRN] Health check failed: 1/3 mons down, quorum b,c (MON_DOWN)" in cluster log |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/forward-scrub.yaml whitelist_health.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/journal-repair.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-flush.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-full.yaml whitelist_health.yaml} | Command failed on smithi009 with status 1: 'sudo cp /var/lib/ceph/osd/ceph-0/fsid /tmp/tmpEfFELU' |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/pool-perm.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/sessionmap.yaml whitelist_health.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/strays.yaml whitelist_health.yaml} | Test failure: test_replicated_delete_speed (tasks.cephfs.test_strays.TestStrays) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/volume-client.yaml whitelist_health.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
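Most of the quoted failures above are not test assertions at all: teuthology scans the cluster log after each job and fails it if a [WRN] or [ERR] line is not matched by the job's log whitelist, which is why the reasons read "... in cluster log". The whitelist_health.yaml fragment named in these job descriptions supplies exactly such patterns. As a minimal sketch of how these transient warnings could be suppressed, assuming the usual overrides/ceph/log-whitelist layout of the luminous-era qa suite (the exact entries shipped in whitelist_health.yaml may differ):

```yaml
# Hypothetical override fragment in the style of whitelist_health.yaml.
# Each entry is a regex matched against cluster-log lines; a matching
# [WRN]/[ERR] line no longer fails the job.
overrides:
  ceph:
    log-whitelist:
      - overall HEALTH_
      - \(MDS_READ_ONLY\)        # "1 MDSs are read only" (auto-repair jobs)
      - \(MON_DOWN\)             # "1/3 mons down, quorum b,c" (mon thrashing)
      - MDS cache is too large   # client-limits deliberately overfills the cache
      - evicting unresponsive client
```

Whitelisting only silences expected log noise; the genuine failures in the table, such as test_rebuild_simple_altpool, test_replicated_delete_speed, and the mds-full jobs' failed `sudo cp .../fsid` commands, would still need to be investigated separately.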