All jobs below share the same Ceph Branch (luminous), Suite Branch (luminous), Teuthology Branch (master), and Machine (smithi); the ID, Status, OS, and Nodes columns carry no values in this listing.
Each job is listed by its Description, followed by its Failure Reason where the job failed.
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml}
kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/auto-repair.yaml whitelist_health.yaml}
"2017-09-19 20:07:00.462205 mon.a mon.0 172.21.15.135:6789/0 526 : cluster [WRN] Health check failed: 1 MDSs are read only (MDS_READ_ONLY)" in cluster log
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/backtrace.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-limits.yaml whitelist_health.yaml}
"2017-09-19 19:34:54.462525 mon.a mon.0 172.21.15.82:6789/0 664 : cluster [WRN] MDS health message (mds.0): MDS cache is too large (521kB/1GB); 202 inodes in use by clients, 0 stray files" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-recovery.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/config-commands.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/damage.yaml whitelist_health.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/data-scan.yaml whitelist_health.yaml}
Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan)
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml whitelist_health.yaml}
"2017-09-19 19:44:00.093122 mon.a mon.0 172.21.15.73:6789/0 457 : cluster [WRN] daemon mds.d is not responding, replacing it as rank 0 with standby daemon mds.b" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml}
kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/forward-scrub.yaml whitelist_health.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/journal-repair.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-flush.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-full.yaml whitelist_health.yaml}
Command failed on smithi031 with status 1: 'sudo cp /var/lib/ceph/osd/ceph-1/fsid /tmp/tmpTCTf6s'
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml}
"2017-09-19 19:56:44.624079 mon.b mon.0 172.21.15.81:6789/0 272 : cluster [WRN] Health check failed: 1/3 mons down, quorum b,c (MON_DOWN)" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/pool-perm.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/sessionmap.yaml whitelist_health.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/strays.yaml whitelist_health.yaml}
Test failure: test_snapshot_remove (tasks.cephfs.test_strays.TestStrays)
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/volume-client.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml}
kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/auto-repair.yaml whitelist_health.yaml}
"2017-09-19 20:02:19.458289 mon.a mon.0 172.21.15.11:6789/0 511 : cluster [WRN] Health check failed: 1 MDSs are read only (MDS_READ_ONLY)" in cluster log
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/backtrace.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-limits.yaml whitelist_health.yaml}
"2017-09-19 19:49:43.505163 mon.a mon.0 172.21.15.179:6789/0 644 : cluster [WRN] MDS health message (mds.0): MDS cache is too large (496kB/1GB); 202 inodes in use by clients, 0 stray files" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-recovery.yaml whitelist_health.yaml}
"2017-09-19 20:19:38.787698 mds.a mds.0 172.21.15.39:6805/2385220844 1 : cluster [WRN] evicting unresponsive client smithi074: (4315), after waiting 45 seconds during MDS startup" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/config-commands.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/damage.yaml whitelist_health.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/data-scan.yaml whitelist_health.yaml}
Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan)
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/failover.yaml whitelist_health.yaml}
"2017-09-19 19:59:57.822448 mon.a mon.0 172.21.15.75:6789/0 461 : cluster [WRN] daemon mds.b is not responding, replacing it as rank 0 with standby daemon mds.d" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml}
kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml}
"2017-09-19 20:06:52.388837 mon.b mon.0 172.21.15.7:6789/0 124 : cluster [WRN] Health check failed: 1/3 mons down, quorum b,c (MON_DOWN)" in cluster log
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/forward-scrub.yaml whitelist_health.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml}
"2017-09-19 20:26:27.273134 mon.b mon.0 172.21.15.68:6789/0 176 : cluster [WRN] Health check failed: 1/3 mons down, quorum b,c (MON_DOWN)" in cluster log
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/journal-repair.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml}
"2017-09-19 20:12:36.751773 mon.b mon.0 172.21.15.64:6789/0 133 : cluster [WRN] Health check failed: 1/3 mons down, quorum b,c (MON_DOWN)" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-flush.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_ffsb.yaml}
"2017-09-19 20:02:48.910455 osd.0 osd.0 172.21.15.132:6804/10219 124 : cluster [WRN] Monitor daemon marked osd.0 down, but it is still running" in cluster log
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-full.yaml whitelist_health.yaml}
Command failed on smithi092 with status 1: 'sudo cp /var/lib/ceph/osd/ceph-0/fsid /tmp/tmpHWECqU'
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/pool-perm.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/sessionmap.yaml whitelist_health.yaml}
kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml thrashosds-health.yaml whitelist_health.yaml workloads/kclient_workunit_suites_iozone.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/strays.yaml whitelist_health.yaml}
Test failure: test_replicated_delete_speed (tasks.cephfs.test_strays.TestStrays)
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml}
"2017-09-20 07:07:26.256841 mon.a mon.0 172.21.15.24:6789/0 171 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml}
kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/volume-client.yaml whitelist_health.yaml}
kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml}