Ceph Branch: wip-jcsp-testing-20170419
Suite Branch: wip-jcsp-testing-20170419
Teuthology Branch: master
Machine: smithi

| Description | Failure Reason |
| --- | --- |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/auto-repair.yaml} | "2017-04-19 19:06:38.235408 mon.0 172.21.15.135:6789/0 498 : cluster [WRN] MDS health message (mds.0): MDS in read-only mode" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml workloads/kclient_workunit_suites_ffsb.yaml} | Command failed on smithi134 with status 110: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 172.21.15.108:6789,172.21.15.168:6790,172.21.15.168:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds' |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/backtrace.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-limits.yaml} | "2017-04-19 19:10:54.123736 mon.0 172.21.15.92:6789/0 632 : cluster [WRN] MDS health message (mds.0): Too many inodes in cache (162/100), 202 inodes in use by clients, 0 stray files" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_iozone.yaml} | "2017-04-19 19:08:29.805426 osd.2 172.21.15.192:6800/12414 163 : cluster [WRN] map e11 wrongly marked me down at e10" in cluster log |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-recovery.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/config-commands.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/damage.yaml} | "2017-04-19 22:46:42.799684 mon.0 172.21.15.64:6789/0 358 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/data-scan.yaml} | Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/forward-scrub.yaml} | "2017-04-19 19:45:16.766535 mon.0 172.21.15.117:6789/0 442 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/journal-repair.yaml} | "2017-04-19 19:14:11.674523 mon.0 172.21.15.73:6789/0 371 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-flush.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-full.yaml} | Test failure: test_full_different_file (tasks.cephfs.test_full.TestClusterFull) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/pool-perm.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/sessionmap.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/strays.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/volume-client.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/filestore-xfs.yaml tasks/kernel_cfuse_workunits_dbench_iozone.yaml} | Command failed on smithi037 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph pg dump --format=json' |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/auto-repair.yaml} | "2017-04-19 19:32:34.549285 mon.0 172.21.15.82:6789/0 487 : cluster [WRN] MDS health message (mds.0): MDS in read-only mode" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/default.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/backtrace.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/client-limits.yaml} | "2017-04-19 20:28:11.825503 mon.0 172.21.15.158:6789/0 617 : cluster [WRN] MDS health message (mds.0): Too many inodes in cache (162/100), 202 inodes in use by clients, 0 stray files" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/client-recovery.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/config-commands.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/damage.yaml} | "2017-04-19 19:58:16.171628 mon.0 172.21.15.203:6789/0 356 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/data-scan.yaml} | Test failure: test_rebuild_simple_altpool (tasks.cephfs.test_data_scan.TestDataScan) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/failover.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_trivial_sync.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_direct_io.yaml} | |
| kcephfs/mixed-clients/{clusters/2-clients.yaml conf.yaml objectstore/bluestore.yaml tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/forward-scrub.yaml} | "2017-04-19 20:45:05.924591 mon.0 172.21.15.158:6789/0 447 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/default.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_kernel_untar_build.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/journal-repair.yaml} | "2017-04-19 19:48:33.489620 mon.0 172.21.15.161:6789/0 369 : cluster [ERR] MDS health message (mds.0): Metadata damage detected" in cluster log |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_misc.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_o_trunc.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/mds-flush.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_snaps.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/filestore-xfs.yaml thrashers/mds.yaml workloads/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/mds-full.yaml} | Test failure: test_full_different_file (tasks.cephfs.test_full.TestClusterFull) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_ffsb.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/pool-perm.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_fsx.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/sessionmap.yaml} | |
| kcephfs/thrash/{clusters/fixed-3-cephfs.yaml conf.yaml objectstore/bluestore.yaml thrashers/mon.yaml workloads/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsync.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/filestore-xfs.yaml tasks/strays.yaml} | Test failure: test_files_throttle (tasks.cephfs.test_strays.TestStrays) |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_suites_iozone.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/yes.yaml objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} | |
| kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore/bluestore.yaml tasks/volume-client.yaml} | |
| kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml inline/no.yaml objectstore/filestore-xfs.yaml tasks/kclient_workunit_trivial_sync.yaml} | |