| ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi | ubuntu 16.04 |  | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/dump-stuck.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_seq_read.yaml} | Command failed on smithi037 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 1' |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml} | "2018-02-19 15:20:09.479680 mon.a mon.0 172.21.15.51:6789/0 144 : cluster [WRN] Health check failed: 6 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/objectstore/objectstore.yaml |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/multimon/{clusters/3.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/mon_clock_with_skews.yaml} | "2018-02-19 15:40:09.429309 mon.a mon.0 172.21.15.38:6789/0 66 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml} | "2018-02-19 15:29:35.864681 mon.a mon.0 172.21.15.50:6789/0 174 : cluster [WRN] Health check failed: 6 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi | centos |  | rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_rand_read.yaml} | Command failed on smithi003 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 1' |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/prometheus.yaml} | "2018-02-19 15:45:56.578946 mon.b mon.0 172.21.15.172:6789/0 90 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_mostlyread.yaml} | "2018-02-19 15:36:33.579706 mon.b mon.0 172.21.15.5:6789/0 120 : cluster [WRN] Health check failed: 6 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_5925.yaml} | "2018-02-19 15:32:02.082625 mon.f mon.0 172.21.15.25:6789/0 188 : cluster [WRN] Health check failed: 4 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/lost-unfound.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/objectstore/alloc-hint.yaml | "2018-02-19 15:40:00.932040 mon.a mon.0 172.21.15.164:6789/0 74 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_seq_read.yaml} | Command failed on smithi028 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 1' |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/rest/mgr-restful.yaml | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml} | "2018-02-19 15:44:40.645544 mon.a mon.0 172.21.15.103:6789/0 98 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/standalone/crush.yaml |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/upgrade/luminous-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-mimic.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi | centos 7.4 |  | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml supported/centos_latest.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/readwrite.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | Scrubbing terminated -- not all pgs were active and clean. |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml} | SSH connection to smithi030 was lost: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph pg dump --format=json' |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_write.yaml} | Command failed on smithi196 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 1' |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/multimon/{clusters/6.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml} | SSH connection to smithi026 was lost: "sudo find /var/log/ceph -name '*.log' -print0 \| sudo xargs -0 --no-run-if-empty -- gzip --" |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi | centos |  | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_cls_all.yaml validater/valgrind.yaml} | reraise() argument after * must be an iterable, not NoneType |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/mon-auth-caps.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/many.yaml workloads/rados_api_tests.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore.yaml tasks/workunits.yaml} | "2018-02-19 16:06:02.531899 mon.b mon.0 172.21.15.144:6789/0 65 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/repair_test.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/objectstore/ceph_objectstore_tool.yaml |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/mon-config-keys.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/sample_fio.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/mon-seesaw.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton-nomsgr/{all/ceph-kvstore-tool.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/standalone/erasure-code.yaml |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml} | Command crashed: 'sudo MALLOC_CHECK_=3 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph-osd --cluster ceph --mkfs --mkkey -i 6 --monmap /home/ubuntu/cephtest/ceph.monmap' |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi | ubuntu 16.04 |  | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rgw_snaps.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/sample_radosbench.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/objectstore/filejournal.yaml |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml} |  |
|  |  | wip-sage2-testing-2018-02-19-0720 | wip-sage2-testing-2018-02-19-0720 | master | smithi |  |  | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | Scrubbing terminated -- not all pgs were active and clean. |