Each entry below begins with a single line giving Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS (the OS field is shown only where one was recorded). The lines that follow give the job Description and, where one was reported, the Failure Reason. ID, Status, and Nodes values are not listed for these jobs.
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/osd-backfill.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/one.yaml workloads/pool-create-delete.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/objectstore/alloc-hint.yaml
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/rest/mgr-restful.yaml
"2018-04-30 22:52:57.707623 mon.a mon.0 172.21.15.78:6789/0 83 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml}
"2018-04-30 22:54:23.418255 mon.a mon.0 172.21.15.39:6789/0 70 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/standalone/crush.yaml
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/upgrade/luminous-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-mimic.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml}
failed to recover before timeout expired
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi | centos
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_rw.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
"2018-04-30 23:02:43.425923 osd.2 osd.2 172.21.15.17:6818/32880 19 : cluster [ERR] 2.1 : soid 2:81e07275:::smithi01715976-180:head data_digest 0x5449a508 != data_digest 0x864e7398 from shard 2" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/module_selftest.yaml}
"2018-04-30 23:45:44.098221 mon.a mon.0 172.21.15.2:6789/0 67 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi | rhel 7.5
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported/rhel_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
{'smithi197.front.sepia.ceph.com': {'attempts': 5, 'changed': True, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}, 'smithi205.front.sepia.ceph.com': {'attempts': 5, 'changed': True, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/osd-recovery.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_write.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2018-04-30 23:54:44.940951 osd.6 osd.6 172.21.15.43:6800/12960 320 : cluster [ERR] 3.b missing primary copy of 3:d68f9757:::benchmark_data_smithi159_32230_object34:head, unfound" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rbd_cls.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/peer.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml}
"2018-04-30 23:48:26.552759 mon.a mon.0 172.21.15.57:6789/0 83 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/objectstore/ceph_objectstore_tool.yaml
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_rand_read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/multimon/{clusters/9.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/radostool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/sync-many.yaml workloads/rados_5925.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-nomsgr/{all/ceph-kvstore-tool.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_workunit_loadgen_mostlyread.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_seq_read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/standalone/erasure-code.yaml
Command failed (workunit test erasure-code/test-erasure-eio.sh) on smithi017 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-partial-recovery-20180430 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/erasure-code/test-erasure-eio.sh'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/random-eio.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml tasks/prometheus.yaml}
"2018-05-01 00:01:04.841101 mon.a mon.0 172.21.15.15:6789/0 69 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/rebuild-mondb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi | ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} supported/ubuntu_latest.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_rand_read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/objectstore/filejournal.yaml
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/recovery-preemption.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/readwrite.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 10000 --objects 6600 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 1200 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/reg11184.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
SSH connection to smithi114 was lost: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml}
"2018-05-01 00:10:54.985316 osd.4 osd.4 172.21.15.57:6800/16574 39 : cluster [ERR] Error -2 reading object 2:1bbafdf8:::smithi20114822-403 oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo:head" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_seq_read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/repair_test.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/multimon/{clusters/9.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_write.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/objectstore/filestore-idempotent-aio-journal.yaml
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/standalone/misc.yaml
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml}
"2018-05-01 00:15:55.963259 osd.7 osd.7 172.21.15.116:6812/12459 58 : cluster [ERR] 2.15s0 missing primary copy of 2:aa6bc2eb:::benchmark_data_smithi125_14610_object18:head, will try copies on 2(2),4(1)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/workunits.yaml}
"2018-05-01 00:15:21.970958 mon.a mon.0 172.21.15.26:6789/0 71 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/singleton/{all/thrash-eio.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi | centos 7.4
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported/centos_latest.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=2 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi | centos
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_cls_all.yaml validater/valgrind.yaml}
wip-partial-recovery-20180430 | wip-partial-recovery-20180430 | master | smithi
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/sample_fio.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rgw_snaps.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_mon_osdmap_prune.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/sample_radosbench.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2018-05-01 00:31:08.391500 osd.2 osd.2 172.21.15.154:6813/12884 260 : cluster [ERR] 5.28 missing primary copy of 5:17a68261:::benchmark_data_smithi154_56211_object2823:head, unfound" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/objectstore/filestore-idempotent.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 50 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/scrub_test.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_chunk --low_tier_pool low_tier --max-ops 4000 --objects 300 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
"2018-05-01 00:34:58.250153 osd.3 osd.3 172.21.15.37:6812/12725 327 : cluster [ERR] Error -2 reading object 2:bc59d76e:::smithi03714684-980:17" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/admin-socket.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore.yaml tasks/dashboard.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/cosbench_64K_read_write.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi083 with status 252: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-partial-recovery-20180430 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/standalone/mon.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/divergent_priors.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/multimon/{clusters/21.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_api_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
rhel 7.5
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported/rhel_latest.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml}
"2018-05-01 00:36:52.510366 osd.5 osd.5 172.21.15.39:6800/12736 170 : cluster [ERR] 3.0 : soid 3:04765b2b:::smithi19816171-54:head data_digest 0x43603094 != data_digest 0x772b4d36 from shard 5" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/divergent_priors2.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/large-omap-object-warnings.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/cosbench_64K_write.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2018-05-01 00:44:55.751921 osd.3 osd.3 172.21.15.119:6812/12818 460 : cluster [ERR] 2.66 missing primary copy of 2:669460aa:::smithi11916110-30:1, unfound" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/many.yaml workloads/rados_mon_workunits.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/objectstore/fusestore.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2018-05-01 00:51:38.725205 osd.6 osd.6 172.21.15.188:6808/12863 533 : cluster [ERR] 6.26 5 tried to pull 6:653616e1:::benchmark_data_smithi035_79362_object158:head but got (2) No such file or directory" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}
"2018-05-01 00:38:36.893043 osd.3 osd.3 172.21.15.112:6813/12690 169 : cluster [ERR] 2.2 : soid 2:6cb639f2:::smithi11216359-328 low_tier:head data_digest 0x5681d562 != data_digest 0xa6d8f318 from shard 3" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rbd_cls.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/dump-stuck.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/fio_4K_rand_read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
"2018-05-01 00:45:07.502839 osd.5 osd.5 172.21.15.93:6813/19678 107 : cluster [ERR] 2.3c : soid 2:3d20e30e:::smithi00814248-471 oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo:head data_digest 0x51fe7bb9 != data_digest 0x8a61aec7 from shard 5" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_cls_all.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/msgr.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml}
"2018-05-01 00:39:10.986607 mon.b mon.0 172.21.15.90:6789/0 71 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/fio_4K_rand_rw.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2018-05-01 00:45:02.920301 mon.a mon.0 172.21.15.153:6789/0 324 : cluster [WRN] Health check failed: 1 slow ops, oldest one blocked for 33 sec (SLOW_OPS)" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/objectstore/keyvaluedb.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/standalone/osd.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_python.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
failed to recover before timeout expired
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/lost-unfound.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/one.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported/ubuntu_latest.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
"2018-05-01 00:52:03.996139 osd.0 osd.0 172.21.15.28:6801/12898 148 : cluster [ERR] 3.16 : soid 3:6ba4015d:::smithi02815443-257 oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo:head data_digest 0xfed84bd1 != data_digest 0x6322c9e8 from shard 0" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/multimon/{clusters/3.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
centos 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_stress_watch.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_rw.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/objectstore/objectcacher-stress.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml tasks/module_selftest.yaml}
"2018-05-01 00:54:34.464688 mon.a mon.0 172.21.15.101:6789/0 95 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_write.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/mon-auth-caps.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_striper.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync-many.yaml workloads/pool-create-delete.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/standalone/scrub.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/mon-config-keys.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton-nomsgr/{all/recovery-unfound-found.yaml rados.yaml}
"2018-05-01 01:10:57.277024 osd.0 osd.0 172.21.15.131:6801/13234 7 : cluster [ERR] 2.0 shard 0: soid 2:002fb70e:::benchmark_data_smithi131_13369_object3791:head data_digest 0x6706be76 != data_digest 0xa6525ea7 from auth oi 2:002fb70e:::benchmark_data_smithi131_13369_object3791:head(16'1999 client.4230.0:3792 dirty|data_digest s 4096 uv 1999 dd a6525ea7 alloc_hint [4096 4096 53])" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_rand_read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
centos 7.4
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} supported/centos_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/mon-config.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/objectstore/objectstore.yaml
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/mon-seesaw.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2018-05-01 01:14:25.210642 mon.a mon.0 172.21.15.157:6789/0 21 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/multimon/{clusters/6.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_seq_read.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
centos 
rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2018-05-01 01:08:01.742311 osd.3 osd.3 172.21.15.194:6809/13146 99 : cluster [ERR] 2.25 missing primary copy of 2:a4d218dd:::benchmark_data_smithi194_15789_object25:head, will try copies on 6" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/prometheus.yaml}
"2018-05-01 01:05:00.000514 mon.b mon.0 172.21.15.52:6789/0 60 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 50 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_chunk --low_tier_pool low_tier --max-ops 4000 --objects 300 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-partial-recovery-20180430
wip-partial-recovery-20180430
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml}