Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 2456838 2018-04-30 18:23:31 2018-04-30 22:37:04 2018-04-30 23:01:03 0:23:59 0:13:04 0:10:55 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
pass 2456839 2018-04-30 18:23:34 2018-04-30 22:37:39 2018-04-30 23:15:39 0:38:00 0:24:25 0:13:35 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} 3
fail 2456840 2018-04-30 18:23:36 2018-04-30 22:38:25 2018-04-30 23:08:25 0:30:00 0:19:10 0:10:50 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'

pass 2456841 2018-04-30 18:23:38 2018-04-30 22:39:17 2018-04-30 23:01:16 0:21:59 0:12:21 0:09:38 smithi master rados/singleton/{all/osd-backfill.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
fail 2456842 2018-04-30 18:23:41 2018-04-30 22:40:28 2018-04-30 23:08:27 0:27:59 0:18:18 0:09:41 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2456843 2018-04-30 18:23:43 2018-04-30 22:40:29 2018-04-30 23:12:28 0:31:59 0:21:57 0:10:02 smithi master rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
pass 2456844 2018-04-30 18:23:44 2018-04-30 22:40:29 2018-04-30 23:04:28 0:23:59 0:15:09 0:08:50 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 2456845 2018-04-30 18:23:46 2018-04-30 22:40:28 2018-04-30 23:04:28 0:24:00 0:15:02 0:08:58 smithi master rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/one.yaml workloads/pool-create-delete.yaml} 2
pass 2456846 2018-04-30 18:23:47 2018-04-30 22:40:28 2018-04-30 22:54:27 0:13:59 0:05:56 0:08:03 smithi master rados/objectstore/alloc-hint.yaml 1
fail 2456847 2018-04-30 18:23:48 2018-04-30 22:40:33 2018-04-30 22:56:32 0:15:59 0:06:01 0:09:58 smithi master rados/rest/mgr-restful.yaml 1
Failure Reason:

"2018-04-30 22:52:57.707623 mon.a mon.0 172.21.15.78:6789/0 83 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

fail 2456848 2018-04-30 18:23:50 2018-04-30 22:40:41 2018-04-30 23:00:41 0:20:00 0:10:22 0:09:38 smithi master rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml} 1
Failure Reason:

"2018-04-30 22:54:23.418255 mon.a mon.0 172.21.15.39:6789/0 70 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

pass 2456849 2018-04-30 18:23:51 2018-04-30 22:40:50 2018-04-30 22:54:49 0:13:59 0:06:15 0:07:44 smithi master rados/standalone/crush.yaml 1
pass 2456850 2018-04-30 18:23:54 2018-04-30 22:41:07 2018-04-30 23:05:07 0:24:00 0:13:10 0:10:50 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2456851 2018-04-30 18:23:55 2018-04-30 22:41:07 2018-04-30 23:17:07 0:36:00 0:24:15 0:11:45 smithi master rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} 2
pass 2456852 2018-04-30 18:23:57 2018-04-30 22:41:08 2018-04-30 23:03:07 0:21:59 0:08:58 0:13:01 smithi master rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} 3
pass 2456853 2018-04-30 18:23:58 2018-04-30 22:41:57 2018-04-30 23:05:56 0:23:59 0:09:47 0:14:12 smithi master rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
fail 2456854 2018-04-30 18:23:59 2018-04-30 22:42:16 2018-05-01 00:32:18 1:50:02 1:38:54 0:11:08 smithi master rados/upgrade/luminous-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-mimic.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml} 3
Failure Reason:

failed to recover before timeout expired

pass 2456855 2018-04-30 18:24:01 2018-04-30 22:42:50 2018-04-30 23:14:50 0:32:00 0:23:44 0:08:16 smithi master centos rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml} 2
pass 2456856 2018-04-30 18:24:04 2018-04-30 22:43:16 2018-04-30 23:01:15 0:17:59 0:09:43 0:08:16 smithi master rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_rw.yaml} 1
pass 2456857 2018-04-30 18:24:05 2018-04-30 22:43:52 2018-04-30 23:05:52 0:22:00 0:12:53 0:09:07 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
fail 2456858 2018-04-30 18:24:06 2018-04-30 22:44:28 2018-04-30 23:06:28 0:22:00 0:11:45 0:10:15 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
Failure Reason:

"2018-04-30 23:02:43.425923 osd.2 osd.2 172.21.15.17:6818/32880 19 : cluster [ERR] 2.1 : soid 2:81e07275:::smithi01715976-180:head data_digest 0x5449a508 != data_digest 0x864e7398 from shard 2" in cluster log

pass 2456859 2018-04-30 18:24:08 2018-04-30 22:44:29 2018-04-30 23:24:28 0:39:59 0:28:48 0:11:11 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml} 2
pass 2456860 2018-04-30 18:24:09 2018-04-30 23:30:30 2018-04-30 23:50:29 0:19:59 0:11:44 0:08:15 smithi master rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} 1
fail 2456861 2018-04-30 18:24:11 2018-04-30 23:30:30 2018-04-30 23:50:29 0:19:59 0:08:34 0:11:25 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2456862 2018-04-30 18:24:12 2018-04-30 23:30:41 2018-04-30 23:52:41 0:22:00 0:09:45 0:12:15 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2456863 2018-04-30 18:24:13 2018-04-30 23:32:23 2018-05-01 00:26:24 0:54:01 0:42:04 0:11:57 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2456864 2018-04-30 18:24:14 2018-04-30 23:32:30 2018-04-30 23:54:29 0:21:59 0:11:36 0:10:23 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/module_selftest.yaml} 2
Failure Reason:

"2018-04-30 23:45:44.098221 mon.a mon.0 172.21.15.2:6789/0 67 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

fail 2456865 2018-04-30 18:24:15 2018-04-30 23:32:59 2018-04-30 23:40:58 0:07:59 0:01:57 0:06:02 smithi master rhel 7.5 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported/rhel_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
Failure Reason:

{'smithi197.front.sepia.ceph.com': {'attempts': 5, 'changed': True, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}, 'smithi205.front.sepia.ceph.com': {'attempts': 5, 'changed': True, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}}

pass 2456866 2018-04-30 18:24:16 2018-04-30 23:33:38 2018-04-30 23:53:38 0:20:00 0:08:35 0:11:25 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml} 2
pass 2456867 2018-04-30 18:24:18 2018-04-30 23:33:44 2018-04-30 23:51:43 0:17:59 0:09:08 0:08:51 smithi master rados/singleton/{all/osd-recovery.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml} 1
pass 2456868 2018-04-30 18:24:19 2018-04-30 23:33:54 2018-05-01 00:11:54 0:38:00 0:27:54 0:10:06 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
pass 2456869 2018-04-30 18:24:20 2018-04-30 23:34:18 2018-05-01 00:08:18 0:34:00 0:22:16 0:11:44 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2456870 2018-04-30 18:24:22 2018-04-30 23:34:21 2018-04-30 23:52:21 0:18:00 0:09:10 0:08:50 smithi master rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_write.yaml} 1
fail 2456871 2018-04-30 18:24:23 2018-04-30 23:34:34 2018-05-01 00:24:34 0:50:00 0:39:59 0:10:01 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
Failure Reason:

"2018-04-30 23:54:44.940951 osd.6 osd.6 172.21.15.43:6800/12960 320 : cluster [ERR] 3.b missing primary copy of 3:d68f9757:::benchmark_data_smithi159_32230_object34:head, unfound" in cluster log

pass 2456872 2018-04-30 18:24:24 2018-04-30 23:35:23 2018-04-30 23:57:22 0:21:59 0:11:08 0:10:51 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
pass 2456873 2018-04-30 18:24:25 2018-04-30 23:35:28 2018-04-30 23:57:28 0:22:00 0:08:59 0:13:01 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rbd_cls.yaml} 3
pass 2456874 2018-04-30 18:24:26 2018-04-30 23:35:50 2018-04-30 23:51:49 0:15:59 0:06:26 0:09:33 smithi master rados/singleton/{all/peer.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
fail 2456875 2018-04-30 18:24:27 2018-04-30 23:36:03 2018-04-30 23:52:02 0:15:59 0:07:41 0:08:18 smithi master rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml} 1
Failure Reason:

"2018-04-30 23:48:26.552759 mon.a mon.0 172.21.15.57:6789/0 83 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

pass 2456876 2018-04-30 18:24:28 2018-04-30 23:36:35 2018-05-01 00:10:35 0:34:00 0:24:33 0:09:27 smithi master rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml} 2
pass 2456877 2018-04-30 18:24:28 2018-04-30 23:36:36 2018-05-01 00:00:35 0:23:59 0:10:51 0:13:08 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 2456878 2018-04-30 18:24:29 2018-04-30 23:36:36 2018-05-01 00:00:35 0:23:59 0:12:20 0:11:39 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 2456879 2018-04-30 18:24:30 2018-04-30 23:36:35 2018-05-01 00:02:35 0:26:00 0:17:05 0:08:55 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml} 2
pass 2456880 2018-04-30 18:24:31 2018-04-30 23:36:35 2018-05-01 00:00:35 0:24:00 0:13:16 0:10:44 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
fail 2456881 2018-04-30 18:24:32 2018-04-30 23:36:35 2018-04-30 23:58:35 0:22:00 0:11:41 0:10:19 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'

pass 2456882 2018-04-30 18:24:33 2018-04-30 23:36:58 2018-04-30 23:52:58 0:16:00 0:06:49 0:09:11 smithi master rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
pass 2456883 2018-04-30 18:24:34 2018-04-30 23:37:03 2018-05-01 00:15:04 0:38:01 0:26:10 0:11:51 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
pass 2456884 2018-04-30 18:24:34 2018-04-30 23:37:18 2018-04-30 23:59:18 0:22:00 0:10:48 0:11:12 smithi master rados/objectstore/ceph_objectstore_tool.yaml 1
pass 2456885 2018-04-30 18:24:35 2018-04-30 23:40:27 2018-05-01 00:00:27 0:20:00 0:10:34 0:09:26 smithi master rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_rand_read.yaml} 1
pass 2456886 2018-04-30 18:24:36 2018-04-30 23:40:30 2018-05-01 00:04:30 0:24:00 0:11:58 0:12:02 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 2456887 2018-04-30 18:24:37 2018-04-30 23:40:31 2018-04-30 23:58:30 0:17:59 0:07:16 0:10:43 smithi master rados/multimon/{clusters/9.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_no_skews.yaml} 3
pass 2456888 2018-04-30 18:24:38 2018-04-30 23:40:34 2018-05-01 00:04:33 0:23:59 0:13:01 0:10:58 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2456889 2018-04-30 18:24:39 2018-04-30 23:40:36 2018-05-01 00:12:36 0:32:00 0:20:39 0:11:21 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
fail 2456890 2018-04-30 18:24:40 2018-04-30 23:40:54 2018-05-01 00:02:53 0:21:59 0:09:29 0:12:30 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'

pass 2456891 2018-04-30 18:24:41 2018-04-30 23:40:59 2018-05-01 00:02:59 0:22:00 0:12:41 0:09:19 smithi master rados/singleton/{all/radostool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml} 1
pass 2456892 2018-04-30 18:24:41 2018-04-30 23:41:23 2018-04-30 23:59:22 0:17:59 0:08:39 0:09:20 smithi master rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/sync-many.yaml workloads/rados_5925.yaml} 2
fail 2456893 2018-04-30 18:24:42 2018-04-30 23:42:54 2018-05-01 00:06:54 0:24:00 0:11:31 0:12:29 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2456894 2018-04-30 18:24:43 2018-04-30 23:43:03 2018-05-01 00:11:03 0:28:00 0:16:21 0:11:39 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
pass 2456895 2018-04-30 18:24:44 2018-04-30 23:43:05 2018-04-30 23:57:04 0:13:59 0:06:00 0:07:59 smithi master rados/singleton-nomsgr/{all/ceph-kvstore-tool.yaml rados.yaml} 1
pass 2456896 2018-04-30 18:24:45 2018-04-30 23:43:20 2018-05-01 00:13:20 0:30:00 0:19:32 0:10:28 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
pass 2456897 2018-04-30 18:24:46 2018-04-30 23:45:06 2018-05-01 00:17:05 0:31:59 0:21:10 0:10:49 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_workunit_loadgen_mostlyread.yaml} 2
pass 2456898 2018-04-30 18:24:47 2018-04-30 23:45:07 2018-05-01 00:03:07 0:18:00 0:09:30 0:08:30 smithi master rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_seq_read.yaml} 1
fail 2456899 2018-04-30 18:24:47 2018-04-30 23:45:09 2018-05-01 00:13:09 0:28:00 0:19:04 0:08:56 smithi master rados/standalone/erasure-code.yaml 1
Failure Reason:

Command failed (workunit test erasure-code/test-erasure-eio.sh) on smithi017 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-partial-recovery-20180430 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/erasure-code/test-erasure-eio.sh'

pass 2456900 2018-04-30 18:24:48 2018-04-30 23:45:11 2018-05-01 00:19:11 0:34:00 0:24:36 0:09:24 smithi master rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml} 2
pass 2456901 2018-04-30 18:24:49 2018-04-30 23:45:16 2018-05-01 00:23:16 0:38:00 0:27:05 0:10:55 smithi master rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} 3
pass 2456902 2018-04-30 18:24:50 2018-04-30 23:46:24 2018-05-01 00:10:24 0:24:00 0:10:53 0:13:07 smithi master rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
fail 2456903 2018-04-30 18:24:51 2018-04-30 23:46:25 2018-05-01 00:04:24 0:17:59 0:08:14 0:09:45 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2456904 2018-04-30 18:24:51 2018-04-30 23:46:31 2018-05-01 00:12:31 0:26:00 0:14:56 0:11:04 smithi master rados/singleton/{all/random-eio.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml} 2
pass 2456905 2018-04-30 18:24:52 2018-04-30 23:46:31 2018-05-01 00:24:31 0:38:00 0:24:28 0:13:32 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 3
fail 2456906 2018-04-30 18:24:53 2018-04-30 23:46:32 2018-05-01 00:08:32 0:22:00 0:12:26 0:09:34 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2456907 2018-04-30 18:24:54 2018-04-30 23:46:41 2018-05-01 00:18:41 0:32:00 0:18:29 0:13:31 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2456908 2018-04-30 18:24:54 2018-04-30 23:47:09 2018-05-01 00:15:09 0:28:00 0:16:27 0:11:33 smithi master rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml} 2
fail 2456909 2018-04-30 18:24:55 2018-04-30 23:47:59 2018-05-01 00:05:59 0:18:00 0:07:46 0:10:14 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml tasks/prometheus.yaml} 2
Failure Reason:

"2018-05-01 00:01:04.841101 mon.a mon.0 172.21.15.15:6789/0 69 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

pass 2456910 2018-04-30 18:24:56 2018-04-30 23:48:23 2018-05-01 00:18:23 0:30:00 0:20:07 0:09:53 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
pass 2456911 2018-04-30 18:24:57 2018-04-30 23:48:44 2018-05-01 00:08:43 0:19:59 0:10:26 0:09:33 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
pass 2456912 2018-04-30 18:24:57 2018-04-30 23:49:18 2018-05-01 00:05:17 0:15:59 0:08:06 0:07:53 smithi master rados/singleton/{all/rebuild-mondb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
pass 2456913 2018-04-30 18:24:58 2018-04-30 23:50:04 2018-05-01 00:16:04 0:26:00 0:14:55 0:11:05 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 2456914 2018-04-30 18:24:59 2018-04-30 23:50:29 2018-05-01 00:24:29 0:34:00 0:24:38 0:09:22 smithi master ubuntu 16.04 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} supported/ubuntu_latest.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 2456915 2018-04-30 18:25:00 2018-04-30 23:50:29 2018-05-01 00:10:28 0:19:59 0:10:48 0:09:11 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 2456916 2018-04-30 18:25:01 2018-04-30 23:50:29 2018-05-01 00:14:29 0:24:00 0:13:29 0:10:31 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
pass 2456917 2018-04-30 18:25:01 2018-04-30 23:50:30 2018-05-01 00:10:30 0:20:00 0:09:43 0:10:17 smithi master rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_rand_read.yaml} 1
pass 2456918 2018-04-30 18:25:02 2018-04-30 23:50:30 2018-05-01 00:06:30 0:16:00 0:06:37 0:09:23 smithi master rados/objectstore/filejournal.yaml 1
pass 2456919 2018-04-30 18:25:03 2018-04-30 23:50:31 2018-05-01 00:18:30 0:27:59 0:18:18 0:09:41 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
pass 2456920 2018-04-30 18:25:04 2018-04-30 23:50:35 2018-05-01 00:12:34 0:21:59 0:13:01 0:08:58 smithi master rados/singleton/{all/recovery-preemption.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
pass 2456921 2018-04-30 18:25:04 2018-04-30 23:50:37 2018-05-01 00:04:37 0:14:00 0:05:15 0:08:45 smithi master rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml} 1
pass 2456922 2018-04-30 18:25:05 2018-04-30 23:50:51 2018-05-01 00:26:50 0:35:59 0:27:01 0:08:58 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
pass 2456923 2018-04-30 18:25:06 2018-04-30 23:51:01 2018-05-01 00:11:00 0:19:59 0:11:24 0:08:35 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/readwrite.yaml} 2
pass 2456924 2018-04-30 18:25:06 2018-04-30 23:51:05 2018-05-01 00:21:06 0:30:01 0:22:41 0:07:20 smithi master rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} 1
pass 2456925 2018-04-30 18:25:07 2018-04-30 23:51:06 2018-05-01 00:13:06 0:22:00 0:13:09 0:08:51 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 2456926 2018-04-30 18:25:08 2018-04-30 23:51:10 2018-05-01 00:11:09 0:19:59 0:10:38 0:09:21 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
fail 2456927 2018-04-30 18:25:09 2018-04-30 23:51:55 2018-05-01 00:15:54 0:23:59 0:13:06 0:10:53 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 10000 --objects 6600 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 1200 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'

pass 2456928 2018-04-30 18:25:09 2018-04-30 23:51:55 2018-05-01 00:09:54 0:17:59 0:08:23 0:09:36 smithi master rados/singleton/{all/reg11184.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml} 1
pass 2456929 2018-04-30 18:25:10 2018-04-30 23:52:05 2018-05-01 00:12:04 0:19:59 0:10:27 0:09:32 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
dead 2456930 2018-04-30 18:25:11 2018-04-30 23:52:11 2018-05-01 11:54:47 12:02:36 11:51:10 0:11:26 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

SSH connection to smithi114 was lost: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2456931 2018-04-30 18:25:12 2018-04-30 23:52:12 2018-05-01 00:22:12 0:30:00 0:20:08 0:09:52 smithi master rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml} 2
Failure Reason:

"2018-05-01 00:10:54.985316 osd.4 osd.4 172.21.15.57:6800/16574 39 : cluster [ERR] Error -2 reading object 2:1bbafdf8:::smithi20114822-403 oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo:head" in cluster log

pass 2456932 2018-04-30 18:25:12 2018-04-30 23:52:22 2018-05-01 00:14:21 0:21:59 0:08:38 0:13:21 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml} 3
pass 2456933 2018-04-30 18:25:13 2018-04-30 23:52:35 2018-05-01 00:10:34 0:17:59 0:10:06 0:07:53 smithi master rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_seq_read.yaml} 1
fail 2456934 2018-04-30 18:25:14 2018-04-30 23:52:42 2018-05-01 00:18:41 0:25:59 0:13:27 0:12:32 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2456935 2018-04-30 18:25:15 2018-04-30 23:52:48 2018-05-01 00:22:47 0:29:59 0:19:40 0:10:19 smithi master rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync.yaml workloads/rados_api_tests.yaml} 2
fail 2456936 2018-04-30 18:25:15 2018-04-30 23:53:08 2018-05-01 00:17:08 0:24:00 0:13:54 0:10:06 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2456937 2018-04-30 18:25:16 2018-04-30 23:53:39 2018-05-01 00:15:39 0:22:00 0:10:25 0:11:35 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml} 2
pass 2456938 2018-04-30 18:25:17 2018-04-30 23:54:35 2018-05-01 00:16:34 0:21:59 0:09:59 0:12:00 smithi master rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml} 2
pass 2456939 2018-04-30 18:25:17 2018-04-30 23:54:34 2018-05-01 00:30:35 0:36:01 0:22:40 0:13:21 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
pass 2456940 2018-04-30 18:25:18 2018-04-30 23:54:52 2018-05-01 00:24:52 0:30:00 0:18:37 0:11:23 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2456941 2018-04-30 18:25:19 2018-04-30 23:54:53 2018-05-01 00:20:53 0:26:00 0:15:09 0:10:51 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/repair_test.yaml} 2
pass 2456942 2018-04-30 18:25:20 2018-04-30 23:56:32 2018-05-01 00:10:31 0:13:59 0:06:00 0:07:59 smithi master rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml} 1
pass 2456943 2018-04-30 18:25:20 2018-04-30 23:57:05 2018-05-01 00:49:06 0:52:01 0:41:58 0:10:03 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
pass 2456944 2018-04-30 18:25:21 2018-04-30 23:57:24 2018-05-01 00:21:24 0:24:00 0:14:15 0:09:45 smithi master rados/multimon/{clusters/9.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/mon_clock_with_skews.yaml} 3
fail 2456945 2018-04-30 18:25:22 2018-04-30 23:57:40 2018-05-01 00:17:39 0:19:59 0:09:36 0:10:23 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2456946 2018-04-30 18:25:22 2018-04-30 23:58:10 2018-05-01 00:16:10 0:18:00 0:10:05 0:07:55 smithi master rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
pass 2456947 2018-04-30 18:25:23 2018-04-30 23:58:31 2018-05-01 00:16:30 0:17:59 0:08:54 0:09:05 smithi master rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4M_write.yaml} 1
pass 2456948 2018-04-30 18:25:24 2018-04-30 23:58:31 2018-05-01 00:20:31 0:22:00 0:11:14 0:10:46 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 2456949 2018-04-30 18:25:25 2018-04-30 23:58:36 2018-05-01 02:28:38 2:30:02 2:21:26 0:08:36 smithi master rados/objectstore/filestore-idempotent-aio-journal.yaml 1
pass 2456950 2018-04-30 18:25:25 2018-04-30 23:59:10 2018-05-01 00:19:10 0:20:00 0:10:56 0:09:04 smithi master rados/standalone/misc.yaml 1
fail 2456951 2018-04-30 18:25:26 2018-04-30 23:59:10 2018-05-01 00:35:10 0:36:00 0:24:43 0:11:17 smithi master rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml} 2
Failure Reason:

"2018-05-01 00:15:55.963259 osd.7 osd.7 172.21.15.116:6812/12459 58 : cluster [ERR] 2.15s0 missing primary copy of 2:aa6bc2eb:::benchmark_data_smithi125_14610_object18:head, will try copies on 2(2),4(1)" in cluster log

pass 2456952 2018-04-30 18:25:27 2018-04-30 23:59:19 2018-05-01 00:19:19 0:20:00 0:09:25 0:10:35 smithi master rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} 3
pass 2456953 2018-04-30 18:25:27 2018-04-30 23:59:23 2018-05-01 00:21:23 0:22:00 0:10:15 0:11:45 smithi master rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 2456954 2018-04-30 18:25:28 2018-05-01 00:00:56 2018-05-01 00:20:54 0:19:58 0:09:56 0:10:02 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
fail 2456955 2018-04-30 18:25:29 2018-05-01 00:00:57 2018-05-01 00:18:55 0:17:58 0:06:20 0:11:38 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/workunits.yaml} 2
Failure Reason:

"2018-05-01 00:15:21.970958 mon.a mon.0 172.21.15.26:6789/0 71 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

pass 2456956 2018-04-30 18:25:30 2018-05-01 00:00:56 2018-05-01 00:24:54 0:23:58 0:14:04 0:09:54 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
fail 2456957 2018-04-30 18:25:30 2018-05-01 00:00:57 2018-05-01 00:26:55 0:25:58 0:15:12 0:10:46 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'

pass 2456958 2018-04-30 18:25:31 2018-05-01 00:00:55 2018-05-01 00:42:55 0:42:00 0:31:49 0:10:11 smithi master rados/singleton/{all/thrash-eio.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} 2
pass 2456959 2018-04-30 18:25:32 2018-05-01 00:00:55 2018-05-01 00:36:55 0:36:00 0:25:29 0:10:31 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
pass 2456960 2018-04-30 18:25:32 2018-05-01 00:01:13 2018-05-01 00:25:12 0:23:59 0:12:27 0:11:32 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 2456961 2018-04-30 18:25:33 2018-05-01 00:02:37 2018-05-01 00:24:37 0:22:00 0:11:50 0:10:10 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2456962 2018-04-30 18:25:34 2018-05-01 00:02:47 2018-05-01 00:30:47 0:28:00 0:22:11 0:05:49 smithi master centos 7.4 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported/centos_latest.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
fail 2456963 2018-04-30 18:25:35 2018-05-01 00:02:55 2018-05-01 00:24:54 0:21:59 0:10:27 0:11:32 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 3
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=2 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2456964 2018-04-30 18:25:35 2018-05-01 00:03:00 2018-05-01 00:33:00 0:30:00 0:21:27 0:08:33 smithi master centos rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_cls_all.yaml validater/valgrind.yaml} 2
pass 2456965 2018-04-30 18:25:36 2018-05-01 00:03:08 2018-05-01 00:33:08 0:30:00 0:19:59 0:10:01 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
pass 2456966 2018-04-30 18:25:37 2018-05-01 00:04:16 2018-05-01 00:24:16 0:20:00 0:10:42 0:09:18 smithi master rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/sample_fio.yaml} 1
pass 2456967 2018-04-30 18:25:37 2018-05-01 00:04:26 2018-05-01 00:28:25 0:23:59 0:15:03 0:08:56 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rgw_snaps.yaml} 2
pass 2456968 2018-04-30 18:25:38 2018-05-01 00:04:31 2018-05-01 00:34:31 0:30:00 0:17:14 0:12:46 smithi master rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} 2
pass 2456969 2018-04-30 18:25:39 2018-05-01 00:04:35 2018-05-01 00:20:34 0:15:59 0:06:43 0:09:16 smithi master rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml} 1
fail 2456970 2018-04-30 18:25:39 2018-05-01 00:04:38 2018-05-01 00:26:37 0:21:59 0:10:18 0:11:41 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'

fail 2456971 2018-04-30 18:25:40 2018-05-01 00:05:28 2018-05-01 00:35:28 0:30:00 0:15:54 0:14:06 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2456972 2018-04-30 18:25:41 2018-05-01 00:05:28 2018-05-01 00:29:28 0:24:00 0:12:30 0:11:30 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2456973 2018-04-30 18:25:42 2018-05-01 00:06:00 2018-05-01 00:36:00 0:30:00 0:17:10 0:12:50 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
pass 2456974 2018-04-30 18:25:42 2018-05-01 00:06:22 2018-05-01 00:28:21 0:21:59 0:09:40 0:12:19 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml} 2
pass 2456975 2018-04-30 18:25:43 2018-05-01 00:06:39 2018-05-01 00:36:38 0:29:59 0:17:59 0:12:00 smithi master rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml} 2
pass 2456976 2018-04-30 18:25:44 2018-05-01 00:06:55 2018-05-01 01:16:56 1:10:01 0:58:25 0:11:36 smithi master rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 2456977 2018-04-30 18:25:44 2018-05-01 00:08:27 2018-05-01 00:36:27 0:28:00 0:16:47 0:11:13 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2456978 2018-04-30 18:25:45 2018-05-01 00:08:32 2018-05-01 00:40:32 0:32:00 0:20:08 0:11:52 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2456979 2018-04-30 18:25:46 2018-05-01 00:08:33 2018-05-01 00:26:33 0:18:00 0:08:04 0:09:56 smithi master rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/sample_radosbench.yaml} 1
fail 2456980 2018-04-30 18:25:47 2018-05-01 00:08:34 2018-05-01 00:56:35 0:48:01 0:37:18 0:10:43 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
Failure Reason:

"2018-05-01 00:31:08.391500 osd.2 osd.2 172.21.15.154:6813/12884 260 : cluster [ERR] 5.28 missing primary copy of 5:17a68261:::benchmark_data_smithi154_56211_object2823:head, unfound" in cluster log

pass 2456981 2018-04-30 18:25:47 2018-05-01 00:08:45 2018-05-01 00:28:44 0:19:59 0:09:42 0:10:17 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
pass 2456982 2018-04-30 18:25:48 2018-05-01 00:10:07 2018-05-01 02:44:13 2:34:06 2:25:39 0:08:27 smithi master rados/objectstore/filestore-idempotent.yaml 1
pass 2456983 2018-04-30 18:25:49 2018-05-01 00:10:25 2018-05-01 00:24:24 0:13:59 0:06:13 0:07:46 smithi master rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
fail 2456984 2018-04-30 18:25:49 2018-05-01 00:10:30 2018-05-01 00:30:29 0:19:59 0:09:42 0:10:17 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 50 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

pass 2456985 2018-04-30 18:25:50 2018-05-01 00:10:31 2018-05-01 00:30:31 0:20:00 0:09:32 0:10:28 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/scrub_test.yaml} 2
pass 2456986 2018-04-30 18:25:51 2018-05-01 00:10:32 2018-05-01 00:30:32 0:20:00 0:11:12 0:08:48 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 2456987 2018-04-30 18:25:52 2018-05-01 00:10:36 2018-05-01 00:26:35 0:15:59 0:06:41 0:09:18 smithi master rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml} 1
fail 2456988 2018-04-30 18:25:52 2018-05-01 00:10:36 2018-05-01 00:36:36 0:26:00 0:15:39 0:10:21 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_chunk --low_tier_pool low_tier --max-ops 4000 --objects 300 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

fail 2456989 2018-04-30 18:25:53 2018-05-01 00:11:02 2018-05-01 00:41:01 0:29:59 0:19:54 0:10:05 smithi master rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml} 2
Failure Reason:

"2018-05-01 00:34:58.250153 osd.3 osd.3 172.21.15.37:6812/12725 327 : cluster [ERR] Error -2 reading object 2:bc59d76e:::smithi03714684-980:17" in cluster log

pass 2456990 2018-04-30 18:25:54 2018-05-01 00:11:04 2018-05-01 00:47:04 0:36:00 0:24:06 0:11:54 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml} 3
pass 2456991 2018-04-30 18:25:55 2018-05-01 00:11:20 2018-05-01 00:41:19 0:29:59 0:19:23 0:10:36 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
pass 2456992 2018-04-30 18:25:55 2018-05-01 00:11:20 2018-05-01 00:27:18 0:15:58 0:06:46 0:09:12 smithi master rados/singleton/{all/admin-socket.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
pass 2456993 2018-04-30 18:25:56 2018-05-01 00:11:56 2018-05-01 00:41:55 0:29:59 0:20:43 0:09:16 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
fail 2456994 2018-04-30 18:25:57 2018-05-01 00:12:06 2018-05-01 00:34:05 0:21:59 0:12:46 0:09:13 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

pass 2456995 2018-04-30 18:25:57 2018-05-01 00:12:39 2018-05-01 00:40:39 0:28:00 0:18:26 0:09:34 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore.yaml tasks/dashboard.yaml} 2
pass 2456996 2018-04-30 18:25:58 2018-05-01 00:12:39 2018-05-01 00:36:38 0:23:59 0:14:37 0:09:22 smithi master rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/cosbench_64K_read_write.yaml} 1
fail 2456997 2018-04-30 18:25:59 2018-05-01 00:12:39 2018-05-01 00:42:39 0:30:00 0:19:25 0:10:35 smithi master rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
Failure Reason:

Command failed (workunit test cephtool/test.sh) on smithi083 with status 252: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-partial-recovery-20180430 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

pass 2456998 2018-04-30 18:26:00 2018-05-01 00:12:39 2018-05-01 00:38:38 0:25:59 0:14:36 0:11:23 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2456999 2018-04-30 18:26:00 2018-05-01 00:13:07 2018-05-01 00:45:07 0:32:00 0:23:02 0:08:58 smithi master rados/standalone/mon.yaml 1
pass 2457000 2018-04-30 18:26:01 2018-05-01 00:13:10 2018-05-01 00:41:10 0:28:00 0:19:17 0:08:43 smithi master rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml} 2
pass 2457001 2018-04-30 18:26:02 2018-05-01 00:13:21 2018-05-01 00:49:21 0:36:00 0:23:03 0:12:57 smithi master rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} 3
pass 2457002 2018-04-30 18:26:02 2018-05-01 00:14:25 2018-05-01 00:36:23 0:21:58 0:09:32 0:12:26 smithi master rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
dead 2457003 2018-04-30 18:26:03 2018-05-01 00:14:30 2018-05-01 12:17:01 12:02:31 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
pass 2457004 2018-04-30 18:26:04 2018-05-01 00:15:05 2018-05-01 00:33:04 0:17:59 0:08:21 0:09:38 smithi master rados/singleton/{all/divergent_priors.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml} 1
fail 2457005 2018-04-30 18:26:04 2018-05-01 00:15:10 2018-05-01 00:37:10 0:22:00 0:11:32 0:10:28 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'

pass 2457006 2018-04-30 18:26:05 2018-05-01 00:15:50 2018-05-01 00:43:49 0:27:59 0:17:31 0:10:28 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
pass 2457007 2018-04-30 18:26:06 2018-05-01 00:15:56 2018-05-01 00:47:56 0:32:00 0:19:18 0:12:42 smithi master rados/multimon/{clusters/21.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml} 3
fail 2457008 2018-04-30 18:26:07 2018-05-01 00:16:05 2018-05-01 00:36:05 0:20:00 0:10:27 0:09:33 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457009 2018-04-30 18:26:07 2018-05-01 00:16:11 2018-05-01 00:44:11 0:28:00 0:19:23 0:08:37 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_api_tests.yaml} 2
fail 2457010 2018-04-30 18:26:08 2018-05-01 00:16:31 2018-05-01 00:36:31 0:20:00 0:09:24 0:10:36 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457011 2018-04-30 18:26:09 2018-05-01 00:16:35 2018-05-01 00:52:35 0:36:00 0:28:48 0:07:12 smithi master rhel 7.5 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported/rhel_latest.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
fail 2457012 2018-04-30 18:26:10 2018-05-01 00:17:18 2018-05-01 00:39:18 0:22:00 0:10:57 0:11:03 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml} 2
Failure Reason:

"2018-05-01 00:36:52.510366 osd.5 osd.5 172.21.15.39:6800/12736 170 : cluster [ERR] 3.0 : soid 3:04765b2b:::smithi19816171-54:head data_digest 0x43603094 != data_digest 0x772b4d36 from shard 5" in cluster log

pass 2457013 2018-04-30 18:26:10 2018-05-01 00:17:18 2018-05-01 00:35:18 0:18:00 0:09:35 0:08:25 smithi master rados/singleton/{all/divergent_priors2.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml} 1
pass 2457014 2018-04-30 18:26:11 2018-05-01 00:17:41 2018-05-01 00:33:40 0:15:59 0:06:03 0:09:56 smithi master rados/singleton-nomsgr/{all/large-omap-object-warnings.yaml rados.yaml} 1
pass 2457015 2018-04-30 18:26:12 2018-05-01 00:18:34 2018-05-01 00:46:34 0:28:00 0:17:24 0:10:36 smithi master rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/cosbench_64K_write.yaml} 1
fail 2457016 2018-04-30 18:26:12 2018-05-01 00:18:34 2018-05-01 00:58:34 0:40:00 0:28:26 0:11:34 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
Failure Reason:

"2018-05-01 00:44:55.751921 osd.3 osd.3 172.21.15.119:6812/12818 460 : cluster [ERR] 2.66 missing primary copy of 2:669460aa:::smithi11916110-30:1, unfound" in cluster log

pass 2457017 2018-04-30 18:26:13 2018-05-01 00:18:42 2018-05-01 00:44:42 0:26:00 0:16:26 0:09:34 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2457018 2018-04-30 18:26:14 2018-05-01 00:18:43 2018-05-01 00:42:42 0:23:59 0:11:49 0:12:10 smithi master rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/many.yaml workloads/rados_mon_workunits.yaml} 2
pass 2457019 2018-04-30 18:26:15 2018-05-01 00:18:56 2018-05-01 00:32:56 0:14:00 0:04:49 0:09:11 smithi master rados/objectstore/fusestore.yaml 1
pass 2457020 2018-04-30 18:26:15 2018-05-01 00:19:12 2018-05-01 00:39:11 0:19:59 0:08:57 0:11:02 smithi master rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml} 2
fail 2457021 2018-04-30 18:26:16 2018-05-01 00:19:13 2018-05-01 01:17:13 0:58:00 0:46:26 0:11:34 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
Failure Reason:

"2018-05-01 00:51:38.725205 osd.6 osd.6 172.21.15.188:6808/12863 533 : cluster [ERR] 6.26 5 tried to pull 6:653616e1:::benchmark_data_smithi035_79362_object158:head but got (2) No such file or directory" in cluster log

fail 2457022 2018-04-30 18:26:17 2018-05-01 00:19:20 2018-05-01 00:43:20 0:24:00 0:12:21 0:11:39 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
Failure Reason:

"2018-05-01 00:38:36.893043 osd.3 osd.3 172.21.15.112:6813/12690 169 : cluster [ERR] 2.2 : soid 2:6cb639f2:::smithi11216359-328 low_tier:head data_digest 0x5681d562 != data_digest 0xa6d8f318 from shard 3" in cluster log

pass 2457023 2018-04-30 18:26:17 2018-05-01 00:20:48 2018-05-01 00:42:47 0:21:59 0:09:44 0:12:15 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rbd_cls.yaml} 3
pass 2457024 2018-04-30 18:26:18 2018-05-01 00:20:48 2018-05-01 00:36:47 0:15:59 0:07:45 0:08:14 smithi master rados/singleton/{all/dump-stuck.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
pass 2457025 2018-04-30 18:26:19 2018-05-01 00:20:55 2018-05-01 00:42:54 0:21:59 0:11:06 0:10:53 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 2457026 2018-04-30 18:26:19 2018-05-01 00:20:56 2018-05-01 00:44:55 0:23:59 0:12:49 0:11:10 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 2457027 2018-04-30 18:26:20 2018-05-01 00:21:08 2018-05-01 00:45:07 0:23:59 0:13:22 0:10:37 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
pass 2457028 2018-04-30 18:26:21 2018-05-01 00:21:25 2018-05-01 00:43:24 0:21:59 0:11:06 0:10:53 smithi master rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/fio_4K_rand_read.yaml} 1
fail 2457029 2018-04-30 18:26:22 2018-05-01 00:21:26 2018-05-01 00:53:25 0:31:59 0:18:48 0:13:11 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
Failure Reason:

"2018-05-01 00:45:07.502839 osd.5 osd.5 172.21.15.93:6813/19678 107 : cluster [ERR] 2.3c : soid 2:3d20e30e:::smithi00814248-471 oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo:head data_digest 0x51fe7bb9 != data_digest 0x8a61aec7 from shard 5" in cluster log

pass 2457030 2018-04-30 18:26:22 2018-05-01 00:22:27 2018-05-01 00:54:26 0:31:59 0:21:57 0:10:02 smithi master rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
pass 2457031 2018-04-30 18:26:23 2018-05-01 00:22:50 2018-05-01 00:40:49 0:17:59 0:08:37 0:09:22 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_cls_all.yaml} 2
fail 2457032 2018-04-30 18:26:24 2018-05-01 00:23:18 2018-05-01 00:51:17 0:27:59 0:18:08 0:09:51 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

fail 2457033 2018-04-30 18:26:25 2018-05-01 00:24:25 2018-05-01 00:46:25 0:22:00 0:10:30 0:11:30 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

pass 2457034 2018-04-30 18:26:25 2018-05-01 00:24:25 2018-05-01 00:56:25 0:32:00 0:23:01 0:08:59 smithi master rados/singleton-nomsgr/{all/msgr.yaml rados.yaml} 1
pass 2457035 2018-04-30 18:26:26 2018-05-01 00:24:30 2018-05-01 00:48:30 0:24:00 0:15:14 0:08:46 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2457036 2018-04-30 18:26:27 2018-05-01 00:24:33 2018-05-01 00:48:32 0:23:59 0:13:35 0:10:24 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
pass 2457037 2018-04-30 18:26:27 2018-05-01 00:24:35 2018-05-01 00:38:35 0:14:00 0:04:48 0:09:12 smithi master rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml} 1
fail 2457038 2018-04-30 18:26:28 2018-05-01 00:24:38 2018-05-01 00:44:38 0:20:00 0:09:37 0:10:23 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'

fail 2457039 2018-04-30 18:26:29 2018-05-01 00:24:53 2018-05-01 00:44:53 0:20:00 0:09:10 0:10:50 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml} 2
Failure Reason:

"2018-05-01 00:39:10.986607 mon.b mon.0 172.21.15.90:6789/0 71 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

fail 2457040 2018-04-30 18:26:29 2018-05-01 00:24:56 2018-05-01 00:54:55 0:29:59 0:19:45 0:10:14 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2457041 2018-04-30 18:26:30 2018-05-01 00:24:56 2018-05-01 00:44:55 0:19:59 0:10:03 0:09:56 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457042 2018-04-30 18:26:31 2018-05-01 00:25:14 2018-05-01 00:45:13 0:19:59 0:10:38 0:09:21 smithi master rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/fio_4K_rand_rw.yaml} 1
fail 2457043 2018-04-30 18:26:31 2018-05-01 00:26:25 2018-05-01 00:46:25 0:20:00 0:09:07 0:10:53 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457044 2018-04-30 18:26:32 2018-05-01 00:26:34 2018-05-01 01:02:34 0:36:00 0:26:49 0:09:11 smithi master rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml} 2
fail 2457045 2018-04-30 18:26:33 2018-05-01 00:26:36 2018-05-01 00:56:36 0:30:00 0:20:47 0:09:13 smithi master rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml} 1
Failure Reason:

"2018-05-01 00:45:02.920301 mon.a mon.0 172.21.15.153:6789/0 324 : cluster [WRN] Health check failed: 1 slow ops, oldest one blocked for 33 sec (SLOW_OPS)" in cluster log

pass 2457046 2018-04-30 18:26:33 2018-05-01 00:26:39 2018-05-01 00:46:38 0:19:59 0:09:56 0:10:03 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml} 2
pass 2457047 2018-04-30 18:26:34 2018-05-01 00:26:52 2018-05-01 00:40:51 0:13:59 0:05:33 0:08:26 smithi master rados/objectstore/keyvaluedb.yaml 1
pass 2457048 2018-04-30 18:26:35 2018-05-01 00:26:56 2018-05-01 01:24:56 0:58:00 0:50:06 0:07:54 smithi master rados/standalone/osd.yaml 1
pass 2457049 2018-04-30 18:26:35 2018-05-01 00:27:20 2018-05-01 00:55:20 0:28:00 0:19:05 0:08:55 smithi master rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} 2
pass 2457050 2018-04-30 18:26:36 2018-05-01 00:28:23 2018-05-01 00:48:23 0:20:00 0:09:16 0:10:44 smithi master rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} 3
pass 2457051 2018-04-30 18:26:37 2018-05-01 00:28:27 2018-05-01 00:52:26 0:23:59 0:09:52 0:14:07 smithi master rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 2457052 2018-04-30 18:26:38 2018-05-01 00:28:45 2018-05-01 01:20:46 0:52:01 0:41:24 0:10:37 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 3
fail 2457053 2018-04-30 18:26:38 2018-05-01 00:29:39 2018-05-01 00:51:39 0:22:00 0:11:19 0:10:41 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2457054 2018-04-30 18:26:39 2018-05-01 00:30:30 2018-05-01 00:50:30 0:20:00 0:10:48 0:09:12 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_python.yaml} 2
pass 2457055 2018-04-30 18:26:40 2018-05-01 00:30:32 2018-05-01 01:02:32 0:32:00 0:20:54 0:11:06 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2457056 2018-04-30 18:26:40 2018-05-01 00:30:33 2018-05-01 01:02:33 0:32:00 0:20:33 0:11:27 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
fail 2457057 2018-04-30 18:26:41 2018-05-01 00:30:36 2018-05-01 01:24:36 0:54:00 0:44:15 0:09:45 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
Failure Reason:

failed to recover before timeout expired

pass 2457058 2018-04-30 18:26:42 2018-05-01 00:30:57 2018-05-01 00:58:57 0:28:00 0:18:25 0:09:35 smithi master rados/singleton/{all/lost-unfound.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
pass 2457059 2018-04-30 18:26:43 2018-05-01 00:32:29 2018-05-01 01:04:28 0:31:59 0:21:31 0:10:28 smithi master rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml} 2
pass 2457060 2018-04-30 18:26:43 2018-05-01 00:32:58 2018-05-01 01:16:57 0:43:59 0:34:46 0:09:13 smithi master rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/one.yaml workloads/snaps-few-objects.yaml} 2
pass 2457061 2018-04-30 18:26:44 2018-05-01 00:33:01 2018-05-01 00:55:01 0:22:00 0:10:54 0:11:06 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 2457062 2018-04-30 18:26:45 2018-05-01 00:33:06 2018-05-01 01:07:05 0:33:59 0:23:47 0:10:12 smithi master ubuntu 16.04 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported/ubuntu_latest.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 2457063 2018-04-30 18:26:45 2018-05-01 00:33:09 2018-05-01 00:51:09 0:18:00 0:08:40 0:09:20 smithi master rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_read.yaml} 1
pass 2457064 2018-04-30 18:26:46 2018-05-01 00:33:52 2018-05-01 00:53:52 0:20:00 0:10:56 0:09:04 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
fail 2457065 2018-04-30 18:26:47 2018-05-01 00:34:06 2018-05-01 00:58:06 0:24:00 0:14:13 0:09:47 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
Failure Reason:

"2018-05-01 00:52:03.996139 osd.0 osd.0 172.21.15.28:6801/12898 148 : cluster [ERR] 3.16 : soid 3:6ba4015d:::smithi02815443-257 oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo:head data_digest 0xfed84bd1 != data_digest 0x6322c9e8 from shard 0" in cluster log

pass 2457066 2018-04-30 18:26:47 2018-05-01 00:34:33 2018-05-01 00:52:32 0:17:59 0:06:56 0:11:03 smithi master rados/multimon/{clusters/3.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_clock_no_skews.yaml} 2
fail 2457067 2018-04-30 18:26:48 2018-05-01 00:35:30 2018-05-01 01:01:26 0:25:56 0:15:27 0:10:29 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0'

pass 2457068 2018-04-30 18:26:49 2018-05-01 00:35:30 2018-05-01 00:51:26 0:15:56 0:06:31 0:09:25 smithi master rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
fail 2457069 2018-04-30 18:26:49 2018-05-01 00:35:30 2018-05-01 00:59:26 0:23:56 0:14:18 0:09:38 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2457070 2018-04-30 18:26:50 2018-05-01 00:35:33 2018-05-01 00:57:30 0:21:57 0:12:16 0:09:41 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 2457071 2018-04-30 18:26:51 2018-05-01 00:36:02 2018-05-01 01:08:01 0:31:59 0:22:26 0:09:33 smithi master rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml} 1
pass 2457072 2018-04-30 18:26:52 2018-05-01 00:36:06 2018-05-01 00:56:05 0:19:59 0:10:28 0:09:31 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2457073 2018-04-30 18:26:52 2018-05-01 00:36:25 2018-05-01 01:16:24 0:39:59 0:34:02 0:05:57 smithi master centos rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml} 2
pass 2457074 2018-04-30 18:26:53 2018-05-01 00:36:39 2018-05-01 00:58:39 0:22:00 0:12:13 0:09:47 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_stress_watch.yaml} 2
dead 2457075 2018-04-30 18:26:54 2018-05-01 00:36:40 2018-05-01 12:39:11 12:02:31 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
pass 2457076 2018-04-30 18:26:54 2018-05-01 00:36:39 2018-05-01 00:54:39 0:18:00 0:09:58 0:08:02 smithi master rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} 1
pass 2457077 2018-04-30 18:26:55 2018-05-01 00:36:40 2018-05-01 00:54:39 0:17:59 0:09:11 0:08:48 smithi master rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_rw.yaml} 1
fail 2457078 2018-04-30 18:26:56 2018-05-01 00:36:40 2018-05-01 00:58:39 0:21:59 0:10:34 0:11:25 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool base'

fail 2457079 2018-04-30 18:26:56 2018-05-01 00:36:48 2018-05-01 00:58:48 0:22:00 0:11:05 0:10:55 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457080 2018-04-30 18:26:57 2018-05-01 00:36:56 2018-05-01 00:58:56 0:22:00 0:09:55 0:12:05 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml} 3
pass 2457081 2018-04-30 18:26:58 2018-05-01 00:37:12 2018-05-01 00:53:11 0:15:59 0:06:01 0:09:58 smithi master rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml} 1
pass 2457082 2018-04-30 18:26:58 2018-05-01 00:38:39 2018-05-01 01:04:39 0:26:00 0:16:34 0:09:26 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
fail 2457083 2018-04-30 18:26:59 2018-05-01 00:38:40 2018-05-01 01:36:40 0:58:00 0:48:03 0:09:57 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457084 2018-04-30 18:27:00 2018-05-01 00:39:13 2018-05-01 01:05:13 0:26:00 0:15:53 0:10:07 smithi master rados/objectstore/objectcacher-stress.yaml 1
pass 2457085 2018-04-30 18:27:01 2018-05-01 00:39:20 2018-05-01 01:01:19 0:21:59 0:12:59 0:09:00 smithi master rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml} 1
fail 2457086 2018-04-30 18:27:01 2018-05-01 00:40:43 2018-05-01 00:58:42 0:17:59 0:08:35 0:09:24 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2457087 2018-04-30 18:27:02 2018-05-01 00:40:43 2018-05-01 01:02:42 0:21:59 0:11:54 0:10:05 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml tasks/module_selftest.yaml} 2
Failure Reason:

"2018-05-01 00:54:34.464688 mon.a mon.0 172.21.15.101:6789/0 95 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

fail 2457088 2018-04-30 18:27:03 2018-05-01 00:40:50 2018-05-01 01:58:51 1:18:01 1:07:06 0:10:55 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

pass 2457089 2018-04-30 18:27:03 2018-05-01 00:40:53 2018-05-01 01:10:52 0:29:59 0:19:57 0:10:02 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2457090 2018-04-30 18:27:04 2018-05-01 00:41:03 2018-05-01 01:25:03 0:44:00 0:32:58 0:11:02 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
pass 2457091 2018-04-30 18:27:05 2018-05-01 00:41:11 2018-05-01 00:59:11 0:18:00 0:09:54 0:08:06 smithi master rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml workloads/fio_4M_rand_write.yaml} 1
pass 2457092 2018-04-30 18:27:05 2018-05-01 00:41:20 2018-05-01 01:01:20 0:20:00 0:11:04 0:08:56 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
pass 2457093 2018-04-30 18:27:06 2018-05-01 00:42:07 2018-05-01 00:58:06 0:15:59 0:06:31 0:09:28 smithi master rados/singleton/{all/mon-auth-caps.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
pass 2457094 2018-04-30 18:27:07 2018-05-01 00:42:32 2018-05-01 01:00:31 0:17:59 0:07:10 0:10:49 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_striper.yaml} 2
pass 2457095 2018-04-30 18:27:07 2018-05-01 00:42:40 2018-05-01 01:04:40 0:22:00 0:11:23 0:10:37 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 2457096 2018-04-30 18:27:08 2018-05-01 00:42:44 2018-05-01 01:04:43 0:21:59 0:11:45 0:10:14 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 2457097 2018-04-30 18:27:09 2018-05-01 00:42:48 2018-05-01 01:06:48 0:24:00 0:14:29 0:09:31 smithi master rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync-many.yaml workloads/pool-create-delete.yaml} 2
pass 2457098 2018-04-30 18:27:10 2018-05-01 00:42:55 2018-05-01 01:28:55 0:46:00 0:36:02 0:09:58 smithi master rados/standalone/scrub.yaml 1
pass 2457099 2018-04-30 18:27:10 2018-05-01 00:42:56 2018-05-01 01:22:56 0:40:00 0:30:12 0:09:48 smithi master rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} 2
pass 2457100 2018-04-30 18:27:11 2018-05-01 00:43:32 2018-05-01 01:19:32 0:36:00 0:25:04 0:10:56 smithi master rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} 3
pass 2457101 2018-04-30 18:27:12 2018-05-01 00:43:33 2018-05-01 01:05:32 0:21:59 0:10:11 0:11:48 smithi master rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 2457102 2018-04-30 18:27:12 2018-05-01 00:43:51 2018-05-01 01:07:51 0:24:00 0:14:27 0:09:33 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
pass 2457103 2018-04-30 18:27:13 2018-05-01 00:44:12 2018-05-01 01:12:12 0:28:00 0:17:45 0:10:15 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
pass 2457104 2018-04-30 18:27:14 2018-05-01 00:44:32 2018-05-01 01:04:31 0:19:59 0:11:09 0:08:50 smithi master rados/singleton/{all/mon-config-keys.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} 1
fail 2457105 2018-04-30 18:27:14 2018-05-01 00:44:55 2018-05-01 01:16:53 0:31:58 0:23:40 0:08:18 smithi master rados/singleton-nomsgr/{all/recovery-unfound-found.yaml rados.yaml} 1
Failure Reason:

"2018-05-01 01:10:57.277024 osd.0 osd.0 172.21.15.131:6801/13234 7 : cluster [ERR] 2.0 shard 0: soid 2:002fb70e:::benchmark_data_smithi131_13369_object3791:head data_digest 0x6706be76 != data_digest 0xa6525ea7 from auth oi 2:002fb70e:::benchmark_data_smithi131_13369_object3791:head(16'1999 client.4230.0:3792 dirty|data_digest s 4096 uv 1999 dd a6525ea7 alloc_hint [4096 4096 53])" in cluster log

pass 2457106 2018-04-30 18:27:15 2018-05-01 00:44:53 2018-05-01 01:22:53 0:38:00 0:28:12 0:09:48 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
pass 2457107 2018-04-30 18:27:16 2018-05-01 00:44:56 2018-05-01 01:20:55 0:35:59 0:26:59 0:09:00 smithi master rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml} 2
fail 2457108 2018-04-30 18:27:16 2018-05-01 00:44:57 2018-05-01 01:06:56 0:21:59 0:10:00 0:11:59 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

pass 2457109 2018-04-30 18:27:17 2018-05-01 00:44:58 2018-05-01 01:04:57 0:19:59 0:10:37 0:09:22 smithi master rados/perf/{ceph.yaml objectstore/bluestore.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_rand_read.yaml} 1
pass 2457110 2018-04-30 18:27:18 2018-05-01 00:45:09 2018-05-01 01:09:08 0:23:59 0:13:11 0:10:48 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 2457111 2018-04-30 18:27:19 2018-05-01 00:45:09 2018-05-01 01:15:08 0:29:59 0:22:48 0:07:11 smithi master centos 7.4 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} supported/centos_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 2457112 2018-04-30 18:27:19 2018-05-01 00:45:13 2018-05-01 01:13:13 0:28:00 0:17:38 0:10:22 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 3
pass 2457113 2018-04-30 18:27:20 2018-05-01 00:45:15 2018-05-01 01:15:15 0:30:00 0:21:06 0:08:54 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
pass 2457114 2018-04-30 18:27:21 2018-05-01 00:46:36 2018-05-01 01:02:35 0:15:59 0:06:22 0:09:37 smithi master rados/singleton/{all/mon-config.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml} 1
pass 2457115 2018-04-30 18:27:21 2018-05-01 00:46:36 2018-05-01 01:06:36 0:20:00 0:11:09 0:08:51 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
fail 2457116 2018-04-30 18:27:22 2018-05-01 00:46:36 2018-05-01 01:12:36 0:26:00 0:15:00 0:11:00 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457117 2018-04-30 18:27:23 2018-05-01 00:46:40 2018-05-01 06:04:47 5:18:07 5:05:01 0:13:06 smithi master rados/objectstore/objectstore.yaml 1
pass 2457118 2018-04-30 18:27:23 2018-05-01 00:46:42 2018-05-01 01:24:42 0:38:00 0:27:57 0:10:03 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml} 2
fail 2457119 2018-04-30 18:27:24 2018-05-01 00:47:05 2018-05-01 01:07:04 0:19:59 0:08:53 0:11:06 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2457120 2018-04-30 18:27:25 2018-05-01 00:48:07 2018-05-01 01:12:07 0:24:00 0:11:52 0:12:08 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

fail 2457121 2018-04-30 18:27:25 2018-05-01 00:48:24 2018-05-01 01:16:24 0:28:00 0:19:12 0:08:48 smithi master rados/singleton/{all/mon-seesaw.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml} 1
Failure Reason:

"2018-05-01 01:14:25.210642 mon.a mon.0 172.21.15.157:6789/0 21 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

fail 2457122 2018-04-30 18:27:26 2018-05-01 00:48:31 2018-05-01 01:08:31 0:20:00 0:09:50 0:10:10 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

pass 2457123 2018-04-30 18:27:27 2018-05-01 00:48:33 2018-05-01 01:06:33 0:18:00 0:06:51 0:11:09 smithi master rados/multimon/{clusters/6.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml} 2
pass 2457124 2018-04-30 18:27:28 2018-05-01 00:49:18 2018-05-01 01:07:18 0:18:00 0:08:18 0:09:42 smithi master rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml workloads/radosbench_4K_seq_read.yaml} 1
pass 2457125 2018-04-30 18:27:28 2018-05-01 00:49:18 2018-05-01 01:21:18 0:32:00 0:21:19 0:10:41 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
pass 2457126 2018-04-30 18:27:29 2018-05-01 00:49:23 2018-05-01 01:23:22 0:33:59 0:23:38 0:10:21 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 2457127 2018-04-30 18:27:30 2018-05-01 00:50:35 2018-05-01 01:08:34 0:17:59 0:12:10 0:05:49 smithi master centos rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml} 1
pass 2457128 2018-04-30 18:27:30 2018-05-01 00:50:35 2018-05-01 01:10:34 0:19:59 0:08:13 0:11:46 smithi master rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml} 2
fail 2457129 2018-04-30 18:27:31 2018-05-01 00:51:10 2018-05-01 01:45:11 0:54:01 0:43:23 0:10:38 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
Failure Reason:

"2018-05-01 01:08:01.742311 osd.3 osd.3 172.21.15.194:6809/13146 99 : cluster [ERR] 2.25 missing primary copy of 2:a4d218dd:::benchmark_data_smithi194_15789_object25:head, will try copies on 6" in cluster log

fail 2457130 2018-04-30 18:27:32 2018-05-01 00:51:19 2018-05-01 01:09:18 0:17:59 0:07:47 0:10:12 smithi master rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/prometheus.yaml} 2
Failure Reason:

"2018-05-01 01:05:00.000514 mon.b mon.0 172.21.15.52:6789/0 60 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

pass 2457131 2018-04-30 18:27:32 2018-05-01 00:51:27 2018-05-01 01:11:27 0:20:00 0:10:04 0:09:56 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
pass 2457132 2018-04-30 18:27:33 2018-05-01 00:51:51 2018-05-01 01:11:50 0:19:59 0:11:11 0:08:48 smithi master rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml} 1
fail 2457133 2018-04-30 18:27:34 2018-05-01 00:52:28 2018-05-01 01:12:27 0:19:59 0:10:05 0:09:54 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 50 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

pass 2457134 2018-04-30 18:27:34 2018-05-01 00:52:36 2018-05-01 01:12:35 0:19:59 0:09:58 0:10:01 smithi master rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
fail 2457135 2018-04-30 18:27:35 2018-05-01 00:52:37 2018-05-01 01:12:36 0:19:59 0:09:35 0:10:24 smithi master rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_chunk --low_tier_pool low_tier --max-ops 4000 --objects 300 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0'

pass 2457136 2018-04-30 18:27:36 2018-05-01 00:53:23 2018-05-01 01:27:23 0:34:00 0:22:38 0:11:22 smithi master rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} 3
pass 2457137 2018-04-30 18:27:37 2018-05-01 00:53:27 2018-05-01 01:19:26 0:25:59 0:17:30 0:08:29 smithi master rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml} 2