Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Description | Failure Reason
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} | "2017-06-27 04:59:38.376644 mon.a mon.0 172.21.15.45:6789/0 1502 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 2 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | centos 7.3 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml supported/centos_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | "2017-06-27 05:02:49.189886 mon.b mon.0 172.21.15.129:6789/0 4934 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 14 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | "2017-06-27 04:51:37.607432 mon.a mon.0 172.21.15.2:6789/0 20 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} | "2017-06-27 04:52:48.124558 mon.a mon.0 172.21.15.83:6789/0 79 : cluster [WRN] HEALTH_WARN OSD_FLAGS: noout flag(s) set" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml} | "2017-06-27 04:54:10.978213 mon.b mon.0 172.21.15.80:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml} | "2017-06-27 05:04:36.395880 mon.b mon.0 172.21.15.155:6789/0 1290 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 6 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} | 'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml} | "2017-06-27 05:01:37.277949 mon.a mon.0 172.21.15.130:6789/0 713 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 8 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | 'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
wip-health | wip-health | master | smithi | | rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/bluestore-comp.yaml rados.yaml scrub_test.yaml} | "2017-06-27 04:58:01.131872 mon.a mon.0 172.21.15.100:6789/0 123 : cluster [ERR] HEALTH_ERR OSD_SCRUB_ERRORS: 2 scrub errors" in cluster log
wip-health | wip-health | master | smithi | | rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/failover.yaml} | "2017-06-27 04:56:17.966216 mon.b mon.0 172.21.15.35:6789/0 25 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/objectstore/alloc-hint.yaml | "2017-06-27 04:54:48.605534 mon.a mon.0 172.21.15.62:6789/0 59 : cluster [WRN] HEALTH_WARN PG_PEERING: 8 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/rest/mgr-restful.yaml | "2017-06-27 04:57:58.734631 mon.a mon.0 172.21.15.88:6789/0 57 : cluster [WRN] HEALTH_WARN PG_PEERING: 8 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} | Command failed (workunit test cephtool/test.sh) on smithi025 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml} | "2017-06-27 04:59:17.794209 mon.a mon.0 172.21.15.31:6789/0 162 : cluster [ERR] HEALTH_ERR OSD_FULL: 1 full osd(s)" in cluster log
wip-health | wip-health | master | smithi | | rados/upgrade/jewel-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-luminous.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml} | Command failed on smithi010 with status 22: 'sudo ceph osd new 0b1e14aa-8b86-45cb-8d42-34bc9ca3c001 3'
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} | "2017-06-27 05:17:46.071742 mon.b mon.0 172.21.15.21:6789/0 6134 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} | "2017-06-27 05:03:31.889292 mon.a mon.0 172.21.15.68:6789/0 585 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 3 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_python.yaml} | "2017-06-27 05:01:56.446061 mon.b mon.0 172.21.15.5:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/divergent_priors2.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} | "2017-06-27 05:00:12.924953 mon.a mon.0 172.21.15.9:6789/0 73 : cluster [WRN] HEALTH_WARN OSD_FLAGS: noout flag(s) set" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} | "2017-06-27 05:07:52.718902 mon.a mon.0 172.21.15.55:6789/0 2219 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/sync-many.yaml workloads/pool-create-delete.yaml} | "2017-06-27 05:00:04.722962 mon.b mon.0 172.21.15.163:6789/0 93 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml} | "2017-06-27 05:04:46.595412 mon.a mon.0 172.21.15.150:6789/0 24 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | "2017-06-27 05:13:13.856161 mon.b mon.0 172.21.15.143:6789/0 1673 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 2 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/singleton/{all/dump-stuck.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml} |
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | "2017-06-27 05:04:11.534970 mon.b mon.0 172.21.15.47:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} | "2017-06-27 05:03:52.944688 mon.b mon.0 172.21.15.157:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-luminous/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml workloads/redirect_set_object.yaml} | "2017-06-27 05:06:01.156724 mon.a mon.0 172.21.15.19:6789/0 24 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} | "2017-06-27 05:14:31.749619 mon.b mon.0 172.21.15.22:6789/0 1543 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml} | "2017-06-27 05:06:16.874026 mon.a mon.0 172.21.15.29:6789/0 134 : cluster [WRN] HEALTH_WARN OSD_CACHE_NO_HIT_SET: 1 cache pools are missing hit_sets" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | "2017-06-27 05:05:00.917426 mon.a mon.0 172.21.15.65:6789/0 18 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_stress_watch.yaml} | "2017-06-27 05:06:30.937164 mon.a mon.0 172.21.15.62:6789/0 18 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml} | "2017-06-27 05:08:04.142928 mon.a mon.0 172.21.15.18:6789/0 157 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 16 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/objectstore/ceph_objectstore_tool.yaml | "2017-06-27 05:14:05.740095 mon.a mon.0 172.21.15.144:6789/0 311 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 2 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | "2017-06-27 05:24:49.314441 mon.b mon.0 172.21.15.71:6789/0 6242 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} | "2017-06-27 05:10:30.316399 mon.b mon.0 172.21.15.110:6789/0 808 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | "2017-06-27 05:06:41.690123 mon.a mon.0 172.21.15.112:6789/0 25 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml supported/ubuntu_14.04.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | "2017-06-27 05:15:55.508209 mon.b mon.0 172.21.15.49:6789/0 851 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml} | "2017-06-27 05:14:45.047390 mon.a mon.0 172.21.15.151:6789/0 512 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 12 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/sync.yaml workloads/rados_5925.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml} | "2017-06-27 05:08:02.848654 mon.b mon.0 172.21.15.13:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/multimon/{clusters/3.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/mon_clock_no_skews.yaml} | 'timechecks'
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml} | "2017-06-27 05:16:11.830470 mon.a mon.0 172.21.15.53:6789/0 18 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} | "2017-06-27 05:17:44.893701 mon.b mon.0 172.21.15.163:6789/0 23 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml} | "2017-06-27 05:10:46.445025 mon.b mon.0 172.21.15.106:6789/0 553 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 6 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} | "2017-06-27 05:10:18.140866 mon.b mon.0 172.21.15.1:6789/0 188 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml} | "2017-06-27 05:13:26.398211 mon.b mon.0 172.21.15.35:6789/0 568 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 2 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | "2017-06-27 05:10:58.102975 mon.a mon.0 172.21.15.12:6789/0 23 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} | "2017-06-27 05:10:11.123661 mon.b mon.0 172.21.15.96:6789/0 617 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 2 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_striper.yaml} | "2017-06-27 05:09:18.728302 mon.b mon.0 172.21.15.88:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/lost-unfound.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml} | "2017-06-27 05:16:43.028494 mon.a mon.0 172.21.15.20:6789/0 336 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 12 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml} | reached maximum tries (105) after waiting for 630 seconds
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml} | "2017-06-27 05:18:21.626677 mon.a mon.0 172.21.15.9:6789/0 2079 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml} |
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | "2017-06-27 05:26:45.427435 mon.b mon.0 172.21.15.65:6789/0 3271 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | "2017-06-27 05:09:45.687432 mon.a mon.0 172.21.15.89:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/singleton/{all/mon-config-keys.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml} | "2017-06-27 05:12:49.560567 mon.a mon.0 172.21.15.160:6789/0 46 : cluster [WRN] HEALTH_WARN PG_PEERING: 4 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/objectstore/filejournal.yaml |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} | "2017-06-27 05:20:55.588704 mon.b mon.0 172.21.15.25:6789/0 1513 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 24 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-luminous/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml workloads/redirect.yaml} | "2017-06-27 05:10:47.693480 mon.b mon.0 172.21.15.94:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} | "2017-06-27 05:17:30.501642 mon.b mon.0 172.21.15.46:6789/0 1319 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 6 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_api_tests.yaml} |
wip-health | wip-health | master | smithi | | rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/bluestore.yaml rados.yaml scrub_test.yaml} | "2017-06-27 05:14:47.531366 mon.b mon.0 172.21.15.14:6789/0 125 : cluster [ERR] HEALTH_ERR OSD_SCRUB_ERRORS: 2 scrub errors" in cluster log
wip-health | wip-health | master | smithi | | rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore.yaml tasks/failover.yaml} | "2017-06-27 05:14:09.852584 mon.b mon.0 172.21.15.33:6789/0 27 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} | Command failed (workunit test cephtool/test.sh) on smithi154 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | "2017-06-27 05:17:55.722077 mon.b mon.0 172.21.15.69:6789/0 23 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml} | "2017-06-27 05:16:26.078819 mon.a mon.0 172.21.15.132:6789/0 110 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/mon-seesaw.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | "2017-06-27 05:22:48.125428 mon.b mon.0 172.21.15.97:6789/0 1897 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 2 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} | "2017-06-27 05:16:43.778809 mon.a mon.0 172.21.15.29:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | "2017-06-27 05:36:20.700930 mon.a mon.0 172.21.15.4:6789/0 3360 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml} | "2017-06-27 05:17:38.848116 mon.a mon.0 172.21.15.131:6789/0 74 : cluster [WRN] HEALTH_WARN OSD_CACHE_NO_HIT_SET: 1 cache pools are missing hit_sets" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/mon-thrasher.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml} | Command failed on smithi186 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph quorum_status'
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml} | "2017-06-27 05:21:39.979568 mon.a mon.0 172.21.15.70:6789/0 1235 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 8 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | ubuntu 16.04 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | "2017-06-27 05:16:47.371267 mon.b mon.0 172.21.15.81:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} | "2017-06-27 05:34:07.818569 mon.b mon.0 172.21.15.8:6789/0 3982 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 3 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} | "2017-06-27 05:28:41.403164 mon.b mon.0 172.21.15.5:6789/0 4492 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml} | "2017-06-27 05:21:28.885185 mon.a mon.0 172.21.15.31:6789/0 655 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 7 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} | "2017-06-27 05:21:11.083284 mon.b mon.0 172.21.15.138:6789/0 114 : cluster [WRN] HEALTH_WARN PG_PEERING: 1 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml} | "2017-06-27 05:28:07.187166 mon.a mon.0 172.21.15.2:6789/0 1810 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | "2017-06-27 05:26:21.616743 mon.a mon.0 172.21.15.12:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/objectstore/filestore-idempotent-aio-journal.yaml | "2017-06-27 05:18:12.670592 mon.a mon.0 172.21.15.88:6789/0 47 : cluster [WRN] HEALTH_WARN PG_PEERING: 8 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml} | "2017-06-27 05:19:15.916318 mon.b mon.0 172.21.15.188:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml} | "2017-06-27 05:21:21.987335 mon.a mon.0 172.21.15.141:6789/0 24 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml} | "2017-06-27 05:27:41.994282 mon.a mon.0 172.21.15.93:6789/0 394 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 5 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/multimon/{clusters/6.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml} | 'timechecks'
wip-health | wip-health | master | smithi | centos | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml} | "2017-06-27 05:26:51.899377 mon.a mon.0 172.21.15.19:6789/0 17 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} | "2017-06-27 05:24:37.319189 mon.a mon.0 172.21.15.162:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml thrashers/many.yaml workloads/rados_mon_workunits.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | "2017-06-27 05:35:04.630957 mon.a mon.0 172.21.15.36:6789/0 3676 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | "2017-06-27 05:28:43.039520 mon.b mon.0 172.21.15.3:6789/0 1576 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml} | "2017-06-27 05:23:20.195427 mon.a mon.0 172.21.15.58:6789/0 78 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 8 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} | "2017-06-27 05:28:19.764149 mon.b mon.0 172.21.15.1:6789/0 851 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 7 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-luminous/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml workloads/redirect_set_object.yaml} | "2017-06-27 05:23:57.047840 mon.a mon.0 172.21.15.92:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} | "2017-06-27 05:24:56.222890 mon.a mon.0 172.21.15.62:6789/0 30 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml} | Command failed on smithi007 with status 1: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph osd pool set-quota ec-ca max_bytes 0'"
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | "2017-06-27 05:24:21.152549 mon.a mon.0 172.21.15.100:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_workunit_loadgen_mostlyread.yaml} | "2017-06-27 05:25:11.816623 mon.b mon.1 172.21.15.175:6789/0 2 : cluster [WRN] message from mon.0 was stamped 0.508636s in the future, clocks not synchronized" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/osd-recovery.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml} | "2017-06-27 05:25:24.697352 mon.a mon.0 172.21.15.109:6789/0 104 : cluster [WRN] HEALTH_WARN DEGRADED_OBJECTS: 19444/77984 objects degraded (24.933%)" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | "2017-06-27 05:39:45.003245 mon.a mon.0 172.21.15.55:6789/0 3538 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 2 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} | "2017-06-27 05:32:40.594650 mon.b mon.0 172.21.15.131:6789/0 1060 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health | wip-health | master | smithi | | rados/objectstore/filestore-idempotent.yaml |
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | "2017-06-27 05:36:34.394660 mon.a mon.0 172.21.15.68:6789/0 3308 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 23 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/one.yaml workloads/snaps-few-objects.yaml} |
wip-health | wip-health | master | smithi | | rados/singleton/{all/peer.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml} | "2017-06-27 05:27:46.136396 mon.b mon.0 172.21.15.33:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | centos 7.3 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml supported/centos_latest.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | "2017-06-27 05:37:48.658638 mon.b mon.0 172.21.15.13:6789/0 3117 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 3 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} | "2017-06-27 05:36:12.804499 mon.b mon.0 172.21.15.94:6789/0 3979 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} | "2017-06-27 05:37:21.489746 mon.a mon.0 172.21.15.26:6789/0 3389 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} | "2017-06-27 05:34:05.590963 mon.a mon.0 172.21.15.77:6789/0 71 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health | wip-health | master | smithi | | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/readwrite.yaml} | "2017-06-27 05:29:53.688754 mon.a mon.0 172.21.15.174:6789/0 25 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} | "2017-06-27 05:31:07.664116 mon.a mon.0 172.21.15.53:6789/0 226 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} | "2017-06-27 05:33:17.808888 mon.c mon.0 172.21.15.57:6789/0 181 : cluster [WRN] HEALTH_WARN PG_PEERING: 2 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml} | "2017-06-27 05:34:13.113112 mon.b mon.0 172.21.15.66:6789/0 450 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 1 pgs incomplete" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | "2017-06-27 05:36:45.802381 mon.b mon.0 172.21.15.56:6789/0 18 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/filestore-btrfs.yaml rados.yaml scrub_test.yaml} | "2017-06-27 05:32:13.892065 mon.b mon.0 172.21.15.54:6789/0 119 : cluster [ERR] HEALTH_ERR OSD_SCRUB_ERRORS: 2 scrub errors" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-btrfs.yaml tasks/failover.yaml} | "2017-06-27 05:33:28.719075 mon.b mon.0 172.21.15.169:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml} | Command failed (workunit test cephtool/test.sh) on smithi135 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml} | "2017-06-27 05:34:16.166505 mon.a mon.0 172.21.15.9:6789/0 194 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} | "2017-06-27 05:41:25.954589 mon.b mon.0 172.21.15.51:6789/0 1909 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 7 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | "2017-06-27 05:36:06.581567 mon.b mon.0 172.21.15.101:6789/0 24 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | "2017-06-27 05:42:33.929911 mon.b mon.0 172.21.15.133:6789/0 1287 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 2 pgs stuck inactive" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/singleton/{all/radostool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml} | "2017-06-27 05:36:42.118896 mon.a mon.0 172.21.15.69:6789/0 117 : cluster [WRN] HEALTH_WARN PG_PEERING: 8 pgs peering" in cluster log
wip-health | wip-health | master | smithi | | rados/multimon/{clusters/6.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml} | "2017-06-27 05:40:15.521596 mon.a mon.0 172.21.15.18:6789/0 35 : cluster [WRN] HEALTH_WARN MON_DOWN: 1/6 mons down, quorum a,b,c,d,f" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} | "2017-06-27 05:36:42.851942 mon.b mon.0 172.21.15.59:6789/0 20 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash-luminous/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml workloads/redirect.yaml} | "2017-06-27 05:37:03.520528 mon.a mon.0 172.21.15.45:6789/0 32 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml} | "2017-06-27 05:36:56.463216 mon.a mon.0 172.21.15.95:6789/0 24 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} | "2017-06-27 05:37:58.130372 mon.b mon.0 172.21.15.21:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/objectstore/fusestore.yaml |
wip-health | wip-health | master | smithi | | rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/one.yaml workloads/pool-create-delete.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | "2017-06-27 05:37:24.474417 mon.a mon.0 172.21.15.108:6789/0 18 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/repair_test.yaml} | "2017-06-27 05:40:02.349593 mon.b mon.0 172.21.15.20:6789/0 195 : cluster [ERR] HEALTH_ERR OSD_SCRUB_ERRORS: 1 scrub errors" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton/{all/rebuild-mondb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | "2017-06-27 05:40:01.763107 mon.b mon.0 172.21.15.63:6789/0 27 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health | wip-health | master | smithi | ubuntu 14.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} | "2017-06-27 05:52:50.262086 mon.a mon.0 172.21.15.46:6789/0 2213 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 2 pgs stuck unclean" in cluster log
wip-health | wip-health | master | smithi | | rados/singleton-nomsgr/{all/msgr.yaml rados.yaml} |
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | "2017-06-27 05:40:10.397947 mon.a mon.0 172.21.15.60:6789/0 269 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health | wip-health | master | smithi | | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml} |
"2017-06-27 05:38:58.340070 mon.a mon.0 172.21.15.40:6789/0 36 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/reg11184.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
"2017-06-27 05:38:10.625606 mon.a mon.0 172.21.15.204:6789/0 69 : cluster [WRN] HEALTH_WARN PG_PEERING: 2 pgs peering" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2017-06-27 05:46:59.654623 mon.a mon.0 172.21.15.145:6789/0 2496 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 2 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml supported/ubuntu_14.04.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-06-27 05:49:08.548357 mon.b mon.0 172.21.15.54:6789/0 1917 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 3 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-06-27 05:44:06.855194 mon.b mon.0 172.21.15.58:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2017-06-27 05:45:08.188195 mon.a mon.0 172.21.15.130:6789/0 1055 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 3 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rgw_snaps.yaml}
"2017-06-27 05:42:27.794514 mon.b mon.0 172.21.15.38:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
"2017-06-27 05:42:27.009052 mon.a mon.0 172.21.15.200:6789/0 106 : cluster [WRN] HEALTH_WARN OSD_DOWN: 2 osds down" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
"2017-06-27 05:47:19.745329 mon.a mon.0 172.21.15.31:6789/0 905 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 2 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync-many.yaml workloads/rados_5925.yaml}
"2017-06-27 05:44:04.867177 mon.a mon.0 172.21.15.1:6789/0 114 : cluster [WRN] HEALTH_WARN PG_PEERING: 1 pgs peering" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml}
"2017-06-27 05:53:37.990505 mon.a mon.0 172.21.15.74:6789/0 2579 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
"2017-06-27 05:49:33.905590 mon.a mon.0 172.21.15.162:6789/0 225 : cluster [WRN] HEALTH_WARN PG_PEERING: 1 pgs peering" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
"2017-06-27 05:45:42.100289 mon.a mon.0 172.21.15.9:6789/0 751 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 2 pgs incomplete" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
"2017-06-27 06:53:15.442469 mon.b mon.0 172.21.15.49:6789/0 20 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/keyvaluedb.yaml
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
"2017-06-27 06:01:09.901487 mon.b mon.0 172.21.15.35:6789/0 4078 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 3 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
"2017-06-27 05:43:21.840805 mon.b mon.0 172.21.15.76:6789/0 26 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/rest-api.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml}
"2017-06-27 05:44:35.465695 mon.a mon.0 172.21.15.171:6789/0 59 : cluster [WRN] HEALTH_WARN PG_PEERING: 4 pgs peering" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
"2017-06-27 05:46:24.996416 mon.b mon.0 172.21.15.33:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml workloads/redirect_set_object.yaml}
"2017-06-27 05:44:51.578816 mon.b mon.0 172.21.15.100:6789/0 27 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml}
"2017-06-27 05:46:32.913653 mon.a mon.0 172.21.15.12:6789/0 92 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
"2017-06-27 05:52:31.317922 mon.a mon.0 172.21.15.3:6789/0 1814 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 4 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
"2017-06-27 05:47:51.858082 mon.b mon.0 172.21.15.7:6789/0 20 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
"2017-06-27 06:00:15.474005 mon.b mon.0 172.21.15.49:6789/0 4674 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_api_tests.yaml}
"2017-06-27 05:46:23.607712 mon.b mon.0 172.21.15.71:6789/0 25 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/multimon/{clusters/9.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
'timechecks'
wip-health
wip-health
master
smithi
 
rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2017-06-27 05:48:12.345456 mon.a mon.0 172.21.15.98:6789/0 95 : cluster [WRN] HEALTH_WARN PG_PEERING: 1 pgs peering" in cluster log
wip-health
wip-health
master
smithi
centos 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml}
"2017-06-27 05:53:09.062572 mon.a mon.0 172.21.15.5:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
"2017-06-27 06:01:27.176904 mon.a mon.0 172.21.15.67:6789/0 4933 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/filestore-xfs.yaml rados.yaml scrub_test.yaml}
"2017-06-27 05:51:14.817528 mon.a mon.0 172.21.15.37:6789/0 126 : cluster [ERR] HEALTH_ERR OSD_SCRUB_ERRORS: 2 scrub errors" in cluster log
wip-health
wip-health
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml}
"2017-06-27 05:48:42.394958 mon.a mon.0 172.21.15.56:6789/0 16 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi182 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
"2017-06-27 05:51:25.969364 mon.a mon.0 172.21.15.135:6789/0 23 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
"2017-06-27 05:49:19.137701 mon.a mon.0 172.21.15.112:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml thrashers/sync.yaml workloads/rados_api_tests.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
"2017-06-27 05:53:03.968453 mon.a mon.0 172.21.15.29:6789/0 118 : cluster [WRN] HEALTH_WARN OSD_DOWN: 1 osds down" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/objectcacher-stress.yaml
"2017-06-27 05:51:53.331790 mon.a mon.0 172.21.15.8:6789/0 43 : cluster [WRN] HEALTH_WARN PG_PEERING: 8 pgs peering" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2017-06-27 05:58:17.805241 mon.a mon.0 172.21.15.45:6789/0 2762 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-06-27 05:57:13.861491 mon.b mon.0 172.21.15.109:6789/0 1568 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 2 pgs incomplete" in cluster log
wip-health
wip-health
master
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-06-27 06:02:26.171287 mon.a mon.0 172.21.15.95:6789/0 3798 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 4 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2017-06-27 06:02:54.421847 mon.a mon.0 172.21.15.18:6789/0 3068 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_cls_all.yaml}
"2017-06-27 05:52:31.983806 mon.b mon.0 172.21.15.65:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
"2017-06-27 05:54:43.816171 mon.a mon.0 172.21.15.26:6789/0 101 : cluster [WRN] HEALTH_WARN TOO_FEW_PGS: too few PGs per OSD (1 < min 2)" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
"2017-06-27 05:54:25.148570 mon.b mon.0 172.21.15.24:6789/0 17 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
"2017-06-27 06:00:24.196699 mon.a mon.0 172.21.15.53:6789/0 1991 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 4 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
"2017-06-27 05:58:40.741595 mon.b mon.0 172.21.15.4:6789/0 659 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 7 pgs incomplete" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
"2017-06-27 05:56:36.059581 mon.b mon.0 172.21.15.97:6789/0 147 : cluster [WRN] HEALTH_WARN PG_PEERING: 1 pgs peering" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
"2017-06-27 05:55:41.607258 mon.b mon.0 172.21.15.93:6789/0 285 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 2 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
"2017-06-27 06:00:45.176344 mon.b mon.0 172.21.15.90:6789/0 19 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
"2017-06-27 05:59:54.656998 mon.a mon.0 172.21.15.91:6789/0 1801 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
"2017-06-27 05:56:18.076912 mon.a mon.0 172.21.15.40:6789/0 25 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml workloads/redirect.yaml}
"2017-06-27 06:00:31.197706 mon.a mon.0 172.21.15.1:6789/0 521 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml}
"2017-06-27 05:55:45.281859 mon.a mon.0 172.21.15.171:6789/0 51 : cluster [WRN] HEALTH_WARN OSD_FLAGS: noout flag(s) set" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
"2017-06-27 05:56:45.211744 mon.a mon.0 172.21.15.66:6789/0 26 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_mon_workunits.yaml}
"2017-06-27 06:01:30.176211 mon.b mon.0 172.21.15.68:6789/0 73 : cluster [WRN] HEALTH_WARN PG_PEERING: 2 pgs peering" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
"2017-06-27 06:02:57.897648 mon.a mon.0 172.21.15.162:6789/0 20 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
"2017-06-27 06:10:30.293752 mon.a mon.0 172.21.15.19:6789/0 3291 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/objectstore.yaml
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_python.yaml}
"2017-06-27 05:58:39.772462 mon.a mon.0 172.21.15.51:6789/0 28 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/admin-socket.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml}
"2017-06-27 05:56:49.188447 mon.a mon.0 172.21.15.70:6789/0 47 : cluster [WRN] HEALTH_WARN PG_PEERING: 8 pgs peering" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
"2017-06-27 06:10:28.723067 mon.a mon.0 172.21.15.20:6789/0 3881 : cluster [ERR] HEALTH_ERR PG_STUCK_UNCLEAN: 1 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
centos 
rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml}
"2017-06-27 06:02:47.377453 mon.a mon.0 172.21.15.139:6789/0 57 : cluster [WRN] HEALTH_WARN PG_DEGRADED: 8 pgs degraded" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
"2017-06-27 06:06:48.248659 mon.b mon.0 172.21.15.33:6789/0 1374 : cluster [ERR] HEALTH_ERR OSD_OUT_OF_ORDER_FULL: full ratio(s) out of order" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
"2017-06-27 06:04:52.227224 mon.b mon.0 172.21.15.57:6789/0 1403 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 2 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/multimon/{clusters/21.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
'timechecks'
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml}
"2017-06-27 06:02:52.938701 mon.a mon.0 172.21.15.56:6789/0 865 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 2 pgs incomplete" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2017-06-27 06:01:09.642230 mon.a mon.0 172.21.15.185:6789/0 180 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 3 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2017-06-27 06:02:27.900851 mon.b mon.0 172.21.15.30:6789/0 21 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-06-27 06:01:55.381087 mon.b mon.0 172.21.15.100:6789/0 407 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 3 pgs incomplete" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2017-06-27 06:08:13.767328 mon.b mon.0 172.21.15.22:6789/0 1260 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
centos 7.3
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml supported/centos_latest.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-06-27 06:10:06.519807 mon.b mon.0 172.21.15.13:6789/0 2796 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
"2017-06-27 06:11:49.724606 mon.a mon.0 172.21.15.158:6789/0 2685 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_stress_watch.yaml}
"2017-06-27 06:03:17.670696 mon.b mon.0 172.21.15.76:6789/0 22 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log