Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Description | Failure Reason
Each job below lists these fields in order, one per line; the OS line is blank for some jobs, and the Failure Reason line appears only for jobs that reported one.
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-health
wip-health
master
smithi
centos 7.3
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml supported/centos_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-07-03 17:17:58.810215 mon.b mon.0 172.21.15.86:6789/0 5916 : cluster [WRN] HEALTH_WARN OBJECT_UNFOUND: 32/291 unfound (10.997%)" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
"2017-07-03 17:04:38.633938 mon.a mon.0 172.21.15.5:6789/0 138 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-health
wip-health
master
smithi
 
rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/bluestore-comp.yaml rados.yaml scrub_test.yaml}
wip-health
wip-health
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/failover.yaml}
"2017-07-03 17:05:40.664825 mon.b mon.0 172.21.15.31:6789/0 169 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 14 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/alloc-hint.yaml
wip-health
wip-health
master
smithi
 
rados/rest/mgr-restful.yaml
"2017-07-03 17:05:02.806442 mon.a mon.0 172.21.15.77:6789/0 122 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi201 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml}
"2017-07-03 17:07:12.563511 mon.a mon.0 172.21.15.1:6789/0 149 : cluster [WRN] HEALTH_WARN OSDMAP_FLAGS: full flag(s) set" in cluster log
wip-health
wip-health
master
smithi
 
rados/upgrade/jewel-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-luminous.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml}
Command failed on smithi196 with status 1: "SWIFT_TEST_CONFIG_FILE=/home/ubuntu/cephtest/archive/testswift.client.0.conf /home/ubuntu/cephtest/swift/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/swift/test/functional -v -a '!fails_on_rgw'"
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2017-07-03 17:09:39.224359 mon.a mon.0 172.21.15.98:6789/0 364 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 1 pgs stuck inactive; 1 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 17:08:00.203976 mon.b mon.0 172.21.15.66:6789/0 380 : cluster [WRN] HEALTH_WARN POOL_FULL: 1 pool(s) full" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_python.yaml}
"2017-07-03 17:13:36.327549 mon.a mon.0 172.21.15.12:6789/0 263 : cluster [WRN] HEALTH_WARN OBJECT_DEGRADED: 1/2 objects degraded (50.000%)" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/divergent_priors2.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2017-07-03 17:10:32.107380 mon.a mon.0 172.21.15.26:6789/0 292 : cluster [ERR] overall HEALTH_ERR nodeep-scrub flag(s) set; 1 pgs stuck inactive; 1 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/sync-many.yaml workloads/pool-create-delete.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
"2017-07-03 17:11:03.260878 mon.a mon.0 172.21.15.118:6789/0 324 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/dump-stuck.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
"2017-07-03 17:12:48.639609 mon.b mon.0 172.21.15.104:6789/0 238 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
"2017-07-03 17:13:53.254908 mon.b mon.0 172.21.15.34:6789/0 396 : cluster [WRN] overall HEALTH_WARN nodeep-scrub flag(s) set; 2 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_stress_watch.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2017-07-03 17:13:24.018759 mon.a mon.0 172.21.15.109:6789/0 128 : cluster [ERR] HEALTH_ERR PG_INCOMPLETE: 16 pgs incomplete" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/ceph_objectstore_tool.yaml
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
"2017-07-03 17:15:24.385380 mon.a mon.0 172.21.15.4:6789/0 236 : cluster [WRN] overall HEALTH_WARN 225/1269 objects degraded (17.730%); 2 pgs degraded" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml supported/ubuntu_14.04.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-07-03 17:23:09.803186 mon.a mon.0 172.21.15.54:6789/0 2022 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 3 pgs stuck inactive; 3 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/sync.yaml workloads/rados_5925.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
"2017-07-03 17:17:21.571314 mon.a mon.0 172.21.15.73:6789/0 177 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/multimon/{clusters/3.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
global name 'self' is not defined
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml}
Command failed on smithi169 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph quorum_status'
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml}
"2017-07-03 17:19:22.967860 mon.a mon.0 172.21.15.31:6789/0 200 : cluster [ERR] overall HEALTH_ERR 1 osds down; 1 pgs degraded; 7 pgs incomplete; 1 pgs undersized" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
"2017-07-03 17:18:44.480175 mon.a mon.0 172.21.15.81:6789/0 746 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
"2017-07-03 17:19:17.575544 mon.a mon.0 172.21.15.204:6789/0 421 : cluster [ERR] overall HEALTH_ERR 1 osds down; 3 pgs degraded; 15 pgs incomplete; 1 pgs stuck unclean; 3 pgs undersized; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
"2017-07-03 17:20:16.625444 mon.a mon.0 172.21.15.3:6789/0 211 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 17:19:28.815466 mon.b mon.0 172.21.15.44:6789/0 942 : cluster [ERR] overall HEALTH_ERR 1 cache pools are missing hit_sets; nodeep-scrub flag(s) set; 1 pgs stuck inactive; 1 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_striper.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/lost-unfound.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
"2017-07-03 17:21:17.277095 mon.a mon.0 172.21.15.92:6789/0 829 : cluster [WRN] overall HEALTH_WARN nodeep-scrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/mon-config-keys.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/objectstore/filejournal.yaml
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
"2017-07-03 17:22:45.130485 mon.a mon.0 172.21.15.99:6789/0 224 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set; 37/2430 objects degraded (1.523%); 626/2430 objects misplaced (25.761%); 2 pgs backfilling; 1 pgs backfill_toofull" in cluster log
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_api_tests.yaml}
Command failed (workunit test rados/test.sh) on smithi107 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-health
wip-health
master
smithi
 
rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/bluestore.yaml rados.yaml scrub_test.yaml}
wip-health
wip-health
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore.yaml tasks/failover.yaml}
"2017-07-03 17:22:43.106173 mon.b mon.0 172.21.15.94:6789/0 165 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 14 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi144 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/mon-seesaw.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
"2017-07-03 17:42:59.596542 mon.a mon.0 172.21.15.110:6789/0 6253 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 2 pgs stuck inactive; 2 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
"2017-07-03 17:28:09.932178 mon.a mon.0 172.21.15.45:6789/0 781 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 1 osds down; 453/1248 objects degraded (36.298%); 7 pgs degraded; 1 pgs stuck inactive; 1 pgs stuck unclean; 7 pgs undersized; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml}
"2017-07-03 17:24:17.946200 mon.a mon.0 172.21.15.95:6789/0 83 : cluster [WRN] HEALTH_WARN CACHE_POOL_NO_HIT_SET: 1 cache pools are missing hit_sets" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/mon-thrasher.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
"2017-07-03 17:25:46.486873 mon.a mon.0 172.21.15.104:6789/0 140 : cluster [WRN] overall HEALTH_WARN 1/3 mons down, quorum a,b" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-health
wip-health
master
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2017-07-03 17:30:09.475749 mon.a mon.0 172.21.15.2:6789/0 1166 : cluster [ERR] overall HEALTH_ERR full ratio(s) out of order; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 17:28:33.712461 mon.a mon.0 172.21.15.12:6789/0 207 : cluster [WRN] HEALTH_WARN POOL_FULL: 1 pool(s) full" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
"2017-07-03 17:32:28.783616 mon.b mon.0 172.21.15.34:6789/0 29 : cluster [WRN] HEALTH_WARN MGR_DOWN: no active mgr" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/filestore-idempotent-aio-journal.yaml
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/multimon/{clusters/6.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
global name 'self' is not defined
wip-health
wip-health
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml thrashers/many.yaml workloads/rados_mon_workunits.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
"2017-07-03 17:36:43.788048 mon.a mon.0 172.21.15.3:6789/0 165 : cluster [WRN] overall HEALTH_WARN 7/106 objects degraded (6.604%); 1 pgs degraded" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml}
"2017-07-03 17:31:30.463518 mon.a mon.0 172.21.15.49:6789/0 76 : cluster [WRN] HEALTH_WARN OBJECT_MISPLACED: 61268/75280 objects misplaced (81.387%)" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
"2017-07-03 17:32:41.819307 mon.b mon.0 172.21.15.85:6789/0 143 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set; 2 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
"2017-07-03 17:35:58.372015 mon.a mon.0 172.21.15.8:6789/0 984 : cluster [ERR] overall HEALTH_ERR 408/2940 objects degraded (13.878%); 57/2940 objects misplaced (1.939%); 2 pgs backfill_wait; 7 pgs degraded; 3 pgs recovery_wait; 1 pgs stuck inactive; 1 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml}
Command failed on smithi106 with status 1: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph osd pool set-quota ec-ca max_bytes 0'"
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
"2017-07-03 17:35:26.393621 mon.a mon.0 172.21.15.55:6789/0 465 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set; 1 osds down; 588/1491 objects degraded (39.437%); 261/1491 objects misplaced (17.505%); 6 pgs degraded; 4 pgs undersized" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_workunit_loadgen_mostlyread.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/osd-recovery.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
"2017-07-03 17:42:26.422582 mon.b mon.0 172.21.15.83:6789/0 1726 : cluster [ERR] overall HEALTH_ERR nodeep-scrub flag(s) set; 1 osds down; 160/1284 objects degraded (12.461%); 16 pgs degraded; 10 pgs stuck inactive; 10 pgs stuck unclean; 16 pgs undersized" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/filestore-idempotent.yaml
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/one.yaml workloads/snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/peer.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-health
wip-health
master
smithi
centos 7.3
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml supported/centos_latest.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-07-03 17:41:15.553167 mon.b mon.0 172.21.15.137:6789/0 408 : cluster [WRN] overall HEALTH_WARN 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 17:51:37.024306 mon.a mon.0 172.21.15.144:6789/0 3724 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 4 pgs stuck inactive; 4 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/readwrite.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml}
"2017-07-03 17:49:48.170404 mon.b mon.0 172.21.15.133:6789/0 1770 : cluster [ERR] overall HEALTH_ERR nodeep-scrub flag(s) set; 513/3756 objects degraded (13.658%); 13 pgs degraded; 9 pgs recovery_wait; 3 pgs stuck inactive; 3 pgs stuck unclean; 2 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
"2017-07-03 17:44:55.126267 mon.c mon.0 172.21.15.59:6789/0 376 : cluster [WRN] overall HEALTH_WARN 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml}
"2017-07-03 17:43:32.161421 mon.a mon.0 172.21.15.92:6789/0 165 : cluster [WRN] overall HEALTH_WARN 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
"2017-07-03 17:46:26.233250 mon.a mon.0 172.21.15.78:6789/0 777 : cluster [WRN] overall HEALTH_WARN noscrub flag(s) set; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/filestore-btrfs.yaml rados.yaml scrub_test.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-btrfs.yaml tasks/failover.yaml}
"2017-07-03 17:44:18.002906 mon.b mon.0 172.21.15.77:6789/0 151 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 14 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi112 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
"2017-07-03 17:56:13.825070 mon.a mon.0 172.21.15.114:6789/0 1861 : cluster [ERR] overall HEALTH_ERR 1 osds down; 516/2610 objects degraded (19.770%); 75 pgs degraded; 15 pgs stuck inactive; 16 pgs stuck unclean; 75 pgs undersized; 2 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
"2017-07-03 17:52:08.435806 mon.b mon.0 172.21.15.118:6789/0 1817 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 1 osds down; 87/912 objects degraded (9.539%); 28 pgs degraded; 2 pgs stuck inactive; 2 pgs stuck unclean; 28 pgs undersized; 2 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/radostool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/multimon/{clusters/6.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml}
"2017-07-03 17:49:23.782382 mon.b mon.0 172.21.15.4:6789/0 11 : cluster [WRN] overall HEALTH_WARN 2/6 mons down, quorum b,d,f,e" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-luminous/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}
"2017-07-03 17:49:10.204882 mon.b mon.0 172.21.15.51:6789/0 900 : cluster [ERR] overall HEALTH_ERR 1 osds down; 410/2742 objects degraded (14.953%); 12 pgs degraded; 1 pgs stuck inactive; 1 pgs stuck unclean; 12 pgs undersized; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
"2017-07-03 17:47:21.315867 mon.a mon.0 172.21.15.105:6789/0 164 : cluster [WRN] overall HEALTH_WARN noscrub,nodeep-scrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/fusestore.yaml
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/one.yaml workloads/pool-create-delete.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/repair_test.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/rebuild-mondb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/msgr.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
"2017-07-03 18:08:45.277436 mon.a mon.0 172.21.15.26:6789/0 5383 : cluster [ERR] overall HEALTH_ERR nodeep-scrub flag(s) set; 6 pgs stuck inactive; 6 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/reg11184.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
"2017-07-03 18:06:12.075714 mon.b mon.0 172.21.15.7:6789/0 4891 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 1 pgs stuck inactive; 1 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml supported/ubuntu_14.04.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 17:51:49.792865 mon.b mon.0 172.21.15.60:6789/0 153 : cluster [WRN] HEALTH_WARN POOL_FULL: 1 pool(s) full" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2017-07-03 17:55:04.428068 mon.b mon.0 172.21.15.5:6789/0 253 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 144/2206 objects degraded (6.528%); 1 pgs degraded; 4 pgs stuck inactive; 4 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rgw_snaps.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync-many.yaml workloads/rados_5925.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml fast/fast.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-health
wip-health
master
smithi
 
rados/objectstore/keyvaluedb.yaml
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
"2017-07-03 17:54:27.439742 mon.a mon.0 172.21.15.81:6789/0 465 : cluster [WRN] overall HEALTH_WARN noscrub,nodeep-scrub flag(s) set; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/rest-api.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml}
Command failed (workunit test rest/test.py) on smithi077 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rest/test.py'
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
"2017-07-03 17:55:39.569198 mon.a mon.0 172.21.15.88:6789/0 424 : cluster [WRN] overall HEALTH_WARN nodeep-scrub flag(s) set; 678/4395 objects degraded (15.427%); 9 pgs degraded; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml}
"2017-07-03 18:02:13.287366 mon.a mon.0 172.21.15.2:6789/0 163 : cluster [WRN] HEALTH_WARN OBJECT_DEGRADED: 82408/661524 objects degraded (12.457%)" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
"2017-07-03 18:00:51.736890 mon.b mon.0 172.21.15.6:6789/0 942 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 1 osds down; 400/1160 objects degraded (34.483%); 38/1160 objects misplaced (3.276%); 1 pgs backfill_toofull; 13 pgs degraded; 1 pgs stuck inactive; 2 pgs stuck unclean; 13 pgs undersized" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_api_tests.yaml}
wip-health
wip-health
master
smithi
 
rados/multimon/{clusters/9.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
global name 'self' is not defined
wip-health
wip-health
master
smithi
 
rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml}
saw valgrind issues
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
"2017-07-03 18:09:13.503312 mon.b mon.0 172.21.15.51:6789/0 2777 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 3 pgs stuck inactive; 4 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/basic-luminous/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} objectstore/filestore-xfs.yaml rados.yaml scrub_test.yaml}
wip-health
wip-health
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/failover.yaml}
"2017-07-03 18:03:40.562561 mon.a mon.0 172.21.15.12:6789/0 158 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 14 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi028 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-health TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
"2017-07-03 18:07:15.284987 mon.a mon.0 172.21.15.11:6789/0 1038 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 105/2652 objects misplaced (3.959%); 2 pgs backfilling; 1 pgs stuck inactive; 1 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml thrashers/sync.yaml workloads/rados_api_tests.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/objectstore/objectcacher-stress.yaml
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 18:16:48.158558 mon.b mon.0 172.21.15.133:6789/0 2765 : cluster [ERR] overall HEALTH_ERR 2 pgs stuck inactive; 2 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
"2017-07-03 18:07:21.766653 mon.b mon.0 172.21.15.96:6789/0 681 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 2 pgs stuck inactive; 2 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_cls_all.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
"2017-07-03 18:17:07.566759 mon.a mon.0 172.21.15.108:6789/0 3218 : cluster [ERR] overall HEALTH_ERR noscrub flag(s) set; 7 pgs stuck inactive; 7 pgs stuck unclean; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
"2017-07-03 18:12:18.602804 mon.a mon.0 172.21.15.92:6789/0 369 : cluster [WRN] overall HEALTH_WARN 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
"2017-07-03 18:14:55.850445 mon.a mon.0 172.21.15.134:6789/0 737 : cluster [ERR] overall HEALTH_ERR noscrub,nodeep-scrub flag(s) set; 1 osds down; 2 pgs degraded; 10 pgs incomplete; 2 pgs undersized; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
"2017-07-03 18:15:13.912290 mon.c mon.0 172.21.15.80:6789/0 285 : cluster [WRN] overall HEALTH_WARN 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
"2017-07-03 18:11:14.450788 mon.b mon.0 172.21.15.112:6789/0 213 : cluster [WRN] overall HEALTH_WARN 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash-luminous/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
"2017-07-03 18:17:11.500981 mon.b mon.0 172.21.15.143:6789/0 1558 : cluster [ERR] overall HEALTH_ERR 2 pgs stuck inactive; 2 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
"2017-07-03 18:17:00.614508 mon.a mon.0 172.21.15.37:6789/0 1386 : cluster [ERR] overall HEALTH_ERR nodeep-scrub flag(s) set; 2 osds down; 4511/16689 objects degraded (27.030%); 34 pgs degraded; 11 pgs stuck degraded; 20 pgs stuck inactive; 21 pgs stuck unclean; 11 pgs stuck undersized; 34 pgs undersized; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_mon_workunits.yaml}
"2017-07-03 18:12:56.681265 mon.b mon.1 172.21.15.163:6789/0 781 : cluster [ERR] HEALTH_ERR PG_STUCK_INACTIVE: 1 pgs stuck inactive" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
"2017-07-03 18:18:06.602092 mon.a mon.0 172.21.15.150:6789/0 818 : cluster [ERR] overall HEALTH_ERR 1 pgs stuck inactive; 1 pgs stuck unclean" in cluster log
wip-health
wip-health
master
smithi
 
rados/objectstore/objectstore.yaml
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_python.yaml}
"2017-07-03 18:21:01.800744 mon.a mon.0 172.21.15.137:6789/0 251 : cluster [WRN] HEALTH_WARN OBJECT_DEGRADED: 1/2 objects degraded (50.000%)" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/singleton/{all/admin-socket.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-health
wip-health
master
smithi
 
rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml}
Command failed on smithi004 with status 1: 'find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest'
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
"2017-07-03 18:17:27.139017 mon.b mon.0 172.21.15.22:6789/0 204 : cluster [WRN] overall HEALTH_WARN nodeep-scrub flag(s) set" in cluster log
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/multimon/{clusters/21.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
global name 'self' is not defined
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml}
"2017-07-03 18:17:33.867263 mon.a mon.0 172.21.15.56:6789/0 327 : cluster [WRN] HEALTH_WARN POOL_FULL: 1 pool(s) full" in cluster log
wip-health
wip-health
master
smithi
 
rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
"2017-07-03 18:20:42.194045 mon.a mon.0 172.21.15.18:6789/0 289 : cluster [WRN] HEALTH_WARN POOL_FULL: 1 pool(s) full" in cluster log
wip-health
wip-health
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
"2017-07-03 18:27:11.750216 mon.a mon.0 172.21.15.31:6789/0 297 : cluster [WRN] overall HEALTH_WARN nodeep-scrub flag(s) set; 415/12158 objects degraded (3.413%); 1 pgs degraded; 1 pools have pg_num > pgp_num" in cluster log
wip-health
wip-health
master
smithi
centos 7.3
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml supported/centos_latest.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-health
wip-health
master
smithi
ubuntu 14.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-btrfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-health
wip-health
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_stress_watch.yaml}