Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason.
Each job record below lists one field per line, in this order: Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Description, Failure Reason; the ID, Status, and Nodes cells are empty in this extract, and the Failure Reason line is omitted for jobs that reported none.
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton-nomsgr/{all/recovery-unfound-found.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 01:55:37.107834 mon.a (mon.0) 44 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 01:55:15.531015 mon.a (mon.0) 74 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_latest.yaml} workloads/sample_radosbench.yaml}
"2020-04-16 02:01:40.117223 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/fastclose.yaml msgr/async-v1only.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/divergent_priors2.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 01:54:44.035406 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/many.yaml workloads/rados_5925.yaml}
"2020-04-16 01:59:53.747665 mon.f (mon.0) 79 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/rados_workunit_loadgen_mix.yaml}
"2020-04-16 02:09:56.842416 mon.a (mon.0) 153 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/dump-stuck.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 02:06:54.409908 mon.a (mon.0) 56 : cluster [WRN] Health check failed: too few PGs per OSD (8 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/crash.yaml}
"2020-04-16 02:03:18.255391 mon.b (mon.0) 62 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_latest.yaml} workloads/cosbench_64K_read_write.yaml}
"2020-04-16 02:09:36.395630 mon.a (mon.0) 69 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 02:08:43.544062 mon.a (mon.0) 78 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/fastclose.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml}
"2020-04-16 02:13:23.195645 mon.b (mon.0) 96 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/dashboard/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/dashboard.yaml}
"2020-04-16 02:16:24.842276 mon.b (mon.0) 61 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/objectstore/{backends/alloc-hint.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 02:20:13.434579 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/rest/{mgr-restful.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 02:28:33.862372 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (3 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 
rados/singleton-flat/valgrind-leaks.yaml
"2020-04-16 02:31:27.408212 mon.a (mon.0) 60 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 02:34:01.391818 mon.a (mon.0) 53 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/crush.yaml}
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/upgrade/mimic-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/mimic.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-nautilus.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashosds-health.yaml}
'pg_num_target'
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 02:21:46.764220 mon.a (mon.0) 60 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/multimon/{clusters/6.yaml msgr-failures/few.yaml msgr/random.yaml no_pools.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/mon_recovery.yaml}
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/cosbench_64K_write.yaml}
"2020-04-16 02:26:45.127633 mon.a (mon.0) 66 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} tasks/rados_workunit_loadgen_mostlyread.yaml}
"2020-04-16 02:41:25.482069 mon.a (mon.0) 165 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/few.yaml msgr/async.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rbd_cls.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton-nomsgr/{all/balancer.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 02:34:20.951456 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 02:36:15.597596 mon.a (mon.0) 79 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/failover.yaml}
"2020-04-16 03:00:31.033140 mon.a (mon.0) 99 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4K_rand_read.yaml}
"2020-04-16 02:40:44.235338 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/lost-unfound.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 02:46:25.451158 mon.a (mon.0) 61 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/one.yaml workloads/rados_api_tests.yaml}
"2020-04-16 02:58:57.525851 mon.b (mon.0) 119 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/objectstore/{backends/ceph_objectstore_tool.yaml supported-random-distro$/{ubuntu_latest.yaml}}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/readwrite.yaml}
"2020-04-16 02:56:34.581632 mon.a (mon.0) 104 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 02:57:08.435359 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/standalone/{supported-random-distro$/{centos_latest.yaml} workloads/erasure-code.yaml}
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 03:02:38.840395 mon.a (mon.0) 62 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_latest.yaml} workloads/fio_4K_rand_rw.yaml}
"2020-04-16 03:02:44.035228 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/insights.yaml}
"2020-04-16 03:32:19.321105 mon.b (mon.0) 66 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.6
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml msgr/random.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/repair_test.yaml}
"2020-04-16 03:15:50.016329 mon.a (mon.0) 164 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton-nomsgr/{all/ceph-kvstore-tool.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:16:56.453963 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4M_rand_read.yaml}
"2020-04-16 03:10:25.920182 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/multimon/{clusters/9.yaml msgr-failures/many.yaml msgr/simple.yaml no_pools.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/mon_clock_no_skews.yaml}
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/mon-auth-caps.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 03:18:06.593008 mon.a (mon.0) 74 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/objectstore/{backends/filejournal.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:16:58.345666 mon.a (mon.0) 57 : cluster [WRN] Health check failed: too few PGs per OSD (8 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/mon-config-key-caps.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 03:15:23.898375 mon.a (mon.0) 64 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{centos_latest.yaml} tasks/module_selftest.yaml}
"2020-04-16 03:30:00.431139 mon.b (mon.0) 60 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync-many.yaml workloads/rados_mon_osdmap_prune.yaml}
"2020-04-16 03:25:00.676787 mon.f (mon.0) 77 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/dashboard/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/dashboard.yaml}
"2020-04-16 03:30:21.284724 mon.a (mon.0) 66 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/fio_4M_rand_rw.yaml}
"2020-04-16 03:28:44.196929 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/mon-config-keys.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 03:35:04.769676 mon.a (mon.0) 63 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/rgw_snaps.yaml}
"2020-04-16 04:26:33.941642 mon.b (mon.0) 90 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/random.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/standalone/{supported-random-distro$/{rhel_latest.yaml} workloads/mgr.yaml}
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml thrashers/none.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 03:39:07.373576 mon.a (mon.0) 57 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/mon-config.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:35:20.976944 mon.a (mon.0) 72 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-many-deletes.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/fio_4M_rand_write.yaml}
"2020-04-16 03:44:04.841441 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/osd-backfill.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:41:26.698103 mon.a (mon.0) 61 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{centos_latest.yaml} tasks/orchestrator_cli.yaml}
"2020-04-16 03:53:34.148388 mon.b (mon.0) 61 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml}
"2020-04-16 03:49:36.574058 mon.b (mon.0) 161 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/objectstore/{backends/filestore-idempotent-aio-journal.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:46:18.611855 mon.a (mon.0) 57 : cluster [WRN] Health check failed: too few PGs per OSD (6 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:49:28.040300 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/scrub_test.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 03:59:08.175379 mon.a (mon.0) 74 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_latest.yaml} workloads/radosbench_4K_rand_read.yaml}
"2020-04-16 03:57:57.025293 mon.a (mon.0) 72 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/multimon/{clusters/21.yaml msgr-failures/few.yaml msgr/async-v1only.yaml no_pools.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/mon_clock_with_skews.yaml}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/osd-recovery.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 03:59:54.733401 mon.a (mon.0) 72 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/rados_mon_workunits.yaml}
"2020-04-16 03:55:04.125048 mon.b (mon.0) 100 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 04:04:09.793892 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.6
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml msgr/async-v1only.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/peer.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 04:01:01.875592 mon.a (mon.0) 57 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{centos_latest.yaml} tasks/progress.yaml}
"2020-04-16 04:09:04.742631 mon.a (mon.0) 94 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_api_tests.yaml}
"2020-04-16 04:35:34.659608 mon.b (mon.0) 158 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4K_seq_read.yaml}
"2020-04-16 04:03:41.704035 mon.a (mon.0) 64 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/fastclose.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/standalone/{supported-random-distro$/{rhel_latest.yaml} workloads/misc.yaml}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/pg-autoscaler.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 04:15:57.406688 mon.a (mon.0) 61 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/objectstore/{backends/filestore-idempotent.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 04:15:50.184939 mon.a (mon.0) 56 : cluster [WRN] Health check failed: too few PGs per OSD (8 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 04:22:09.590854 mon.a (mon.0) 73 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 04:21:06.284156 mon.a (mon.0) 201 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_latest.yaml} workloads/radosbench_4M_rand_read.yaml}
"2020-04-16 04:23:02.108872 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/small-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/prometheus.yaml}
"2020-04-16 05:03:10.866382 mon.b (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/fastclose.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/radostool.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 04:29:07.320776 mon.a (mon.0) 79 : cluster [WRN] Health check failed: too few PGs per OSD (3 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} tasks/rados_cls_all.yaml}
"2020-04-16 05:07:45.140933 mon.a (mon.0) 114 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/dashboard/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/dashboard.yaml}
"2020-04-16 04:30:21.198490 mon.a (mon.0) 66 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/force-sync-many.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.6
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml msgr/async.yaml rados.yaml thrashers/careful.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/random-eio.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 04:46:54.938704 mon.a (mon.0) 119 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_latest.yaml} workloads/radosbench_4M_seq_read.yaml}
"2020-04-16 04:39:49.723547 mon.a (mon.0) 70 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-nomsgr/{all/large-omap-object-warnings.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 04:41:55.176523 mon.a (mon.0) 56 : cluster [WRN] Health check failed: too few PGs per OSD (8 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/multimon/{clusters/3.yaml msgr-failures/many.yaml msgr/async-v2only.yaml no_pools.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/mon_recovery.yaml}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=clay-k=4-m=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/rebuild-mondb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 04:43:31.281642 mon.a (mon.0) 49 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/objectstore/{backends/fusestore.yaml supported-random-distro$/{ubuntu_latest.yaml}}
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} tasks/rados_python.yaml}
"2020-04-16 04:50:52.614600 mon.b (mon.0) 114 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/standalone/{supported-random-distro$/{rhel_latest.yaml} workloads/mon.yaml}
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/ssh_orchestrator.yaml}
"2020-04-16 04:48:28.252002 mon.a (mon.0) 59 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/recovery-preemption.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 04:47:16.064345 mon.a (mon.0) 62 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_latest.yaml} workloads/radosbench_4M_write.yaml}
"2020-04-16 04:53:32.146937 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 04:56:44.861616 mon.a (mon.0) 56 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton-nomsgr/{all/lazy_omap_stats_output.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 04:52:19.643765 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 05:08:25.593829 mon.a (mon.0) 82 : cluster [WRN] Health check failed: too few PGs per OSD (3 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/test-crash.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 05:00:59.916182 mon.a (mon.0) 69 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/sample_fio.yaml}
"2020-04-16 04:56:17.483666 mon.a (mon.0) 64 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.6
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/fastclose.yaml msgr/random.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rbd_cls.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/rados_stress_watch.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/workunits.yaml}
"2020-04-16 05:38:04.303025 mon.a (mon.0) 72 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/many.yaml workloads/pool-create-delete.yaml}
"2020-04-16 05:51:53.675075 mon.b (mon.0) 101 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml}
"2020-04-16 05:44:45.801353 mon.b (mon.0) 97 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 05:50:26.521400 mon.a (mon.0) 72 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/objectstore/{backends/keyvaluedb.yaml supported-random-distro$/{centos_latest.yaml}}
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton-nomsgr/{all/librados_hello_world.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 05:43:35.774230 mon.a (mon.0) 54 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/thrash-backfill-full.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/sample_radosbench.yaml}
"2020-04-16 05:50:29.372941 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/multimon/{clusters/6.yaml msgr-failures/few.yaml msgr/async.yaml no_pools.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/mon_clock_no_skews.yaml}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/osd.yaml}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/thrash-eio.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_striper.yaml}
"2020-04-16 05:57:27.339149 mon.a (mon.0) 103 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-nomsgr/{all/msgr.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{rhel_latest.yaml} tasks/crash.yaml}
"2020-04-16 06:14:22.854578 mon.a (mon.0) 71 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/cosbench_64K_read_write.yaml}
"2020-04-16 06:05:12.485939 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 06:14:20.374283 mon.a (mon.0) 74 : cluster [WRN] Health check failed: too few PGs per OSD (5 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/dashboard/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/dashboard.yaml}
"2020-04-16 06:12:59.929704 mon.a (mon.0) 87 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/few.yaml msgr/simple.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 06:13:30.822206 mon.a (mon.0) 142 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/objectstore/{backends/objectcacher-stress.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 06:12:09.079055 mon.a (mon.0) 52 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_workunit_loadgen_big.yaml}
"2020-04-16 06:30:41.015874 mon.a (mon.0) 102 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_latest.yaml} workloads/cosbench_64K_write.yaml}
"2020-04-16 06:24:47.047005 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/rados_5925.yaml}
"2020-04-16 06:18:22.258991 mon.a (mon.0) 76 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 06:27:45.185397 mon.a (mon.0) 119 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 06:26:08.957049 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{centos_latest.yaml} tasks/failover.yaml}
"2020-04-16 06:37:39.401734 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_cls_all.yaml validater/valgrind.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton/{all/admin-socket.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 06:35:13.914444 mon.a (mon.0) 55 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4K_rand_read.yaml}
"2020-04-16 06:32:31.802250 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/singleton/{all/deduptool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
"2020-04-16 06:35:14.553247 mon.a (mon.0) 68 : cluster [WRN] Health check failed: too few PGs per OSD (3 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/scrub.yaml}
Command failed (workunit test scrub/osd-scrub-snaps.sh) on smithi013 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=c7da604cb101cbe78a257a29498a98c69964e0a6 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-snaps.sh'
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 06:44:20.671906 mon.a (mon.0) 56 : cluster [WRN] Health check failed: too few PGs per OSD (4 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/rados_workunit_loadgen_mix.yaml}
"2020-04-16 06:48:38.583043 mon.b (mon.0) 141 : cluster [WRN] Health check failed: too few PGs per OSD (1 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/multimon/{clusters/9.yaml msgr-failures/many.yaml msgr/random.yaml no_pools.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/mon_clock_with_skews.yaml}
nautilus
nautilus
py2
smithi
centos 7.6
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml msgr/async-v1only.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
centos 7.5
rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
"2020-04-16 06:44:25.853004 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
centos 7.5
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/redirect.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
rhel 7.5
rados/objectstore/{backends/objectstore.yaml supported-random-distro$/{rhel_latest.yaml}}
nautilus
nautilus
py2
smithi
rhel 7.5
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
"2020-04-16 06:51:21.011712 mon.a (mon.0) 59 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/fio_4K_rand_rw.yaml}
"2020-04-16 06:49:21.233664 mon.a (mon.0) 67 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 18.04
rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/insights.yaml}
"2020-04-16 06:51:34.189303 mon.b (mon.0) 97 : cluster [WRN] Health check failed: too few PGs per OSD (3 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
ubuntu 16.04
rados/singleton/{all/divergent_priors2.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
"2020-04-16 06:49:06.310419 mon.a (mon.0) 77 : cluster [WRN] Health check failed: too few PGs per OSD (2 < min 30) (TOO_FEW_PGS)" in cluster log
nautilus
nautilus
py2
smithi
rhel 7.5
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml}
'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds