Ceph Branch: wip-sage-testing-2019-03-24-1032
Suite Branch: wip-sage-testing-2019-03-24-1032
Teuthology Branch: master
Machine: smithi

| OS | Description | Failure Reason |
| --- | --- | --- |
| ubuntu 16.04 | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | |
| ubuntu 16.04 | rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}} | "2019-03-25 10:11:17.741128 mon.a (mon.0) 71 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/force-sync-many.yaml workloads/pool-create-delete.yaml} | "2019-03-25 10:22:10.612041 mon.f (mon.0) 83 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| rhel 7.5 | rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}} | "2019-03-25 10:16:38.043253 mon.a (mon.0) 205 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rgw_snaps.yaml} | |
| ubuntu 16.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0' |
| ubuntu 18.04 | rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4M_write.yaml} | "2019-03-25 10:19:11.919555 mon.a (mon.0) 84 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| rhel 7.5 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 0 --op rmattr 25 --op append 100 --op delete 50 --pool unique_pool_0' |
| ubuntu 18.04 | rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | |
| ubuntu 16.04 | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/ssh_orchestrator.yaml} | "2019-03-25 10:54:05.954733 mon.a (mon.0) 73 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} | psutil.NoSuchProcess process no longer exists (pid=8507) |
| centos 7.5 | rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/nautilus.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml} | psutil.NoSuchProcess process no longer exists (pid=7265) |
| centos 7.5 | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | |
| ubuntu 18.04 | rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | "2019-03-25 10:23:46.961842 mon.a (mon.0) 62 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 16.04 | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op append_excl 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 0 --op write_excl 0 --op rmattr 25 --op append 50 --op delete 50 --pool unique_pool_0' |
| ubuntu 18.04 | rados/multimon/{clusters/3.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/mon_clock_with_skews.yaml} | "2019-03-25 10:27:48.650990 mon.b (mon.0) 57 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml} | |
| rhel 7.5 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} | |
| centos 7.5 | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} | |
| ubuntu 16.04 | rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/sample_fio.yaml} | "2019-03-25 10:28:46.538980 mon.a (mon.0) 79 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/singleton/{all/lost-unfound.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | "2019-03-25 10:38:48.380189 mon.a (mon.0) 74 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| rhel 7.5 | rados/singleton-nomsgr/{all/large-omap-object-warnings.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}} | "2019-03-25 10:33:54.240404 mon.a (mon.0) 64 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/scrub_test.yaml} | "2019-03-25 10:41:00.022014 mon.b (mon.0) 91 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 16.04 | rados/objectstore/{backends/fusestore.yaml supported-random-distro$/{ubuntu_16.04.yaml}} | |
| ubuntu 18.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 50 --op write_excl 50 --op delete 50 --pool unique_pool_0' |
| centos 7.5 | rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | "2019-03-25 10:40:28.086707 mon.a (mon.0) 70 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/workunits.yaml} | "2019-03-25 10:38:42.895267 mon.a (mon.0) 78 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/standalone/{supported-random-distro$/{ubuntu_latest.yaml} workloads/mon.yaml} | Command failed (workunit test mon/mon-osdmap-prune.sh) on smithi071 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=51512c6307a29c298db33e79dee5de0af732b3d3 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/mon-osdmap-prune.sh' |
| centos 7.5 | rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rbd_cls.yaml} | |
| ubuntu 18.04 | rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/sample_radosbench.yaml} | "2019-03-25 10:41:13.751529 mon.a (mon.0) 80 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 16.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 50 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0' |
| rhel 7.5 | rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}} | "2019-03-25 10:43:36.361815 mon.a (mon.0) 113 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| rhel 7.5 | rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/many.yaml workloads/rados_5925.yaml} | "2019-03-25 10:46:50.219221 mon.a (mon.0) 88 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 0 --op rmattr 25 --op append 100 --op delete 50 --pool unique_pool_0' |
| ubuntu 16.04 | rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0' |
| ubuntu 18.04 | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | |
| ubuntu 16.04 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/rados_api_tests.yaml} | |
| centos 7.5 | rados/singleton-nomsgr/{all/librados_hello_world.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | "2019-03-25 10:50:01.273927 mon.a (mon.0) 64 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | "2019-03-25 10:47:01.293039 mon.a (mon.0) 106 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_redirect --low_tier_pool low_tier --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op copy_from 100 --op write_excl 50 --op delete 10 --pool unique_pool_0' |
| centos 7.5 | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=clay-k=4-m=2.yaml} | Command failed on smithi107 with status 1: '\n sudo yum -y install ceph-radosgw\n ' |
| centos 7.5 | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{centos_latest.yaml} tasks/crash.yaml} | |
| ubuntu 16.04 | rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/cosbench_64K_read_write.yaml} | |
| rhel 7.5 | rados/singleton/{all/mon-auth-caps.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}} | "2019-03-25 10:54:12.334447 mon.a (mon.0) 82 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| rhel 7.5 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --set_chunk --low_tier_pool low_tier --max-ops 4000 --objects 300 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0' |
| ubuntu 18.04 | rados/objectstore/{backends/keyvaluedb.yaml supported-random-distro$/{ubuntu_latest.yaml}} | |
| rhel 7.5 | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 400 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 0 --op rmattr 25 --op append 100 --op delete 50 --pool unique_pool_0' |
| ubuntu 16.04 | rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel-v1only.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/fastclose.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | psutil.NoSuchProcess process no longer exists (pid=30724) |
| centos 7.5 | rados/singleton/{all/mon-config-key-caps.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | "2019-03-25 10:58:03.142980 mon.a (mon.0) 75 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| rhel 7.5 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op rmattr 25 --op delete 50 --pool unique_pool_0' |
| rhel 7.5 | rados/multimon/{clusters/6.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} tasks/mon_recovery.yaml} | |
| | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml} | Command failed (workunit test cls/test_cls_lock.sh) on smithi005 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=51512c6307a29c298db33e79dee5de0af732b3d3 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_lock.sh' |
| rhel 7.5 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} tasks/rados_cls_all.yaml} | Command failed (workunit test cls/test_cls_lock.sh) on smithi172 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=51512c6307a29c298db33e79dee5de0af732b3d3 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_lock.sh' |
| centos 7.5 | rados/singleton-nomsgr/{all/msgr.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | |
| ubuntu 16.04 | rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/cosbench_64K_write.yaml} | |
| rhel 7.5 | rados/singleton/{all/mon-config-keys.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}} | "2019-03-25 11:03:14.756479 mon.a (mon.0) 79 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} | |
| ubuntu 16.04 | rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/osd.yaml} | |
| centos 7.5 | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-low-osd-mem-target.yaml supported-random-distro$/{centos_latest.yaml} tasks/dashboard.yaml} | Command failed on smithi183 with status 1: '\n sudo yum -y install ceph-radosgw\n ' |
| centos 7.5 | rados/singleton/{all/mon-config.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | "2019-03-25 11:06:09.083913 mon.a (mon.0) 60 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/one.yaml workloads/rados_api_tests.yaml} | |
| ubuntu 16.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --write-fadvise-dontneed --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 50 --op write_excl 50 --op delete 10 --pool unique_pool_0' |
| centos 7.5 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | |
| rhel 7.5 | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}} | |
| centos 7.5 | rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/jewel.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml rados.yaml thrashers/none.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml} | |
| ubuntu 16.04 | rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} | |
| ubuntu 16.04 | rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_16.04.yaml} workloads/fio_4K_rand_read.yaml} | "2019-03-25 11:06:42.554348 mon.a (mon.0) 82 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| centos 7.5 | rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | "2019-03-25 11:13:54.343426 mon.a (mon.0) 69 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log |
| ubuntu 18.04 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_python.yaml} | |
| ubuntu 18.04 | rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | |
| ubuntu 18.04 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} | |
| ubuntu 16.04 | rados/objectstore/{backends/objectcacher-stress.yaml supported-random-distro$/{ubuntu_16.04.yaml}} | |
| ubuntu 16.04 | rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml} | Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --pool-snaps --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0' |
| rhel 7.5 | rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} | |
| ubuntu 16.04 | rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}} | |
| ubuntu 18.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} | |
| rhel 7.5 | rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{rhel_latest.yaml} tasks/failover.yaml} | |
| ubuntu 16.04 | rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} | |
| ubuntu 18.04 | rados/singleton/{all/osd-recovery.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | |
| rhel 7.5 | rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_latest.yaml} workloads/fio_4K_rand_rw.yaml} | |
| rhel 7.5 | rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | |
| ubuntu 16.04 | rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous-v1only.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/osd-delay.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} | |
| ubuntu 18.04 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_stress_watch.yaml} | |
| ubuntu 16.04 | rados/singleton/{all/peer.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}} | |
| ubuntu 18.04 | rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | |
| ubuntu 16.04 | rados/multimon/{clusters/9.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/mon_clock_no_skews.yaml} | |
| centos | rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml} | |
| centos 7.5 | rados/standalone/{supported-random-distro$/{centos_latest.yaml} workloads/scrub.yaml} | |
| ubuntu 16.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} | |
| ubuntu 18.04 | rados/singleton/{all/pg-autoscaler.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} | |
| ubuntu 18.04 | rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4M_rand_read.yaml} | |
| ubuntu 16.04 | rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} | machine smithi095.front.sepia.ceph.com is locked by scheduled_teuthology@teuthology, not scheduled_sage@teuthology |