Each record below lists the following fields, one per line:
Ceph Branch
Suite Branch
Teuthology Branch
Machine
OS (blank where not recorded)
Description
Failure Reason (present only when the job failed)
(The ID, Status, and Nodes columns of the original table carry no values in this listing.)
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
centos
rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
reached maximum tries (800) after waiting for 4800 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/sync.yaml workloads/snaps-few-objects.yaml}
"2017-11-20 15:23:51.520613 mon.a mon.0 172.21.15.91:6789/0 159 : cluster [WRN] Health check failed: 3 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml settings/optimized.yaml workloads/sample_radosbench.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
timed out waiting for admin_socket to appear after osd.3 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/multimon/{clusters/9.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_recovery.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
centos
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_cls_all.yaml validater/valgrind.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Command failed (workunit test rados/test.sh) on smithi010 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml}
Command failed on smithi093 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 7'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
timed out waiting for admin_socket to appear after osd.7 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_stress_watch.yaml}
"2017-11-20 15:22:09.951591 mon.a mon.0 172.21.15.152:6789/0 196 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Socket is closed
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore-bitmap.yaml tasks/dashboard.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/alloc-hint.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml settings/optimized.yaml workloads/fio_4K_rand_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/rest/mgr-restful.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi081 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml}
"2017-11-20 15:23:54.806591 mon.a mon.0 172.21.15.156:6789/0 98 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/standalone/crush.yaml
Command failed (workunit test crush/crush-classes.sh) on smithi174 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/crush/crush-classes.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/upgrade/luminous-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-mimic.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported/ubuntu_14.04.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml thrashers/sync.yaml workloads/pool-create-delete.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/admin-socket.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2017-11-20 15:29:41.075117 mon.a mon.0 172.21.15.161:6789/0 60 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
reached maximum tries (500) after waiting for 3000 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_striper.yaml}
"2017-11-20 15:45:40.235818 mon.a mon.0 172.21.15.1:6789/0 205 : cluster [WRN] Health check failed: 4 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/divergent_priors.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml settings/optimized.yaml workloads/fio_4K_rand_rw.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
Command failed on smithi170 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 11'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/divergent_priors2.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Command failed (workunit test rados/test.sh) on smithi172 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/ceph_objectstore_tool.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/dump-stuck.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_5925.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_workunit_loadgen_big.yaml}
"2017-11-20 15:58:14.763633 mon.b mon.0 172.21.15.132:6789/0 196 : cluster [WRN] Health check failed: 6 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore.yaml settings/optimized.yaml workloads/fio_4M_rand_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
timed out waiting for admin_socket to appear after osd.5 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/multimon/{clusters/21.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
"2017-11-20 16:02:37.342693 mon.b mon.0 172.21.15.59:6789/0 55 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml}
"2017-11-20 16:00:40.072261 mon.b mon.0 172.21.15.108:6789/0 204 : cluster [WRN] Health check failed: 5 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
timeout expired in wait_for_all_osds_up
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
timed out waiting for admin_socket to appear after osd.1 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/ceph-kvstore-tool.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/standalone/erasure-code.yaml
Command failed (workunit test erasure-code/test-erasure-code-plugins.sh) on smithi039 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/erasure-code/test-erasure-code-plugins.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
reached maximum tries (500) after waiting for 3000 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/failover.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml settings/optimized.yaml workloads/fio_4M_rand_rw.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_workunit_loadgen_mix.yaml}
"2017-11-20 16:19:17.225911 mon.a mon.0 172.21.15.28:6789/0 206 : cluster [WRN] Health check failed: 5 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/many.yaml workloads/rados_api_tests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
timed out waiting for admin_socket to appear after osd.2 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/filejournal.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/lost-unfound.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
timed out waiting for admin_socket to appear after osd.2 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml settings/optimized.yaml workloads/fio_4M_rand_write.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi143 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
timed out waiting for admin_socket to appear after osd.1 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_workunit_loadgen_mostlyread.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
reached maximum tries (800) after waiting for 4800 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
timed out waiting for admin_socket to appear after osd.4 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Command failed (workunit test rados/test.sh) on smithi154 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/one.yaml workloads/rados_mon_workunits.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml settings/optimized.yaml workloads/radosbench_4K_rand_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
timed out waiting for admin_socket to appear after osd.7 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/readwrite.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/multimon/{clusters/21.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/filestore-idempotent-aio-journal.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/standalone/misc.yaml
Command failed (workunit test misc/rados-striper.sh) on smithi083 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/misc/rados-striper.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
reached maximum tries (800) after waiting for 4800 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
centos 7.4
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore.yaml rados.yaml supported/centos_latest.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
timed out waiting for admin_socket to appear after osd.0 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
centos 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}
Command failed (workunit test rados/test.sh) on smithi067 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/mon-auth-caps.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
timed out waiting for admin_socket to appear after osd.3 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
reached maximum tries (500) after waiting for 3000 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore.yaml tasks/module_selftest.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore.yaml settings/optimized.yaml workloads/radosbench_4K_seq_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/mon-config-keys.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2017-11-20 17:07:13.799257 mon.a mon.0 172.21.15.140:6789/0 53 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
timed out waiting for admin_socket to appear after osd.10 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml thrashers/sync-many.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/repair_test.yaml}
timed out waiting for admin_socket to appear after osd.6 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
timed out waiting for admin_socket to appear after osd.4 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/mon-seesaw.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml}
"2017-11-20 17:13:21.752184 mon.a mon.0 172.21.15.143:6789/0 84 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command failed on smithi007 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 5'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml settings/optimized.yaml workloads/radosbench_4M_rand_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/filestore-idempotent.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
timed out waiting for admin_socket to appear after osd.2 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rgw_snaps.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/sync-many.yaml workloads/pool-create-delete.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
ubuntu 14.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml supported/ubuntu_14.04.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
timed out waiting for admin_socket to appear after osd.2 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml settings/optimized.yaml workloads/radosbench_4M_seq_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
Command failed (workunit test cephtool/test.sh) on smithi097 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml}
Command failed (workunit test rados/test_health_warnings.sh) on smithi044 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_health_warnings.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/standalone/mon.yaml
Command failed (workunit test mon/osd-pool-df.sh) on smithi029 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/osd-pool-df.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
timed out waiting for admin_socket to appear after osd.3 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/osd-recovery.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
timed out waiting for admin_socket to appear after osd.3 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/multimon/{clusters/3.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/mon_recovery.yaml}
"2017-11-20 17:48:24.418280 mon.a mon.0 172.21.15.28:6789/0 68 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml rados.yaml thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
timed out waiting for admin_socket to appear after osd.4 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/peer.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
timed out waiting for admin_socket to appear after osd.2 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/scrub_test.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Command failed on smithi076 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 4'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/fusestore.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Command failed on smithi184 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 6'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/workunits.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml settings/optimized.yaml workloads/radosbench_4M_write.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/sync.yaml workloads/rados_5925.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml}
reached maximum tries (500) after waiting for 3000 seconds
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/large-omap-object-warnings.yaml rados.yaml}
"2017-11-20 18:04:19.074163 mon.a mon.0 172.21.15.143:6789/0 61 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
timed out waiting for admin_socket to appear after osd.0 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/radostool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
"2017-11-20 23:27:48.999903 mon.a mon.0 172.21.15.95:6789/0 94 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
"2017-11-20 23:28:40.288352 mon.a mon.0 172.21.15.100:6789/0 13 : cluster [WRN] mon.1 172.21.15.116:6789/0 clock skew 0.801415s > max 0.5s" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
timed out waiting for admin_socket to appear after osd.5 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_api_tests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
"2017-11-20 23:34:11.532906 mon.c mon.2 172.21.15.53:6790/0 2 : cluster [WRN] message from mon.0 was stamped 0.704200s in the future, clocks not synchronized" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
timed out waiting for admin_socket to appear after osd.5 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/random-eio.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
"2017-11-20 23:36:41.094861 mon.a mon.0 172.21.15.23:6789/0 168 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore.yaml settings/optimized.yaml workloads/sample_fio.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Command failed (workunit test rados/test.sh) on smithi048 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/rebuild-mondb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect.yaml}
timed out waiting for admin_socket to appear after osd.0 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
ubuntu 16.04
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported/ubuntu_latest.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
timed out waiting for admin_socket to appear after osd.1 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/keyvaluedb.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/msgr.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/standalone/osd.yaml
Command failed (workunit test osd/osd-copy-from.sh) on smithi093 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/osd/osd-copy-from.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/force-sync-many.yaml workloads/rados_api_tests.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
timed out waiting for admin_socket to appear after osd.7 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/recovery-preemption.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/rados_cls_all.yaml}
"2017-11-20 23:45:15.090792 mon.a mon.0 172.21.15.59:6789/0 200 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml settings/optimized.yaml workloads/sample_radosbench.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/reg11184.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/multimon/{clusters/6.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_clock_no_skews.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/fastclose.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
centos 
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml}
timed out waiting for admin_socket to appear after osd.2 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/dashboard.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml settings/optimized.yaml workloads/fio_4K_rand_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
timed out waiting for admin_socket to appear after osd.6 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_python.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
timed out waiting for admin_socket to appear after osd.6 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml thrashers/many.yaml workloads/rados_mon_workunits.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
timed out waiting for admin_socket to appear after osd.4 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/rest-api.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/objectcacher-stress.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Command failed (workunit test rados/test.sh) on smithi036 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
"2017-11-21 00:11:35.476031 mon.a mon.0 172.21.15.59:6789/0 82 : cluster [WRN] Health check failed: 2 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml settings/optimized.yaml workloads/fio_4K_rand_rw.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
Exiting scrub checking -- not all pgs scrubbed.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_stress_watch.yaml}
"2017-11-21 00:20:35.688980 mon.b mon.0 172.21.15.142:6789/0 141 : cluster [WRN] Health check failed: 3 osds down (OSD_DOWN)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
centos 7.4
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml supported/centos_latest.yaml thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/thrash-eio.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml}
"2017-11-21 00:26:00.754019 mon.a mon.0 172.21.15.31:6789/0 61 : cluster [WRN] Health check failed: Reduced data availability: 8 pgs inactive (PG_AVAILABILITY)" in cluster log
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/standalone/scrub.yaml
Command failed (workunit test scrub/osd-scrub-repair.sh) on smithi058 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=wip-sage2-testing-2017-11-20-0728 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-repair.sh'
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml thrashers/one.yaml workloads/snaps-few-objects.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/small-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}
timed out waiting for admin_socket to appear after osd.3 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml settings/optimized.yaml workloads/fio_4M_rand_read.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
timed out waiting for admin_socket to appear after osd.7 restart
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/objectstore/objectstore.yaml
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml}
failed to recover before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_striper.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}
failed to become clean before timeout expired
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml}
Scrubbing terminated -- not all pgs were active and clean.
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/multimon/{clusters/9.yaml mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/mon_clock_with_skews.yaml}
wip-sage2-testing-2017-11-20-0728
wip-sage2-testing-2017-11-20-0728
master
smithi
 
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}