Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/singleton/{all/recovery-preemption.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/simple.yaml objectstore/bluestore.yaml rados.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml}
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/standalone/mon.yaml
- Ceph Branch: wip-kefu-testing-2018-02-20-2052 | Suite Branch: wip-kefu-testing-2018-02-20-2052 | Teuthology Branch: master | Machine: mira
  Description: rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml}
  Failure Reason: Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 400 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op setattr 25 --op read 100 --op copy_from 50 --op write 0 --op rmattr 25 --op append 100 --op delete 50 --pool unique_pool_0'