| ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason |
|----|--------|-------------|--------------|-------------------|---------|----|-------|-------------|----------------|
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | ubuntu 18.04 |  | rados:monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/rados_mon_osdmap_prune.yaml} |  |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | ubuntu 18.04 |  | rados:monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/force-sync-many.yaml workloads/rados_mon_workunits.yaml} |  |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | centos 7.5 |  | rados:monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/many.yaml workloads/snaps-few-objects.yaml} | "2019-02-19 00:20:39.363997 mon.a (mon.0) 92 : cluster [WRN] Health check failed: 4 osds down (OSD_DOWN)" in cluster log |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | ubuntu 16.04 |  | rados:monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/one.yaml workloads/pool-create-delete.yaml} | Command crashed: 'sudo ceph --cluster ceph osd crush tunables default' |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | rhel 7.5 |  | rados:monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/sync-many.yaml workloads/rados_5925.yaml} |  |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | ubuntu 16.04 |  | rados:monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/sync.yaml workloads/rados_api_tests.yaml} |  |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | rhel 7.5 |  | rados:monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/force-sync-many.yaml workloads/rados_mon_osdmap_prune.yaml} |  |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | centos 7.5 |  | rados:monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} thrashers/many.yaml workloads/rados_mon_workunits.yaml} | "2019-02-19 00:21:30.433246 mon.b (mon.0) 163 : cluster [WRN] Health check failed: 3 osds down (OSD_DOWN)" in cluster log |
|    |        | wip-sage2-testing-2019-02-18-1346 | wip-sage2-testing-2019-02-18-1346 | master | smithi | rhel 7.5 |  | rados:monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/one.yaml workloads/snaps-few-objects.yaml} | "2019-02-19 00:20:36.461851 mon.f (mon.0) 93 : cluster [WRN] Health check failed: 4 osds down (OSD_DOWN)" in cluster log |

(ID, Status, and Nodes values were not preserved in the source and are left blank.)