Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: rhel 8.1
Description: rados/cephadm/smoke-roleless/{distro/rhel_latest start}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: centos 7.6
Description: rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_7.6} msgr-failures/osd-delay rados thrashers/mapgap thrashosds-health workloads/cache-snaps}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: centos 8.1
Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/crush-compat msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/dedup_tier}
Failure Reason: Command failed on smithi203 with status 1: 'sudo yum -y install ceph-mgr-diskprediction-local'

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: ubuntu 18.04
Description: rados/perf/{ceph objectstore/bluestore-basic-min-osd-mem-target openstack settings/optimized ubuntu_latest workloads/fio_4K_rand_rw}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: rhel 8.0
Description: rados/cephadm/with-work/{distro/rhel_8.0 fixed-2 mode/root msgr/async-v1only start tasks/rados_python}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: centos 8.1
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async objectstore/bluestore-comp-snappy rados tasks/rados_cls_all validater/valgrind}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: rhel 8.1
Description: rados/cephadm/smoke-roleless/{distro/rhel_latest start}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: centos 8.1
Description: rados/singleton/{all/thrash_cache_writeback_proxy_none msgr-failures/few msgr/async objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}}
Failure Reason: Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 10000 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op read 100 --op write 50 --op delete 50 --op copy_from 50 --op write_excl 50 --pool base'

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: centos 7.6
Description: rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v2only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_7.6} msgr-failures/few rados thrashers/default thrashosds-health workloads/cache-snaps}
Failure Reason: reached maximum tries (180) after waiting for 180 seconds

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: centos 8.1
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados tasks/rados_api_tests validater/valgrind}

Ceph Branch: wip-kefu-testing-2020-07-30-2107
Suite Branch: wip-kefu-testing-2020-07-30-2107
Teuthology Branch: master
Machine: smithi
OS: ubuntu 18.04
Description: rados/upgrade/nautilus-x-singleton/{0-cluster/{openstack start} 1-install/nautilus 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{rbd-cls rbd-import-export readwrite snaps-few-objects} 5-workload/{radosbench rbd_api} 6-finish-upgrade 7-octopus 8-workload/{rbd-python snaps-many-objects} bluestore-bitmap thrashosds-health ubuntu_latest}
Failure Reason: "2020-07-31T14:08:20.993682+0000 osd.10 (osd.10) 100 : cluster [ERR] 5.0 deep-scrub : stat mismatch, got 1/4 objects, 0/0 clones, 0/0 dirty, 0/0 omap, 0/0 pinned, 1/4 hit_set_archive, 0/0 whiteouts, 1189/6549 bytes, 0/0 manifest objects, 1189/6549 hit_set_archive bytes." in cluster log