Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 centos 7.5
Description:        rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{centos_latest.yaml} tasks/module_selftest.yaml}
Failure Reason:     "2020-05-05 00:08:58.074887 mds.b (mds.0) 1 : cluster [WRN] evicting unresponsive client smithi075:y (4660), after 302.719 seconds" in cluster log

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 ubuntu 16.04
Description:        rados/upgrade/mimic-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/mimic.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-nautilus.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} supported-random-distro$/{ubuntu_16.04.yaml} thrashosds-health.yaml}
Failure Reason:     'wait_until_healthy' reached maximum tries (150) after waiting for 900 seconds
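
The 'wait_until_healthy' step polls cluster health until it reports HEALTH_OK, and here it gave up after 150 tries over 900 seconds during the mimic-to-nautilus upgrade thrash. When triaging interactively, the standard status commands show what kept health from settling; a minimal sketch, assuming shell access to a surviving node with the admin keyring:

    # Inspect why the cluster never reached HEALTH_OK after the upgrade/thrash.
    ceph -s               # overall status and current health summary
    ceph health detail    # the specific warnings/errors still outstanding
    ceph osd tree         # look for down/out OSDs left behind by the thrasher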

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 centos 7.5
Description:        rados/singleton-nomsgr/{all/balancer.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 rhel 7.5
Description:        rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
Failure Reason:     Command failed (workunit test rados/test_envlibrados_for_rocksdb.sh) on smithi089 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=785d4d2d77bf8765f0bcd5e2b7bed4f857d0eef9 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_envlibrados_for_rocksdb.sh'
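
The failing piece here is the workunit script itself exiting with status 2; the rest of the command line is teuthology's standard workunit wrapper (clone of the ceph tree, adjust-ulimits, ceph-coverage, 3h timeout). To reproduce outside teuthology, the script can be run directly from a Ceph checkout; a minimal sketch, assuming a node with the Ceph packages installed, a running test cluster, and hypothetical checkout and TESTDIR paths:

    # Re-run the failing workunit by hand (checkout and TESTDIR paths are assumptions,
    # and a reachable test cluster is assumed).
    cd ~/src/ceph
    mkdir -p /tmp/cephtest
    TESTDIR=/tmp/cephtest CEPH_ARGS="--cluster ceph" \
        bash qa/workunits/rados/test_envlibrados_for_rocksdb.sh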

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 rhel 7.5
Description:        rados/standalone/{supported-random-distro$/{rhel_latest.yaml} workloads/osd.yaml}

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 centos 7.6
Description:        rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml thrashers/careful.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 ubuntu 18.04
Description:        rados/mgr/{clusters/{2-node-mgr.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/module_selftest.yaml}
Failure Reason:     "2020-05-04 23:58:21.716448 mds.a (mds.0) 1 : cluster [WRN] evicting unresponsive client smithi203:y (14644), after 301.491 seconds" in cluster log

Ceph Branch:        wip-yuri5-testing-2020-05-04-1554-nautilus
Suite Branch:       wip-yuri5-testing-2020-05-04-1554-nautilus
Teuthology Branch:  py2
Machine:            smithi
OS:                 centos 7.5
Description:        rados/objectstore/{backends/objectstore.yaml supported-random-distro$/{centos_latest.yaml}}
Failure Reason:     Command failed on smithi163 with status 134: 'sudo TESTDIR=/home/ubuntu/cephtest bash -c \'mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3 --gtest_catch_exceptions=0\''
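
Exit status 134 is 128 + 6, i.e. ceph_test_objectstore died on SIGABRT, most often a failed assertion; the details should be in the ceph_test_objectstore.log named in the command. To narrow it down on a test box, the cases can be listed and a tighter gtest filter re-run; a minimal sketch, where the filter pattern is an assumption rather than the case that failed in this job:

    # Status 134 = 128 + SIGABRT(6): the binary aborted, usually on an assertion.
    ulimit -Sn 16384
    ceph_test_objectstore --gtest_list_tests
    # The filter pattern below is only an example, not taken from the failed run.
    CEPH_ARGS="--log-file ./ceph_test_objectstore.log --debug-bluestore 20" \
        ceph_test_objectstore --gtest_filter='ObjectStore/StoreTest.*' --gtest_catch_exceptions=0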