ID
Status
Ceph Branch
Suite Branch
Teuthology Branch
Machine
OS
Nodes
Description
Failure Reason
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
centos 7.6
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{centos_7.yaml} tasks/orchestrator_cli.yaml}
"2019-09-15T16:06:46.889283+0000 mds.a (mds.0) 1 : cluster [WRN] evicting unresponsive client mira101:x (4662), after 302.397 seconds" in cluster log
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
centos 7.6
rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/erasure-code.yaml}
Command failed (workunit test erasure-code/test-erasure-code.sh) on mira110 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=89d631060fe9116c630d52b252ef94de20b166d0 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/erasure-code/test-erasure-code.sh'
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
rhel 7.6
rados/perf/{ceph.yaml objectstore/bluestore-basic-min-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_7.yaml} workloads/radosbench_omap_write.yaml}
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/cosbench_64K_read_write.yaml}
Command failed on mira027 with status 1: 'find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest'
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
ubuntu 18.04
rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/cosbench_64K_write.yaml}
Command failed on mira088 with status 1: 'find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest'
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
rhel 7.6
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{rhel_7.yaml} tasks/dashboard.yaml}
Test failure: test_cephfs_get (tasks.mgr.dashboard.test_cephfs.CephfsTest)
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
centos 7.6
rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/mon.yaml}
Command failed (workunit test mon/mon-osdmap-prune.sh) on mira027 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=89d631060fe9116c630d52b252ef94de20b166d0 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/mon-osdmap-prune.sh'
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
centos 7.6
rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/osd.yaml}
Command failed (workunit test osd/divergent-priors.sh) on mira088 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=89d631060fe9116c630d52b252ef94de20b166d0 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/osd/divergent-priors.sh'
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
centos 7.6
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}
wip-kefu-testing-2019-09-15-1533
wip-kefu-testing-2019-09-15-1533
master
mira
centos 7.6
rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/scrub.yaml}
Command failed (workunit test scrub/osd-recovery-scrub.sh) on mira101 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=89d631060fe9116c630d52b252ef94de20b166d0 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-recovery-scrub.sh'