Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
OS: centos 7.4
Description: rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported/centos_latest.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
Failure Reason: Command failed on smithi153 with status 1: 'sudo yum -y install python34-cephfs'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/upgrade/jewel-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-luminous.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} thrashosds-health.yaml}
Failure Reason: Command failed (workunit test rbd/test_librbd.sh) on smithi008 with status 139: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=jewel TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/test_librbd.sh'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore-comp.yaml tasks/workunits.yaml}
Failure Reason: Command failed (workunit test mgr/test_localpool.sh) on smithi025 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=bb561855173f8aa62c2ddcb580a927455a578973 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/mgr/test_localpool.sh'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/singleton/{all/recovery-preemption.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
Failure Reason: Command failed on smithi190 with status 1: 'sudo TESTDIR=/home/ubuntu/cephtest bash -c \'egrep \'"\'"\'(defer backfill|defer recovery)\'"\'"\' /var/log/ceph/ceph-osd.*.log\''

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
OS: centos
Description: rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}
Failure Reason: Command failed on smithi193 with status 1: 'sudo yum -y install python34-cephfs'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/singleton/{all/rest-api.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml}
Failure Reason: Command failed (workunit test rest/test.py) on smithi152 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=bb561855173f8aa62c2ddcb580a927455a578973 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rest/test.py'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml}
Failure Reason: Command failed (workunit test rados/test_envlibrados_for_rocksdb.sh) on smithi156 with status 100: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=bb561855173f8aa62c2ddcb580a927455a578973 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_envlibrados_for_rocksdb.sh'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/objectstore/filestore-idempotent-aio-journal.yaml

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/objectstore/filestore-idempotent.yaml

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
OS: centos 7.4
Description: rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-mkfs-balancer-upmap.yaml leveldb.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported/centos_latest.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
Failure Reason: Command failed on smithi098 with status 1: 'sudo yum -y install python34-cephfs'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/rest/rest_test.yaml
Failure Reason: Command failed (workunit test rest/test.py) on smithi193 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=bb561855173f8aa62c2ddcb580a927455a578973 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rest/test.py'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
OS: centos
Description: rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml}
Failure Reason: Command failed on smithi143 with status 1: 'sudo yum -y install python34-cephfs'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/filestore-xfs.yaml tasks/module_selftest.yaml}

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/mgr/{clusters/2-node-mgr.yaml debug/mgr.yaml objectstore/bluestore-bitmap.yaml tasks/workunits.yaml}
Failure Reason: Command failed (workunit test mgr/test_localpool.sh) on smithi190 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=bb561855173f8aa62c2ddcb580a927455a578973 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/mgr/test_localpool.sh'

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/monthrash/{ceph.yaml clusters/3-mons.yaml d-require-luminous/at-end.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml thrashers/many.yaml workloads/rados_5925.yaml}

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
Description: rados/objectstore/objectstore.yaml
Failure Reason: Command crashed: 'sudo TESTDIR=/home/ubuntu/cephtest bash -c \'mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3 --gtest_catch_exceptions=0\''

Ceph Branch: wip-yuri-testing-2019-06-05-2303-luminous
Suite Branch: wip-yuri-testing-2019-06-05-2303-luminous
Teuthology Branch: master
Machine: smithi
OS: centos
Description: rados/singleton-nomsgr/{all/valgrind-leaks.yaml rados.yaml}
Failure Reason: Command failed on smithi202 with status 1: 'sudo yum -y install python34-cephfs'
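
The failure reasons above repeat a few signatures: the 'sudo yum -y install python34-cephfs' package failure on the centos jobs, workunit timeouts (status 124), a test_librbd.sh segfault (status 139), and the rest/test.py failures. A minimal Python sketch of how one might tally these signatures when triaging, assuming the Failure Reason lines have been copied into a plain-text file (the file name failures.txt and the pattern labels are illustrative, not part of the run output):

    # Tally recurring failure signatures from a teuthology run.
    # Assumes one failure reason per line in failures.txt (hypothetical file).
    from collections import Counter
    import re

    patterns = {
        "python34-cephfs install failure": r"yum -y install python34-cephfs",
        "workunit timeout (status 124)":   r"with status 124",
        "workunit segfault (status 139)":  r"with status 139",
        "rest/test.py failure":            r"workunit test rest/test\.py",
    }

    counts = Counter()
    with open("failures.txt") as fh:
        for line in fh:
            for label, pat in patterns.items():
                if re.search(pat, line):
                    counts[label] += 1

    # Print the most common signatures first.
    for label, n in counts.most_common():
        print(f"{n:3d}  {label}")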