All jobs: Ceph branch master, suite branch master, teuthology branch master, machine type ovh.

OS: ubuntu 16.04
Description: smoke/1node/{clusters/{fixed-1.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/ceph-deploy.yaml}
Failure Reason: Command failed on ovh066 with status 32: 'sudo umount /dev/sdb1'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_blogbench.yaml}

OS: centos 7.3
Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}
Failure Reason: Command failed on ovh052 with status 1: 'sudo ceph-create-keys --cluster ceph --id ovh052'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_fsstress.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_iozone.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_pjd.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml}
Failure Reason: Command failed on ovh050 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.90.200:6789,158.69.90.148:6790,158.69.90.148:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml}
Failure Reason: Command failed on ovh093 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.90.244:6789,158.69.90.64:6790,158.69.90.64:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml}
Failure Reason: Command failed on ovh057 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.90.153:6789,158.69.90.196:6790,158.69.90.196:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/libcephfs_interface_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/mon_thrash.yaml}
Failure Reason: "2017-09-08 19:59:49.671203 mon.a mon.0 158.69.90.23:6789/0 194 : cluster [WRN] Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_api_tests.yaml}
Failure Reason: "2017-09-08 20:04:11.772025 mon.a mon.0 158.69.90.137:6789/0 145 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_bench.yaml}
Failure Reason: "2017-09-08 20:18:31.119159 mon.a mon.0 158.69.90.7:6789/0 880 : cluster [ERR] Health check failed: full ratio(s) out of order (OSD_OUT_OF_ORDER_FULL)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cache_snaps.yaml}
Failure Reason: "2017-09-08 20:08:56.788438 mon.b mon.0 158.69.90.69:6789/0 111 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

OS: ubuntu 16.04
Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cls_all.yaml}
Failure Reason: Command failed (workunit test cls/test_cls_sdk.sh) on ovh033 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_sdk.sh'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_ec_snaps.yaml}
Failure Reason: "2017-09-08 20:02:43.647001 mon.b mon.0 158.69.90.168:6789/0 138 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_python.yaml}
Failure Reason: "2017-09-08 20:03:10.025929 mon.b mon.0 158.69.90.140:6789/0 150 : cluster [WRN] Health check failed: noup flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_workunit_loadgen_mix.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_api_tests.yaml}
Failure Reason: "2017-09-08 20:06:39.123713 mon.a mon.0 158.69.90.37:6789/0 194 : cluster [WRN] Health check failed: 1 cache pools are missing hit_sets (CACHE_POOL_NO_HIT_SET)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_cli_import_export.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_fsx.yaml}
Failure Reason: "2017-09-08 20:05:10.276009 mon.a mon.0 158.69.90.2:6789/0 143 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_python_api_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_workunit_suites_iozone.yaml}
Failure Reason: Command failed on ovh094 with status 110: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage rbd --user 0 -p rbd map testimage.client.0 && while test '!' -e /dev/rbd/rbd/testimage.client.0 ; do sleep 1 ; done"

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_ec_s3tests.yaml}
Failure Reason: Command failed (s3 tests against rgw) on ovh041 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/s3-tests -v -a '!fails_on_rgw,!lifecycle'"

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_s3tests.yaml}
Failure Reason: Command failed (s3 tests against rgw) on ovh006 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/s3-tests -v -a '!fails_on_rgw,!lifecycle'"

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_swift.yaml}
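The recurring "... in cluster log" failures above are not command errors: after each job, teuthology's ceph task scans the cluster log and fails the run on any [WRN] or [ERR] line that is not explicitly whitelisted, so transient health checks such as MON_DOWN during mon thrashing, or the noscrub/noup flags set while rados tasks run, trip it. A minimal sketch of how such messages get whitelisted in a suite's YAML fragment follows; the specific entries are illustrative assumptions chosen to match the failures above, not the smoke suite's actual list:

    tasks:
    - ceph:
        # Each entry is a regex matched against cluster-log lines; the
        # health-check codes appear in parentheses in the log, hence the
        # escaped \( \). Example entries only, not the suite's own list.
        log-whitelist:
        - \(MON_DOWN\)
        - \(OSDMAP_FLAGS\)
        - \(OSD_OUT_OF_ORDER_FULL\)
        - \(CACHE_POOL_NO_HIT_SET\)

Whether any given entry belongs in the whitelist is a judgment call: whitelisting OSDMAP_FLAGS noise from a thrashing test is routine, while an [ERR]-level check like OSD_OUT_OF_ORDER_FULL may indicate a real bug that should be fixed rather than silenced.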
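The command failures also group by exit status, with the caveat that this errno reading is an inference, not something the logs state: the three mount.ceph failures with status 22 and the rbd map wait with status 110 line up with errno values EINVAL and ETIMEDOUT, which points at the kernel client's mount/map path rather than at the workunits that never got to run.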