Smoke suite results. Values common to every job in this run:

    Ceph Branch:        luminous
    Suite Branch:       luminous
    Teuthology Branch:  wip-daemon-helper-systemd
    Machine:            ovh

Each entry below lists the job's OS (where recorded), its Description, and its Failure Reason (where recorded).

OS: ubuntu 16.04
Description: smoke/1node/{clusters/{fixed-1.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/ceph-deploy.yaml}
Failure Reason: 'check health' reached maximum tries (6) after waiting for 60 seconds
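
The 'check health' step polls the cluster until it reports healthy. A minimal sketch of the equivalent manual check, assuming the cluster name from the job; the 6 tries over 60 seconds mirror the reported limit:

    # poll health by hand; the task gives up after 6 tries over 60 seconds
    for i in $(seq 1 6); do
        sudo ceph --cluster ceph health | grep -q HEALTH_OK && break
        sleep 10
    done
    sudo ceph --cluster ceph health detail   # shows what is still unhealthy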

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_blogbench.yaml}

OS: centos 7.3
Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}
Failure Reason: Command failed on ovh016 with status 1: 'sudo ceph-create-keys --cluster ceph --id ovh016'
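
ceph-create-keys waits for its monitor to reach quorum before generating the client.admin and bootstrap keys, so a status-1 failure here usually means the mon on that node never formed quorum. A diagnostic sketch for the failing node; the admin socket path assumes the stock default:

    # is the mon running, and did it join quorum?
    sudo systemctl status ceph-mon@ovh016
    sudo ceph --cluster ceph --admin-daemon /var/run/ceph/ceph-mon.ovh016.asok mon_status
    # retry key creation once quorum is confirmed
    sudo ceph-create-keys --cluster ceph --id ovh016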

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_fsstress.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_iozone.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_pjd.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml}
Failure Reason: Command failed on ovh071 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.91.220:6789,158.69.91.133:6790,158.69.91.133:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'
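
Exit status 22 matches errno EINVAL, which from mount.ceph usually means the kernel rejected a mount option; norequire_active_mds is the newest option on that line and a plausible culprit on older kernels. A reproduction sketch for the client node, with monitor address and paths taken from the failure message; the three kclient jobs below fail the same way:

    # retry the mount without the suspect option, then add options back one at a time
    sudo /sbin/mount.ceph 158.69.91.220:6789:/ /home/ubuntu/cephtest/mnt.0 -v \
        -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret
    dmesg | tail   # the kernel logs which option it rejected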

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml}
Failure Reason: Command failed on ovh069 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.91.212:6789,158.69.90.96:6790,158.69.90.96:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml}
Failure Reason: Command failed on ovh014 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.91.10:6789,158.69.91.205:6790,158.69.91.205:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml}
Failure Reason: Command failed on ovh085 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.90.98:6789,158.69.91.22:6790,158.69.91.22:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/libcephfs_interface_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/mon_thrash.yaml}
Failure Reason: "2017-10-13 21:18:53.902943 mon.a mon.0 158.69.90.69:6789/0 4 : cluster [WRN] overall HEALTH_WARN 1 cache pools are missing hit_sets; 1/3 mons down, quorum b,c" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_api_tests.yaml}
Failure Reason: "2017-10-13 21:16:39.478542 mon.a mon.0 158.69.91.100:6789/0 122 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_bench.yaml}
Failure Reason: "2017-10-13 21:29:12.959474 mon.a mon.0 158.69.90.81:6789/0 2360 : cluster [ERR] Health check failed: full ratio(s) out of order (OSD_OUT_OF_ORDER_FULL)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cache_snaps.yaml}
Failure Reason: "2017-10-13 21:19:20.360448 mon.b mon.0 158.69.90.79:6789/0 136 : cluster [ERR] Health check failed: full ratio(s) out of order (OSD_OUT_OF_ORDER_FULL)" in cluster log

OS: ubuntu 16.04
Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}
Failure Reason: Command failed (workunit test rados/load-gen-mix.sh) on ovh063 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=luminous TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/load-gen-mix.sh'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cls_all.yaml}
Failure Reason: Command failed (workunit test cls/test_cls_sdk.sh) on ovh085 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=luminous TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_sdk.sh'
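
Workunits run from a clone of the qa tree on the test node, so the failing script can be re-run by hand with much the same environment. A sketch using the paths quoted in the failure:

    # re-run the failing workunit directly from the cloned qa tree
    cd /home/ubuntu/cephtest/clone.client.0/qa/workunits
    CEPH_ARGS="--cluster ceph" CEPH_ID=0 ./cls/test_cls_sdk.sh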

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_ec_snaps.yaml}
Failure Reason: "2017-10-13 21:19:03.935943 mon.a mon.0 158.69.91.127:6789/0 174 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_python.yaml}
Failure Reason: "2017-10-13 21:18:10.403347 mon.a mon.0 158.69.90.75:6789/0 239 : cluster [WRN] Health check failed: noup flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_workunit_loadgen_mix.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_api_tests.yaml}
Failure Reason: "2017-10-13 21:25:00.816695 mon.b mon.0 158.69.91.37:6789/0 490 : cluster [WRN] Health check failed: 1 cache pools are missing hit_sets (CACHE_POOL_NO_HIT_SET)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_cli_import_export.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_fsx.yaml}
Failure Reason: "2017-10-13 21:25:40.336050 mon.a mon.0 158.69.91.35:6789/0 199 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_python_api_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_workunit_suites_iozone.yaml}
Failure Reason: Command failed on ovh096 with status 110: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage rbd --user 0 -p rbd map testimage.client.0 && while test '!' -e /dev/rbd/rbd/testimage.client.0 ; do sleep 1 ; done"
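
Exit status 110 matches errno ETIMEDOUT: either the rbd map itself timed out or the udev-created device node never appeared. A diagnostic sketch for the client node, with image and pool names taken from the failure message:

    # did the map succeed, and does the kernel log show connection errors?
    sudo rbd --user 0 -p rbd map testimage.client.0
    rbd showmapped
    dmesg | tail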

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_ec_s3tests.yaml}
Failure Reason: Command failed (s3 tests against rgw) on ovh051 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/s3-tests -v -a '!fails_on_rgw,!lifecycle'"
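
This s3tests job and the rgw_s3tests job below fail identically. The nosetests invocation covers the whole suite, so narrowing to a single case from the same virtualenv is the usual first step; the test name here is only an example:

    # hypothetical single-test re-run from the virtualenv teuthology built
    S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf \
    BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg \
    /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests \
        -w /home/ubuntu/cephtest/s3-tests -v \
        s3tests.functional.test_s3:test_bucket_list_empty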

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_s3tests.yaml}
Failure Reason: Command failed (s3 tests against rgw) on ovh071 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/s3-tests -v -a '!fails_on_rgw,!lifecycle'"

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_swift.yaml}