All jobs in this run used Ceph branch master, suite branch master, and teuthology branch master, and ran on ovh machines.

OS: ubuntu 16.04
Description: smoke/1node/{clusters/{fixed-1.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/ceph-deploy.yaml}
Failure Reason: Command failed on ovh042 with status 32: 'sudo umount /dev/sdb1'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_blogbench.yaml}
Failure Reason: "2018-04-19 05:38:26.640964 mon.a mon.0 158.69.91.7:6789/0 79 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

OS: centos 7.4
Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}
Failure Reason: Command failed on ovh033 with status 5: 'sudo stop ceph-all || sudo service ceph stop || sudo systemctl stop ceph.target'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_fsstress.yaml}
Failure Reason: "2018-04-19 06:00:20.616342 mon.a mon.0 158.69.92.171:6789/0 76 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_iozone.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_pjd.yaml}
Failure Reason: "2018-04-19 06:13:07.817376 mon.a mon.0 158.69.92.82:6789/0 106 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml}
Failure Reason: "2018-04-19 05:50:20.995096 mon.a mon.0 158.69.92.1:6789/0 73 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml}
Failure Reason: "2018-04-19 06:43:57.700423 mon.a mon.0 158.69.94.44:6789/0 83 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml}
Failure Reason: "2018-04-19 05:46:23.857447 mon.b mon.0 158.69.91.81:6789/0 72 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml}
Failure Reason: "2018-04-19 05:48:23.965531 mon.a mon.0 158.69.91.98:6789/0 75 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/libcephfs_interface_tests.yaml}
Failure Reason: "2018-04-19 06:00:46.956204 mon.a mon.0 158.69.92.165:6789/0 75 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/mon_thrash.yaml}
Failure Reason: "2018-04-19 06:04:54.290027 mon.a mon.0 158.69.92.196:6789/0 75 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_api_tests.yaml}
Failure Reason: "2018-04-19 06:17:23.225337 mon.a mon.0 158.69.93.110:6789/0 108 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_bench.yaml}
Failure Reason: "2018-04-19 06:19:56.887288 mon.b mon.0 158.69.93.172:6789/0 74 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cache_snaps.yaml}
Failure Reason: "2018-04-19 06:37:34.617025 mon.a mon.0 158.69.94.224:6789/0 110 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

OS: ubuntu 16.04
Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cls_all.yaml}
Failure Reason: "2018-04-19 06:32:26.740486 mon.a mon.0 158.69.94.136:6789/0 104 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_ec_snaps.yaml}
Failure Reason: "2018-04-19 06:42:15.427731 mon.b mon.0 158.69.94.30:6789/0 69 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_python.yaml}
Failure Reason: "2018-04-19 06:15:34.969677 mon.a mon.0 158.69.92.99:6789/0 74 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_workunit_loadgen_mix.yaml}
Failure Reason: "2018-04-19 06:32:40.701095 mon.a mon.0 158.69.94.134:6789/0 107 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_api_tests.yaml}
Failure Reason: "2018-04-19 08:08:22.800713 mon.a mon.0 158.69.65.85:6789/0 108 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_cli_import_export.yaml}
Failure Reason: Command failed (workunit test rbd/import_export.sh) on ovh060 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 RBD_CREATE_ARGS=--new-format adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/import_export.sh'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_fsx.yaml}
Failure Reason: "2018-04-19 09:40:42.086770 mon.a mon.0 158.69.68.185:6789/0 105 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_python_api_tests.yaml}
Failure Reason: "2018-04-19 07:02:44.018826 mon.b mon.0 158.69.64.125:6789/0 102 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_workunit_suites_iozone.yaml}
Failure Reason: Command failed (workunit test suites/iozone.sh) on ovh086 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/iozone.sh'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_ec_s3tests.yaml}
Failure Reason: Command failed on ovh016 with status 2: 'adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage radosgw-admin -n client.0 user rm --uid foo.client.0 --purge-data --cluster ceph'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_s3tests.yaml}
Failure Reason: "2018-04-19 07:00:53.746247 mon.b mon.0 158.69.95.37:6789/0 106 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_swift.yaml}
Failure Reason: "2018-04-19 07:19:43.316337 mon.a mon.0 158.69.64.213:6789/0 70 : cluster [ERR] Health check failed: 1 filesystem is offline (MDS_ALL_DOWN)" in cluster log