Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/blogbench.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_latest.yaml}
Failure Reason:    Command failed on smithi088 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.2 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/point-to-point-x/{distros/centos_7.3.yaml point-to-point-upgrade.yaml}
Failure Reason:    Command failed on smithi011 with status 1: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph osd set-require-min-compat-client luminous'"

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{radosbench.yaml rbd-cls.yaml rbd-import-export.yaml rbd_api.yaml readwrite.yaml snaps-few-objects.yaml} 5-finish-upgrade.yaml 6-luminous.yaml 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/centos_latest.yaml}
Failure Reason:    Command failed on smithi136 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.3 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/stress-split-erasure-code/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/ec-rados-default.yaml 5-finish-upgrade.yaml 6-luminous.yaml 7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml distros/centos_latest.yaml}
Failure Reason:    "2017-07-12 13:58:41.585166 mon.a mon.0 172.21.15.4:6789/0 22 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/cache-pool-snaps.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_latest.yaml}
Failure Reason:    "2017-07-12 13:54:36.592176 mon.b mon.0 172.21.15.9:6789/0 341 : cluster [ERR] Health check failed: 1 mds daemon down (MDS_FAILED)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/ec-rados-default.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml}
Failure Reason:    "2017-07-12 13:49:28.407201 mon.b mon.0 172.21.15.159:6789/0 60 : cluster [WRN] Health check failed: 3 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/rados_api.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_latest.yaml}
Failure Reason:    Command failed on smithi105 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.2 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/rados_loadgenbig.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_latest.yaml}
Failure Reason:    Command failed on smithi136 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.2 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{radosbench.yaml rbd-cls.yaml rbd-import-export.yaml rbd_api.yaml readwrite.yaml snaps-few-objects.yaml} 5-finish-upgrade.yaml 6-luminous.yaml 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/ubuntu_14.04.yaml}
Failure Reason:    "2017-07-12 13:46:30.225680 mon.a mon.0 172.21.15.53:6789/0 25 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/stress-split-erasure-code/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/ec-rados-default.yaml 5-finish-upgrade.yaml 6-luminous.yaml 7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml distros/ubuntu_14.04.yaml}
Failure Reason:    "2017-07-12 14:01:49.174852 mon.a mon.0 172.21.15.162:6789/0 22 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/test_rbd_api.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml}
Failure Reason:    "2017-07-12 13:59:02.799673 mon.a mon.0 172.21.15.62:6789/0 529 : cluster [ERR] Health check failed: 1 mds daemon down (MDS_FAILED)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/test_rbd_python.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_latest.yaml}
Failure Reason:    "2017-07-12 13:52:12.767246 mon.b mon.0 172.21.15.185:6789/0 74 : cluster [ERR] Health check failed: 1 mds daemon down (MDS_FAILED)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/blogbench.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_latest.yaml}
Failure Reason:    Command failed on smithi139 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.2 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/point-to-point-x/{distros/ubuntu_14.04.yaml point-to-point-upgrade.yaml}
Failure Reason:    Command failed on smithi045 with status 1: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph osd set-require-min-compat-client luminous'"

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/cache-pool-snaps.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml}
Failure Reason:    "2017-07-12 14:25:12.968420 mon.b mon.0 172.21.15.40:6789/0 123 : cluster [ERR] Health check failed: 1 mds daemon down (MDS_FAILED)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/ec-rados-default.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_latest.yaml}
Failure Reason:    "2017-07-12 15:14:11.883874 mon.a mon.0 172.21.15.92:6789/0 519 : cluster [ERR] Health check failed: 1 mds daemon down (MDS_FAILED)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{radosbench.yaml rbd-cls.yaml rbd-import-export.yaml rbd_api.yaml readwrite.yaml snaps-few-objects.yaml} 5-finish-upgrade.yaml 6-luminous.yaml 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/ubuntu_latest.yaml}
Failure Reason:    "2017-07-12 14:58:24.898798 mon.a mon.0 172.21.15.188:6789/0 25 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/stress-split-erasure-code/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/ec-rados-default.yaml 5-finish-upgrade.yaml 6-luminous.yaml 7-final-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml distros/ubuntu_latest.yaml}
Failure Reason:    Command failed on smithi093 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.3 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/rados_api.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_latest.yaml}
Failure Reason:    "2017-07-12 13:57:47.167682 mon.a mon.0 172.21.15.144:6789/0 61 : cluster [ERR] Health check failed: 1 mds daemon down (MDS_FAILED)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 14.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/rados_loadgenbig.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml}
Failure Reason:    "2017-07-12 13:58:34.535523 mon.b mon.0 172.21.15.50:6789/0 54 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 16.04
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/test_rbd_api.yaml 3-upgrade-sequence/upgrade-all.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_latest.yaml}
Failure Reason:    Command failed on smithi187 with status 2: 'sudo ceph --cluster ceph osd crush create-or-move osd.2 1.0 host=localhost root=default'

Ceph Branch:       wip-sage-testing
Suite Branch:      wip-sage-testing
Teuthology Branch: master
Machine:           smithi
OS:                centos 7.3
Description:       upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/test_rbd_python.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_latest.yaml}
Failure Reason:    "2017-07-12 14:08:21.013692 mon.b mon.0 172.21.15.26:6789/0 152 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log