Status | Job ID | Links | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
fail 1561678 2017-08-25 05:01:47 2017-08-25 05:04:41 2017-08-25 05:30:40 0:25:59 0:14:58 0:11:01 vps master ubuntu 16.04 smoke/1node/{clusters/{fixed-1.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/ceph-deploy.yaml} 1
Failure Reason:

'check health' reached maximum tries (6) after waiting for 60 seconds

pass 1561681 2017-08-25 05:01:48 2017-08-25 05:06:35 2017-08-25 05:58:36 0:52:01 0:23:52 0:28:09 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_blogbench.yaml} 3
fail 1561684 2017-08-25 05:01:49 2017-08-25 05:08:38 2017-08-25 10:26:44 5:18:06 0:35:17 4:42:49 vps master centos 7.3 smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml} 4
Failure Reason:

Command failed (workunit test rados/load-gen-mix.sh) on vpm043 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/load-gen-mix.sh'

fail 1561687 2017-08-25 05:01:49 2017-08-25 05:12:35 2017-08-25 06:18:35 1:06:00 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_fsstress.yaml} 3
Failure Reason:

Could not reconnect to ubuntu@vpm163.front.sepia.ceph.com

pass 1561690 2017-08-25 05:01:50 2017-08-25 05:27:31 2017-08-25 07:03:32 1:36:01 0:39:25 0:56:36 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_iozone.yaml} 3
pass 1561693 2017-08-25 05:01:51 2017-08-25 05:27:31 2017-08-25 07:33:33 2:06:02 0:23:11 1:42:51 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_pjd.yaml} 3
fail 1561696 2017-08-25 05:01:51 2017-08-25 05:30:42 2017-08-25 06:06:42 0:36:00 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml} 3
Failure Reason:

Could not reconnect to ubuntu@vpm127.front.sepia.ceph.com

pass 1561699 2017-08-25 05:01:52 2017-08-25 05:30:42 2017-08-25 06:34:43 1:04:01 0:47:42 0:16:19 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml} 3
pass 1561702 2017-08-25 05:01:53 2017-08-25 05:32:40 2017-08-25 07:22:42 1:50:02 0:23:23 1:26:39 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml} 3
pass 1561705 2017-08-25 05:01:54 2017-08-25 05:42:38 2017-08-25 07:06:39 1:24:01 0:21:02 1:02:59 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} 3
pass 1561708 2017-08-25 05:01:54 2017-08-25 05:49:30 2017-08-25 07:07:31 1:18:01 0:24:39 0:53:22 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/libcephfs_interface_tests.yaml} 3
fail 1561711 2017-08-25 05:01:55 2017-08-25 05:58:40 2017-08-25 08:32:43 2:34:03 0:33:01 2:01:02 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/mon_thrash.yaml} 3
Failure Reason:

"2017-08-25 08:12:37.225324 mon.a mon.0 172.21.2.73:6789/0 185 : cluster [WRN] Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log

fail 1561714 2017-08-25 05:01:56 2017-08-25 06:00:58 2017-08-25 07:26:59 1:26:01 0:32:22 0:53:39 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_api_tests.yaml} 3
Failure Reason:

"2017-08-25 07:06:31.100906 mon.b mon.0 172.21.2.21:6789/0 109 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

fail 1561717 2017-08-25 05:01:57 2017-08-25 06:06:49 2017-08-25 07:52:50 1:46:01 0:42:37 1:03:24 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_bench.yaml} 3
Failure Reason:

"2017-08-25 07:25:17.062454 mon.b mon.0 172.21.2.7:6789/0 159 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

fail 1561720 2017-08-25 05:01:57 2017-08-25 06:08:54 2017-08-25 08:54:57 2:46:03 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cache_snaps.yaml} 3
Failure Reason:

Could not reconnect to ubuntu@vpm195.front.sepia.ceph.com

fail 1561725 2017-08-25 05:01:58 2017-08-25 06:18:49 2017-08-25 10:18:53 4:00:04 0:29:36 3:30:28 vps master ubuntu 16.04 smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml} 4
Failure Reason:

Command failed (workunit test rados/load-gen-mix.sh) on vpm013 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/load-gen-mix.sh'

fail 1561728 2017-08-25 05:01:59 2017-08-25 06:21:10 2017-08-25 07:09:10 0:48:00 0:18:26 0:29:34 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cls_all.yaml} 3
Failure Reason:

Command failed (workunit test cls/test_cls_sdk.sh) on vpm031 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_sdk.sh'

fail 1561731 2017-08-25 05:02:00 2017-08-25 06:24:50 2017-08-25 08:08:51 1:44:01 0:32:40 1:11:21 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_ec_snaps.yaml} 3
Failure Reason:

"2017-08-25 07:48:37.984092 mon.a mon.0 172.21.2.37:6789/0 187 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

fail 1561734 2017-08-25 05:02:01 2017-08-25 06:29:17 2017-08-25 07:51:17 1:22:00 0:26:15 0:55:45 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_python.yaml} 3
Failure Reason:

"2017-08-25 07:43:41.073770 mon.a mon.0 172.21.2.113:6789/0 241 : cluster [WRN] Health check failed: noup flag(s) set (OSDMAP_FLAGS)" in cluster log

pass 1561737 2017-08-25 05:02:01 2017-08-25 06:35:53 2017-08-25 08:40:02 2:04:09 0:36:17 1:27:52 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_workunit_loadgen_mix.yaml} 3
fail 1561740 2017-08-25 05:02:02 2017-08-25 06:39:03 2017-08-25 08:31:05 1:52:02 0:27:33 1:24:29 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_api_tests.yaml} 3
Failure Reason:

"2017-08-25 08:17:45.665381 mon.b mon.0 172.21.2.97:6789/0 476 : cluster [WRN] Health check failed: 1 cache pools are missing hit_sets (CACHE_POOL_NO_HIT_SET)" in cluster log

pass 1561743 2017-08-25 05:02:04 2017-08-25 06:39:03 2017-08-25 07:33:04 0:54:01 0:21:03 0:32:58 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_cli_import_export.yaml} 3
fail 1561746 2017-08-25 05:02:04 2017-08-25 06:40:56 2017-08-25 08:08:57 1:28:01 0:23:34 1:04:27 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_fsx.yaml} 3
Failure Reason:

"2017-08-25 07:59:53.365808 mon.b mon.0 172.21.2.103:6789/0 161 : cluster [WRN] Health check failed: noscrub flag(s) set (OSDMAP_FLAGS)" in cluster log

pass 1561749 2017-08-25 05:02:05 2017-08-25 06:42:42 2017-08-25 07:50:43 1:08:01 0:39:35 0:28:26 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_python_api_tests.yaml} 3
pass 1561752 2017-08-25 05:02:06 2017-08-25 06:43:06 2017-08-25 09:37:08 2:54:02 0:35:58 2:18:04 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_workunit_suites_iozone.yaml} 3
fail 1561755 2017-08-25 05:02:07 2017-08-25 06:46:38 2017-08-25 08:24:39 1:38:01 0:16:47 1:21:14 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_ec_s3tests.yaml} 3
Failure Reason:

'default_idle_timeout'

fail 1561758 2017-08-25 05:02:07 2017-08-25 06:51:04 2017-08-25 07:33:04 0:42:00 0:17:02 0:24:58 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_s3tests.yaml} 3
Failure Reason:

'default_idle_timeout'

fail 1561761 2017-08-25 05:02:08 2017-08-25 06:55:08 2017-08-25 08:57:10 2:02:02 0:19:53 1:42:09 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_swift.yaml} 3
Failure Reason:

'default_idle_timeout'