Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 332560 2016-07-25 11:21:09 2016-07-25 19:14:04 2016-07-26 00:27:30 5:13:26 3:31:26 1:42:00 vps master centos 7.2 upgrade:jewel-x/parallel/{kraken.yaml 0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-workload/{blogbench.yaml ec-rados-default.yaml rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-upgrade-sequence/upgrade-all.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_7.2.yaml} 3
Failure Reason:

Command failed (workunit test rados/test.sh) on vpm069 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1194331665ffe878acfd3b24b8ad8a88961d2d9d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/workunit.client.1/rados/test.sh'
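The same workunit invocation recurs in most of the failures below, so an annotated sketch of its structure follows. The values are copied from the command logged above; the comments are an interpretation of that logged command, not an excerpt from the teuthology source.

    # Scratch directory on the client.1 mount; the workunit is run from there.
    mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp
    cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp
    export CEPH_CLI_TEST_DUP_COMMAND=1                        # appears to enable duplicate-command checking in the CLI test wrappers
    export CEPH_REF=1194331665ffe878acfd3b24b8ad8a88961d2d9d  # sha1 of the ceph build under test
    export TESTDIR="/home/ubuntu/cephtest"                    # teuthology's working directory on the node
    export CEPH_ARGS="--cluster ceph"                         # picked up by ceph CLI calls inside the script
    export CEPH_ID="1"                                        # the workunit runs as client.1
    export PATH=$PATH:/usr/sbin
    # adjust-ulimits raises resource limits, ceph-coverage collects coverage data into
    # the archive directory, and timeout kills the script after 3 hours (exit 124).
    # Status 1 here therefore means rados/test.sh itself failed, not that it timed out.
    adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage \
        timeout 3h /home/ubuntu/cephtest/workunit.client.1/rados/test.sh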

fail 332561 2016-07-25 11:21:11 2016-07-25 19:14:03 2016-07-25 21:51:25 2:37:22 1:22:06 1:15:16 vps master centos 7.2 upgrade:jewel-x/point-to-point-x/{point-to-point-upgrade.yaml distros/centos_7.2.yaml} 3
Failure Reason:

Command failed (workunit test cls/test_cls_rbd.sh) on vpm183 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=jewel TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/workunit.client.1/cls/test_cls_rbd.sh'

fail 332562 2016-07-25 11:21:12 2016-07-25 19:14:04 2016-07-26 00:31:32 5:17:28 3:53:31 1:23:57 vps master centos 7.2 upgrade:jewel-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 6-next-mon/monb.yaml 7-workload/{radosbench.yaml rbd_api.yaml} 8-next-mon/monc.yaml 9-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/centos_7.2.yaml} 3
Failure Reason:

Command failed on vpm143 with status 33: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd reweight-by-utilization 1000 0.05'
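Status 33 in the two reweight-by-utilization failures in this run (jobs 332562 and 332568) maps to errno EDOM ("Numerical argument out of domain") on Linux, and the ceph CLI typically exits with the errno of the failed command, so the monitor appears to have rejected the reweight arguments rather than the command hanging or timing out. A quick way to decode such a status (illustrative only):

    # Map exit status 33 to an errno name and message; 33 is EDOM on Linux.
    python3 -c 'import errno, os; print(errno.errorcode[33], "-", os.strerror(33))'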

pass 332563 2016-07-25 11:21:14 2016-07-25 19:14:04 2016-07-25 22:55:29 3:41:25 1:56:11 1:45:14 vps master centos 7.2 upgrade:jewel-x/stress-split-erasure-code/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/ec-rados-default.yaml 6-next-mon/monb.yaml 8-next-mon/monc.yaml 9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml distros/centos_7.2.yaml} 3
pass 332564 2016-07-25 11:21:15 2016-07-25 19:13:58 2016-07-25 21:45:22 2:31:24 1:23:51 1:07:33 vps master upgrade:jewel-x/stress-split-erasure-code-x86_64/{0-x86_64.yaml 0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/ec-rados-default.yaml 6-next-mon/monb.yaml 8-next-mon/monc.yaml 9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml} 3
fail 332565 2016-07-25 11:21:17 2016-07-25 19:14:08 2016-07-26 00:57:35 5:43:27 3:18:54 2:24:33 vps master ubuntu 14.04 upgrade:jewel-x/parallel/{kraken.yaml 0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-workload/{blogbench.yaml ec-rados-default.yaml rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml} 3
Failure Reason:

Command failed (workunit test rados/test.sh) on vpm109 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1194331665ffe878acfd3b24b8ad8a88961d2d9d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/workunit.client.1/rados/test.sh'

fail 332566 2016-07-25 11:21:19 2016-07-25 19:14:07 2016-07-26 00:19:36 5:05:29 3:16:19 1:49:10 vps master centos 7.2 upgrade:jewel-x/parallel/{kraken.yaml 0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-workload/{blogbench.yaml ec-rados-default.yaml rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/centos_7.2.yaml} 3
Failure Reason:

Command failed (workunit test rados/test.sh) on vpm023 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1194331665ffe878acfd3b24b8ad8a88961d2d9d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/workunit.client.1/rados/test.sh'

fail 332567 2016-07-25 11:21:20 2016-07-25 19:14:10 2016-07-25 21:35:31 2:21:21 1:05:51 1:15:30 vps master ubuntu 14.04 upgrade:jewel-x/point-to-point-x/{point-to-point-upgrade.yaml distros/ubuntu_14.04.yaml} 3
Failure Reason:

Command failed (workunit test cls/test_cls_rbd.sh) on vpm025 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=jewel TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/workunit.client.1/cls/test_cls_rbd.sh'

fail 332568 2016-07-25 11:21:22 2016-07-25 19:25:18 2016-07-25 23:02:38 3:37:20 2:39:23 0:57:57 vps master ubuntu 14.04 upgrade:jewel-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 6-next-mon/monb.yaml 7-workload/{radosbench.yaml rbd_api.yaml} 8-next-mon/monc.yaml 9-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/ubuntu_14.04.yaml} 3
Failure Reason:

Command failed on vpm169 with status 33: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd test-reweight-by-utilization 1000 3.0'

pass 332569 2016-07-25 11:21:23 2016-07-25 19:25:21 2016-07-25 21:44:39 2:19:18 1:34:03 0:45:15 vps master ubuntu 14.04 upgrade:jewel-x/stress-split-erasure-code/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/ec-rados-default.yaml 6-next-mon/monb.yaml 8-next-mon/monc.yaml 9-workload/ec-rados-plugin=jerasure-k=3-m=1.yaml distros/ubuntu_14.04.yaml} 3
fail 332570 2016-07-25 11:21:25 2016-07-25 19:25:25 2016-07-26 00:04:46 4:39:21 3:04:50 1:34:31 vps master ubuntu 14.04 upgrade:jewel-x/parallel/{kraken.yaml 0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 2-workload/{blogbench.yaml ec-rados-default.yaml rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-upgrade-sequence/upgrade-all.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml} 3
Failure Reason:

Command failed (workunit test rados/test.sh) on vpm021 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1194331665ffe878acfd3b24b8ad8a88961d2d9d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/workunit.client.1/rados/test.sh'