Name:         ovh025.front.sepia.ceph.com
Machine Type: ovh
Up:           False
Locked:       False
Locked Since: (empty)
Locked By:    (empty)
OS Type:      ubuntu
OS Version:   14.04
Arch:         x86_64
Description:  None
Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine Type  Teuthology Branch  OS Type  OS Version  Description  Nodes
(Runtime = Duration + In Waiting.)
pass 861039 2017-02-26 10:16:07 2017-02-26 19:03:49 2017-02-26 19:35:48 0:31:59 0:15:24 0:16:35 ovh wip-libcloud krbd/rbd-nomount/{clusters/fixed-3.yaml conf.yaml fs/btrfs.yaml install/ceph.yaml msgr-failures/few.yaml tasks/rbd_kernel.yaml} 3
pass 861027 2017-02-26 10:15:59 2017-02-26 18:47:33 2017-02-26 20:09:34 1:22:01 0:24:37 0:57:24 ovh wip-libcloud krbd/rbd/{clusters/fixed-3.yaml conf.yaml fs/btrfs.yaml msgr-failures/few.yaml tasks/rbd_workunit_kernel_untar_build.yaml} 3
pass 860880 2017-02-26 10:11:04 2017-02-26 16:59:18 2017-02-26 19:15:20 2:16:02 2:00:12 0:15:50 ovh wip-libcloud fs/thrash/{ceph-thrash/default.yaml ceph/base.yaml clusters/mds-1active-1standby.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml fs/xfs.yaml msgr-failures/osd-mds-delay.yaml overrides/whitelist_wrongly_marked_down.yaml tasks/cfuse_workunit_snaptests.yaml} 2
fail 860829 2017-02-26 10:10:41 2017-02-26 16:17:31 2017-02-26 17:09:31 0:52:00 0:41:27 0:10:33 ovh wip-libcloud fs/basic/{clusters/fixed-2-ucephfs.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml fs/btrfs.yaml inline/no.yaml overrides/whitelist_wrongly_marked_down.yaml tasks/cfuse_workunit_suites_ffsb.yaml} 2
Failure Reason:

Command failed (workunit test suites/ffsb.sh) on ovh025 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=ac3ba2adcd21ac011ad556ac4506623e61fbe696 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/ffsb.sh'
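This is teuthology's standard workunit wrapper: it cds into a scratch directory on the client mount, exports the test environment, and runs the named script from a clone of ceph at CEPH_REF under a 3-hour timeout. A minimal sketch for re-running the same workunit by hand on the held node (mount point, ref, and paths come from the log above; dropping adjust-ulimits and ceph-coverage is an assumption that they only wrap, not change, the test):

    # Re-run the failed ffsb workunit by hand (sketch; values from the log above).
    export TESTDIR=/home/ubuntu/cephtest
    export CEPH_REF=ac3ba2adcd21ac011ad556ac4506623e61fbe696
    export CEPH_ARGS="--cluster ceph"
    export CEPH_ID=0
    export CEPH_BASE=$TESTDIR/clone.client.0
    cd "$TESTDIR/mnt.0/client.0/tmp"
    timeout 3h "$TESTDIR/clone.client.0/qa/workunits/suites/ffsb.sh"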

pass 860755 2017-02-26 10:03:57 2017-02-26 15:04:52 2017-02-26 16:26:53 1:22:01 1:01:19 0:20:42 ovh wip-libcloud rbd/librbd/{cache/writeback.yaml cachepool/none.yaml clusters/{fixed-3.yaml openstack.yaml} copy-on-read/off.yaml fs/xfs.yaml msgr-failures/few.yaml workloads/fsx.yaml} 3
pass 860351 2017-02-26 10:01:32 2017-02-26 10:01:58 2017-02-26 15:14:02 5:12:04 4:58:16 0:13:48 ovh wip-libcloud rbd/qemu/{cache/writethrough.yaml cachepool/ec-cache.yaml clusters/{fixed-3.yaml openstack.yaml} features/defaults.yaml fs/xfs.yaml msgr-failures/few.yaml workloads/qemu_xfstests.yaml} 3
fail 857018 2017-02-24 22:41:44 2017-02-25 03:07:37 2017-02-25 04:17:38 1:10:01 1:02:12 0:07:49 ovh wip-libcloud rgw/singleton/{all/radosgw-admin-data-sync.yaml frontend/civetweb.yaml fs/xfs.yaml objectstore/filestore.yaml overrides.yaml rgw_pool_type/ec.yaml xfs.yaml} 2
Failure Reason:

"2017-02-25 04:06:00.576683 osd.2 158.69.66.159:6804/11406 1 : cluster [WRN] bad locator @24 on object @24 op osd_op(client.4202.0:62 24.5 24:b08b92bd::::head [delete] snapc 0=[] ondisk+write+known_if_redirected e51) v8" in cluster log

fail 856925 2017-02-24 22:39:16 2017-02-25 01:57:21 2017-02-25 03:05:21 1:08:00 1:03:55 0:04:05 ovh wip-libcloud rgw/singleton/{all/radosgw-admin-data-sync.yaml frontend/apache.yaml fs/xfs.yaml objectstore/filestore.yaml overrides.yaml rgw_pool_type/ec-profile.yaml xfs.yaml} 2
Failure Reason:

"2017-02-25 02:52:15.288524 osd.0 158.69.64.187:6800/11508 1 : cluster [WRN] bad locator @24 on object @24 op osd_op(client.4202.0:62 24.5 24:b08b92bd::::head [delete] snapc 0=[] ondisk+write+known_if_redirected e57) v8" in cluster log

fail 856873 2017-02-24 22:37:47 2017-02-25 01:17:27 2017-02-25 01:57:26 0:39:59 0:34:47 0:05:12 ovh wip-libcloud centos rgw/verify/{clusters/fixed-2.yaml frontend/apache.yaml fs/xfs.yaml msgr-failures/few.yaml objectstore/filestore.yaml overrides.yaml rgw_pool_type/ec.yaml tasks/rgw_swift.yaml validater/valgrind.yaml} 2
Failure Reason:

saw valgrind issues
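The valgrind validater runs the daemons under valgrind and fails the job if any error records are produced. A sketch for finding which daemon tripped it, assuming the usual archive layout and that the valgrind output is collected as XML (both are assumptions worth checking against the job log):

    # List valgrind output files that contain at least one error record.
    for f in remote/*/log/valgrind/*; do
        grep -q '<error>' "$f" && echo "$f"
    done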

fail 856847 2017-02-24 22:37:06 2017-02-25 00:55:09 2017-02-25 01:19:09 0:24:00 0:15:54 0:08:06 ovh wip-libcloud ubuntu rgw/multifs/{clusters/fixed-2.yaml frontend/apache.yaml fs/xfs.yaml objectstore/filestore.yaml overrides.yaml rgw_pool_type/ec.yaml tasks/rgw_user_quota.yaml} 2
Failure Reason:

HTTPConnectionPool(host='ovh040.front.sepia.ceph.com', port=7280): Max retries exceeded with url: / (Caused by NewConnectionError('<requests.packages.urllib3.connection.HTTPConnection object at 0x7f12665e1dd0>: Failed to establish a new connection: [Errno 111] Connection refused',))
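Here the client never reached the gateway: connection refused on port 7280 means nothing was listening, i.e. the apache/radosgw frontend did not come up (or died before the test started). A triage sketch to run on the gateway node (host and port come from the error above):

    # Is anything listening on the rgw port, and does it answer?
    ss -ltn | grep -w 7280 || echo 'nothing listening on 7280'
    curl -sv http://ovh040.front.sepia.ceph.com:7280/ 2>&1 | tail -n 5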

pass 856812 2017-02-24 22:36:05 2017-02-25 00:27:06 2017-02-25 00:55:05 0:27:59 0:22:45 0:05:14 ovh wip-libcloud ubuntu rgw/multifs/{clusters/fixed-2.yaml frontend/civetweb.yaml fs/xfs.yaml objectstore/filestore.yaml overrides.yaml rgw_pool_type/ec-profile.yaml tasks/rgw_bucket_quota.yaml} 2
fail 856749 2017-02-24 22:32:00 2017-02-24 23:38:47 2017-02-25 00:02:47 0:24:00 0:12:53 0:11:07 ovh wip-libcloud krbd/thrash/{clusters/fixed-3.yaml conf.yaml fs/btrfs.yaml thrashers/default.yaml workloads/rbd_workunit_suites_ffsb.yaml} 3
Failure Reason:

Command failed on ovh025 with status 5: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage rbd --user 0 -p rbd map testimage.client.0 && while test '!' -e /dev/rbd/rbd/testimage.client.0 ; do sleep 1 ; done"
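The failing command maps the image with krbd and then polls for the udev-created device symlink; the nonzero status came from rbd map itself. A debug sketch for the client node (image and pool names from the command above; the dmesg step assumes, as is typical, that the kernel logs why a map was rejected):

    # Try the map by hand and read the kernel's verdict.
    sudo rbd --user 0 -p rbd map testimage.client.0 || dmesg | tail -n 20
    ls -l /dev/rbd/rbd/   # udev symlinks appear here once a map succeeds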

fail 856742 2017-02-24 22:31:48 2017-02-24 23:31:19 2017-02-25 00:23:19 0:52:00 0:13:16 0:38:44 ovh wip-libcloud krbd/rbd-nomount/{clusters/fixed-3.yaml conf.yaml fs/btrfs.yaml install/ceph.yaml msgr-failures/many.yaml tasks/rbd_huge_tickets.yaml} 3
Failure Reason:

Command failed (workunit test rbd/huge-tickets.sh) on ovh025 with status 5: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=44b26f6ab48ced5ee066529cfdcc761c5385eb07 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rbd/huge-tickets.sh'

fail 856705 2017-02-24 22:30:08 2017-02-24 23:04:40 2017-02-24 23:42:37 0:37:57 0:13:47 0:24:10 ovh wip-libcloud kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml fs/xfs.yaml inline/no.yaml tasks/kclient_workunit_suites_fsync.yaml} 3
Failure Reason:

Command failed on ovh063 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.91.117:6789,158.69.90.96:6790,158.69.90.96:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'
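This failure and the two identical ones below are kernel cephfs mounts exiting with status 22, which is EINVAL. One plausible cause, though only dmesg on the node is authoritative, is the norequire_active_mds option being unrecognized by the node's kernel. A reproduction sketch using the monitor addresses from the log (the /mnt target is an arbitrary stand-in):

    # Reproduce the kernel mount and capture the kernel's complaint.
    sudo /sbin/mount.ceph \
        158.69.91.117:6789,158.69.90.96:6790,158.69.90.96:6789:/ /mnt -v \
        -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds
    # Status 22 is EINVAL; the rejected option or address is usually named here:
    dmesg | tail -n 10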

fail 856673 2017-02-24 22:29:10 2017-02-24 22:29:14 2017-02-24 23:21:12 0:51:58 0:16:06 0:35:52 ovh wip-libcloud kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml fs/xfs.yaml inline/yes.yaml tasks/kclient_workunit_suites_dbench.yaml} 3
Failure Reason:

Command failed on ovh014 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.90.150:6789,158.69.90.117:6790,158.69.90.117:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

fail 856652 2017-02-24 22:22:13 2017-02-24 22:22:29 2017-02-24 22:52:18 0:29:49 0:15:56 0:13:53 ovh wip-libcloud knfs/basic/{ceph/base.yaml clusters/extra-client.yaml fs/xfs.yaml mount/v3.yaml tasks/nfs_workunit_suites_ffsb.yaml} 4
Failure Reason:

Command failed on ovh032 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 158.69.88.38:6789,158.69.88.38:6790,158.69.88.236:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

fail 848720 2017-02-22 21:52:52 2017-02-24 19:06:42 2017-02-24 19:32:41 0:25:59 0:17:43 0:08:16 ovh wip-libcloud upgrade:client-upgrade/jewel-client-x/basic/{0-cluster/start.yaml 1-install/jewel-client-x.yaml 2-workload/rbd_cli_import_export.yaml} 2
Failure Reason:

Command failed on ovh064 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 2'
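This job and the three below share a signature: daemon-helper reports ceph-osd exiting with status 1, i.e. an OSD failed to start or crashed rather than any test logic failing. A triage sketch for the node that ran the daemon (standard ceph log location; the archived copy lives under remote/<host>/log/ in the job archive):

    # The OSD's own log usually names the reason it exited (here osd.2,
    # matching the -i 2 in the failed command).
    tail -n 50 /var/log/ceph/ceph-osd.2.log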

fail 848679 2017-02-22 21:51:12 2017-02-24 18:32:28 2017-02-24 19:08:27 0:35:59 0:19:44 0:16:15 ovh wip-libcloud kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml fs/xfs.yaml inline/no.yaml tasks/kclient_workunit_suites_pjd.yaml} 3
Failure Reason:

Command failed on ovh066 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 2'

fail 848657 2017-02-22 21:50:29 2017-02-24 18:09:15 2017-02-24 18:41:15 0:32:00 0:19:52 0:12:08 ovh wip-libcloud kcephfs/cephfs/{clusters/fixed-3-cephfs.yaml conf.yaml fs/xfs.yaml inline/no.yaml tasks/kclient_workunit_trivial_sync.yaml} 3
Failure Reason:

Command failed on ovh025 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 2'

fail 848623 2017-02-22 21:49:07 2017-02-24 17:40:08 2017-02-24 18:14:08 0:34:00 0:19:17 0:14:43 ovh wip-libcloud krbd/rbd/{clusters/fixed-3.yaml conf.yaml fs/btrfs.yaml msgr-failures/many.yaml tasks/rbd_workunit_suites_fsstress.yaml} 3
Failure Reason:

Command failed on ovh025 with status 1: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-osd -f --cluster ceph -i 4'