All jobs below ran with Ceph branch master, suite branch master, and teuthology branch master on vps machines. Each entry lists the test description, the target OS where one was recorded, and the failure reason; entries without a failure reason reported none.

Description: smoke/1node/{clusters/{fixed-1.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/ceph-deploy.yaml}
OS: Ubuntu 16.04
Failure Reason: Command failed on vpm045 with status 32: 'sudo umount /dev/vdb1'
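Exit status 32 is the util-linux umount "mount failure" code, most often meaning the filesystem was still busy when the unmount was attempted. A diagnostic sketch using the device from the failure above (the lazy-unmount fallback is illustrative, not what the suite ran):

    # Show which processes still hold the filesystem open, then retry the unmount.
    sudo fuser -vm /dev/vdb1
    sudo umount /dev/vdb1 || sudo umount -l /dev/vdb1   # -l detaches lazily as a last resort
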
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_blogbench.yaml}

Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}
OS: CentOS 7.3

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_fsstress.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_iozone.yaml}
Failure Reason: Could not reconnect to ubuntu@vpm131.front.sepia.ceph.com
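A "Could not reconnect" failure means teuthology lost SSH access to the VM mid-run and it never came back (typically a hang or a failed reboot), so no command-level error is available. A manual reachability probe against the host named above:

    # Probe SSH with a short timeout; a refusal or hang means the VM never recovered.
    ssh -o ConnectTimeout=10 ubuntu@vpm131.front.sepia.ceph.com uptime
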
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/cfuse_workunit_suites_pjd.yaml}
Failure Reason: Ansible apt task failed on vpm173.front.sepia.ceph.com while installing ntp: "E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)" / "E: Unable to lock the administration directory (/var/lib/dpkg/), is another process using it?"
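This is the standard apt lock-contention error: another process (often an automatic apt-daily or unattended-upgrades run on a freshly booted VM) held /var/lib/dpkg/lock while ansible tried to install ntp. A sketch for confirming the lock holder and retrying by hand:

    # Identify the process holding the dpkg lock, wait for it to finish, then retry.
    sudo fuser -v /var/lib/dpkg/lock
    sudo apt-get -y install ntp
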
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_direct_io.yaml}
Failure Reason: Command failed on vpm173 with status 110: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 172.21.2.113:6789,172.21.2.77:6790,172.21.2.77:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'
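Exit status 110 corresponds to errno ETIMEDOUT ("Connection timed out"), suggesting the kernel client could not reach the listed monitors before the mount gave up; the same pattern recurs in the three kclient jobs below. A quick confirmation of the errno mapping plus a reachability probe (monitor address copied from the failure above; the nc usage is illustrative):

    # Map the exit status to its errno string, then test one monitor endpoint.
    python3 -c 'import os; print(os.strerror(110))'    # Connection timed out
    nc -z -w 5 172.21.2.113 6789 && echo mon reachable
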
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_dbench.yaml}
Failure Reason: Command failed on vpm123 with status 110: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 172.21.2.193:6789,172.21.2.51:6790,172.21.2.51:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_fsstress.yaml}
Failure Reason: Command failed on vpm093 with status 110: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 172.21.2.131:6789,172.21.2.161:6790,172.21.2.161:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml}
Failure Reason: Command failed on vpm137 with status 110: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /sbin/mount.ceph 172.21.2.65:6789,172.21.2.135:6790,172.21.2.135:6789:/ /home/ubuntu/cephtest/mnt.0 -v -o name=0,secretfile=/home/ubuntu/cephtest/ceph.data/client.0.secret,norequire_active_mds'

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/libcephfs_interface_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/mon_thrash.yaml}
Failure Reason: Command failed (workunit test rados/test.sh) on vpm021 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=master TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
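Status 124 is the coreutils timeout(1) convention for a command killed at its deadline, so rados/test.sh here ran past the 3-hour cap rather than failing an assertion. The convention is easy to verify:

    # timeout exits 124 when the deadline expires; otherwise it passes through the command's status.
    timeout 2 sleep 5; echo $?    # prints 124
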
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_api_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_bench.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cache_snaps.yaml}
Failure Reason: Could not reconnect to ubuntu@vpm031.front.sepia.ceph.com

Description: smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/ubuntu_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml}
OS: Ubuntu 16.04
Failure Reason: Command failed on vpm101 with status 1: 'adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage sudo ceph --cluster ceph health'
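Here the health poll itself returned non-zero. A first diagnostic step is the detailed form of the same command, which expands the summary into per-check messages (cluster name as in the failure above):

    # Expand the cluster health summary into individual check messages.
    sudo ceph --cluster ceph health detail
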
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cls_all.yaml}
Failure Reason: Could not reconnect to ubuntu@vpm055.front.sepia.ceph.com

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_ec_snaps.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_python.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_workunit_loadgen_mix.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_api_tests.yaml}
Failure Reason: Could not reconnect to ubuntu@vpm031.front.sepia.ceph.com

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_cli_import_export.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_fsx.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_python_api_tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rbd_workunit_suites_iozone.yaml}
Failure Reason: Command failed on vpm085 with status 110: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage rbd --user 0 -p rbd map testimage.client.0 && while test '!' -e /dev/rbd/rbd/testimage.client.0 ; do sleep 1 ; done"
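As with the kclient mounts above, status 110 is ETIMEDOUT: either the rbd map or the udev wait loop after it timed out. When reproducing by hand, the kernel log usually names the unreachable monitor or OSD (commands are illustrative):

    # Retry the map and inspect the kernel client's errors.
    sudo rbd --user 0 -p rbd map testimage.client.0
    dmesg | tail -n 20    # look for libceph/rbd connection messages
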
Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_ec_s3tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_s3tests.yaml}

Description: smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rgw_swift.yaml}
Failure Reason: Could not reconnect to ubuntu@vpm183.front.sepia.ceph.com