Name:          vpm041.front.sepia.ceph.com
Machine Type:  vps
Up:            False
Locked:        False
Locked Since:
Locked By:
OS Type:       centos
OS Version:    7.4
Arch:          x86_64
Description:   None
Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 2174046 2018-02-08 23:07:38 2018-02-08 23:31:43 2018-02-08 23:55:43 0:24:00 0:18:44 0:05:16 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/enable_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/filestore-xfs.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
pass 2173990 2018-02-08 23:07:19 2018-02-08 23:07:43 2018-02-08 23:29:43 0:22:00 0:15:52 0:06:08 vps master ubuntu 14.04 ceph-deploy/basic/{ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_14.04.yaml objectstore/bluestore.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
fail 2117839 2018-01-27 05:55:46 2018-01-27 05:55:47 2018-01-27 06:21:46 0:25:59 0:20:30 0:05:29 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/bluestore.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

ceph-deploy: Failed to zap osds
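
The zap failure above comes from the ceph-deploy task's disk-preparation step, which wipes the spare data disk before creating OSDs. A rough manual stand-in is sketched below; the device name /dev/vdb is only an assumption for these vps guests, and the ceph-deploy syntax differs between 1.5.x ("disk zap host:disk") and 2.x ("disk zap host disk").

    # Hypothetical manual equivalent of ceph-deploy's "disk zap" on a test node.
    lsblk                              # confirm which device is the spare data disk
    sudo wipefs --all /dev/vdb         # clear filesystem/RAID signatures (device name assumed)
    sudo sgdisk --zap-all /dev/vdb     # destroy the GPT and MBR partition tables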

pass 2116730 2018-01-27 03:59:42 2018-01-27 03:59:52 2018-01-27 04:27:51 0:27:59 0:22:00 0:05:59 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/enable_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/filestore-xfs.yaml python_versions/python_2.yaml tasks/ceph-admin-commands.yaml} 2
fail 2109029 2018-01-25 05:55:52 2018-01-25 06:12:00 2018-01-25 06:29:59 0:17:59 0:12:00 0:05:59 vps master ubuntu 14.04 ceph-deploy/basic/{ceph-deploy-overrides/disable_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_14.04.yaml objectstore/bluestore.yaml python_versions/python_2.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

ceph-deploy: Failed to zap osds

fail 2109021 2018-01-25 05:55:47 2018-01-25 05:55:49 2018-01-25 06:11:48 0:15:59 0:11:01 0:04:58 vps master ubuntu 16.04 ceph-deploy/basic/{ceph-deploy-overrides/disable_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_latest.yaml objectstore/bluestore.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

Command failed on vpm041 with status 5: 'sudo stop ceph-all || sudo service ceph stop || sudo systemctl stop ceph.target'
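
Status 5 above is the exit code of the last command in the fallback chain: with "a || b || c" the shell reports the status of the final command that actually ran, so the Upstart and sysvinit attempts failed first and systemctl's status is what teuthology saw. A minimal sketch of that chain follows; the reading of exit code 5 is an assumption about this particular run.

    # Shutdown fallback: try Upstart, then sysvinit, then systemd.
    sudo stop ceph-all \
      || sudo service ceph stop \
      || sudo systemctl stop ceph.target
    echo $?   # status of the last command attempted; systemctl commonly exits 5
              # when the requested unit (here ceph.target) is not loaded on the node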

fail 2103434 2018-01-23 05:56:06 2018-01-23 06:17:51 2018-01-23 06:31:51 0:14:00 0:07:59 0:06:01 vps master ubuntu 14.04 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_14.04.yaml objectstore/bluestore.yaml python_versions/python_2.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

{'vpm041.front.sepia.ceph.com': {'_ansible_parsed': True, 'stderr_lines': ['E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)', 'E: Unable to lock the administration directory (/var/lib/dpkg/), is another process using it?'], 'changed': False, '_ansible_no_log': False, 'stdout': '', 'cache_updated': False, 'invocation': {'module_args': {'autoremove': False, 'force': False, 'force_apt_get': False, 'update_cache': None, 'only_upgrade': False, 'deb': None, 'cache_valid_time': 0, 'dpkg_options': 'force-confdef,force-confold', 'upgrade': None, 'name': 'krb5-user', 'package': ['krb5-user'], 'autoclean': False, 'purge': False, 'allow_unauthenticated': False, 'state': 'present', 'default_release': None, 'install_recommends': None}}, 'failed': True, 'stderr': 'E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)\nE: Unable to lock the administration directory (/var/lib/dpkg/), is another process using it?\n', 'rc': 100, 'msg': '\'/usr/bin/apt-get -y -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" install \'krb5-user\'\' failed: E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)\nE: Unable to lock the administration directory (/var/lib/dpkg/), is another process using it?\n', 'stdout_lines': [], 'cache_update_time': 1516688702}}
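
The error above means another package operation held the dpkg lock while ansible tried to install krb5-user. The commands below are a standard way to see what is holding the lock on a node such as vpm041; the culprit named in the comment is only the usual suspect, not something confirmed by this log.

    # Identify the process holding the dpkg/apt lock before retrying the install.
    sudo fuser -v /var/lib/dpkg/lock            # show the PID that has the lock open
    ps -ef | grep -E 'apt|dpkg|unattended'      # on Ubuntu this is often apt-daily / unattended-upgrades
    # Once the competing run finishes (or is stopped), the apt task can simply be retried.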

fail 2103416 2018-01-23 05:55:54 2018-01-23 05:56:01 2018-01-23 06:16:01 0:20:00 0:13:45 0:06:15 vps master ubuntu 14.04 ceph-deploy/basic/{ceph-deploy-overrides/disable_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_14.04.yaml objectstore/bluestore.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

ceph-deploy: Failed to zap osds

fail 2098366 2018-01-22 03:59:41 2018-01-22 03:59:42 2018-01-22 04:25:42 0:26:00 0:20:19 0:05:41 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/filestore-xfs.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

Command failed on vpm103 with status 1: 'sudo tar cz -f /tmp/tmprS62VX -C /var/lib/ceph/mon -- .'
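
This failure happens while teuthology archives the monitor store under /var/lib/ceph/mon. With GNU tar, exit status 1 generally means some files differed or changed while being read (for example a mon daemon still writing to its store), whereas 2 would be a fatal error. Below is a hedged manual re-run of the same archive step, with the unit and archive names assumed.

    # Quiesce the monitors, then repeat the archive step by hand.
    sudo systemctl stop ceph-mon.target                             # unit name assumed for this release
    sudo tar czf /tmp/mon-archive.tar.gz -C /var/lib/ceph/mon -- .  # archive path chosen arbitrarily here
    echo $?   # GNU tar: 0 = ok, 1 = files differed/changed during read, 2 = fatal error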

fail 2094711 2018-01-20 05:55:47 2018-01-20 05:55:48 2018-01-20 06:23:48 0:28:00 0:20:18 0:07:42 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/bluestore.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

ceph-deploy: Failed to zap osds

fail 2093616 2018-01-20 03:59:58 2018-01-20 04:00:01 2018-01-20 04:09:59 0:09:58 0:03:28 0:06:30 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/filestore-xfs.yaml python_versions/python_2.yaml tasks/ceph-admin-commands.yaml} 2
Failure Reason:

{'vpm103.front.sepia.ceph.com': {'msg': 'All items completed', 'failed': True, 'changed': True}, 'vpm041.front.sepia.ceph.com': {'msg': 'All items completed', 'failed': True, 'changed': True}}

pass 2093607 2018-01-20 03:59:52 2018-01-20 03:59:59 2018-01-20 04:39:58 0:39:59 0:15:17 0:24:42 vps master ubuntu 16.04 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_latest.yaml objectstore/bluestore.yaml python_versions/python_2.yaml tasks/ceph-admin-commands.yaml} 2
dead 2084246 2018-01-17 23:22:00 2018-01-17 23:22:02 2018-01-17 23:42:01 0:19:59 vps master ubuntu 16.04 ceph-deploy/basic/{ceph-deploy-overrides/enable_dmcrypt_diff_journal_disk.yaml config_options/cephdeploy_conf.yaml distros/ubuntu_latest.yaml objectstore/bluestore.yaml python_versions/python_2.yaml tasks/ceph-admin-commands.yaml} 2
pass 2043116 2018-01-08 18:03:38 2018-01-08 18:04:07 2018-01-08 18:48:06 0:43:59 0:23:11 0:20:48 vps master centos 7.4 ceph-deploy/basic/{ceph-deploy-overrides/ceph_deploy_dmcrypt.yaml config_options/cephdeploy_conf.yaml distros/centos_latest.yaml objectstore/bluestore.yaml python_versions/python_3.yaml tasks/ceph-admin-commands.yaml} 2
dead 2042027 2018-01-08 05:00:40 2018-01-08 05:17:49 2018-01-08 17:20:15 12:02:26 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/rados_cache_snaps.yaml} 3
pass 2040662 2018-01-08 02:25:39 2018-01-08 02:25:41 2018-01-08 05:25:45 3:00:04 2:39:01 0:21:03 vps master centos 7.4 upgrade:luminous-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-ceph-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{radosbench.yaml rbd-cls.yaml rbd-import-export.yaml rbd_api.yaml readwrite.yaml snaps-few-objects.yaml} 5-finish-upgrade.yaml 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml thrashosds-health.yaml} 3
fail 2040661 2018-01-08 02:25:38 2018-01-08 02:25:40 2018-01-08 02:39:39 0:13:59 vps master ubuntu 16.04 upgrade:luminous-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-ceph-install/luminous.yaml 2-workload/{blogbench.yaml ec-rados-default.yaml rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 5-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} distros/ubuntu_latest.yaml objectstore/bluestore.yaml} 3
Failure Reason:

Could not reconnect to ubuntu@vpm099.front.sepia.ceph.com
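
"Could not reconnect" usually appears after a step that reboots the node (common in upgrade suites) when SSH does not come back within teuthology's timeout. Below is a simple manual probe of the same path, using the host named in the log; the options are standard OpenSSH and the diagnosis in the comment is only the typical case.

    # Check whether the node ever came back up after the reboot.
    ssh -o ConnectTimeout=10 -o BatchMode=yes ubuntu@vpm099.front.sepia.ceph.com uptime
    # A timeout or "connection refused" here points at a guest that did not survive
    # the reboot; on the vps pool that generally means relaunching the VM.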

pass 2038848 2018-01-07 05:00:38 2018-01-07 06:04:36 2018-01-07 08:12:38 2:08:02 0:20:10 1:47:52 vps master smoke/basic/{clusters/{fixed-3-cephfs.yaml openstack.yaml} objectstore/bluestore.yaml tasks/kclient_workunit_suites_pjd.yaml} 3
pass 2038834 2018-01-07 05:00:34 2018-01-07 05:03:50 2018-01-07 14:42:01 9:38:11 0:58:41 8:39:30 vps master centos 7.4 smoke/systemd/{clusters/{fixed-4.yaml openstack.yaml} distros/centos_latest.yaml objectstore/filestore-xfs.yaml tasks/systemd.yaml} 4
pass 2038695 2018-01-07 04:23:55 2018-01-07 10:44:24 2018-01-07 16:44:32 6:00:08 1:29:17 4:30:51 vps master ubuntu 16.04 upgrade:jewel-x/parallel/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.5-final-scrub.yaml 2-workload/blogbench.yaml 3-upgrade-sequence/upgrade-mon-osd-mds.yaml 4-luminous.yaml 5-workload.yaml 6-luminous-with-mgr.yaml 6.5-crush-compat.yaml 7-final-workload/{blogbench.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_swift.yaml} 8-jewel-workload.yaml distros/ubuntu_latest.yaml} 4