Name: smithi117.front.sepia.ceph.com
Machine Type: smithi
Up: True
Locked: True
Locked Since: 2020-06-06 10:15:47.816137
Locked By: scheduled_kyr@teuthology
OS Type: ubuntu
OS Version: 18.04
Arch: x86_64
Description: /home/teuthworker/archive/kyr-2020-06-06_07:03:15-rbd-octopus-distro-basic-smithi/5122352
Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
running 5122352 2020-06-06 07:05:20 2020-06-06 10:14:17 2020-06-06 13:14:21 3:00:40 smithi master ubuntu 18.04 rbd/maintenance/{base/install clusters/{fixed-3 openstack} objectstore/bluestore-low-osd-mem-target qemu/xfstests supported-random-distro$/{ubuntu_latest} workloads/dynamic_features_no_cache} 3
pass 5122223 2020-06-06 07:03:28 2020-06-06 09:22:01 2020-06-06 10:16:02 0:54:01 0:42:03 0:11:58 smithi master ubuntu 18.04 rbd/qemu/{cache/writeback clusters/{fixed-3 openstack} features/journaling msgr-failures/few objectstore/bluestore-bitmap pool/ec-cache-pool supported-random-distro$/{ubuntu_latest} workloads/qemu_bonnie} 3
pass 5122148 2020-06-06 06:56:11 2020-06-06 09:03:59 2020-06-06 09:23:58 0:19:59 0:13:21 0:06:38 smithi master rhel 8.1 fs/basic_functional/{begin clusters/1-mds-4-client-coloc conf/{client mds mon osd} mount/fuse objectstore/bluestore-bitmap overrides/{frag_enable no_client_pidfile whitelist_health whitelist_wrongly_marked_down} supported-random-distros$/{rhel_latest} tasks/cfuse_workunit_quota} 2
pass 5121220 2020-06-06 05:07:37 2020-06-06 06:27:34 2020-06-06 09:05:38 2:38:04 2:12:03 0:26:01 smithi py2 ubuntu 16.04 powercycle/osd/{clusters/3osd-1per-target objectstore/bluestore-stupid powercycle/default supported-all-distro/ubuntu_16.04 tasks/radosbench thrashosds-health whitelist_health} 4
pass 5121207 2020-06-06 05:07:32 2020-06-06 05:49:24 2020-06-06 06:41:25 0:52:01 0:16:58 0:35:03 smithi master centos 7.8 powercycle/osd/{clusters/3osd-1per-target objectstore/bluestore-bitmap powercycle/default supported-all-distro/centos_latest tasks/admin_socket_objecter_requests thrashosds-health whitelist_health} 4
pass 5121187 2020-06-06 04:27:11 2020-06-06 05:39:20 2020-06-06 06:13:20 0:34:00 0:19:15 0:14:45 smithi master centos 8.1 multimds/thrash/{0-supported-random-distro$/{centos_8} begin ceph-thrash/mds clusters/9-mds-3-standby conf/{client mds mon osd} mount/fuse msgr-failures/none objectstore-ec/bluestore-comp-ec-root overrides/{fuse-default-perm-no thrash/{frag_enable whitelist_health whitelist_wrongly_marked_down} thrash_debug} tasks/cfuse_workunit_suites_pjd} 3
pass 5121087 2020-06-06 03:21:04 2020-06-06 04:23:00 2020-06-06 05:05:00 0:42:00 0:29:56 0:12:04 smithi master krbd/fsx/{ceph/ceph clusters/3-node conf features/object-map objectstore/bluestore-bitmap striping/default/{msgr-failures/many randomized-striping-off} tasks/fsx-3-client} 3
pass 5121077 2020-06-06 03:20:55 2020-06-06 04:17:03 2020-06-06 05:41:04 1:24:01 0:25:19 0:58:42 smithi master krbd/wac/wac/{bluestore-bitmap ceph/ceph clusters/fixed-3 conf tasks/wac verify/many-resets} 3
pass 5120616 2020-06-06 00:26:20 2020-06-06 00:26:30 2020-06-06 04:24:36 3:58:06 3:43:00 0:15:06 smithi master ubuntu 18.04 rados/upgrade/nautilus-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/nautilus.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-octopus.yaml 8-workload/{rbd-python.yaml snaps-many-objects.yaml} bluestore-bitmap.yaml thrashosds-health.yaml ubuntu_latest.yaml} 4
pass 5120541 2020-06-05 20:38:07 2020-06-05 20:38:15 2020-06-05 21:14:15 0:36:00 0:28:16 0:07:44 smithi master centos 8.1 rgw/verify/{centos_latest.yaml clusters/fixed-2.yaml frontend/civetweb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml overrides.yaml proto/http.yaml rgw_pool_type/replicated.yaml sharding$/{single.yaml} striping$/{stripe-greater-than-chunk.yaml} tasks/{0-install.yaml cls.yaml ragweed.yaml reshard.yaml s3tests-java.yaml s3tests.yaml} validater/lockdep.yaml} 2
pass 5120465 2020-06-05 20:03:32 2020-06-05 20:03:47 2020-06-05 20:29:46 0:25:59 0:13:15 0:12:44 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_18.04_podman.yaml start.yaml} 2
pass 5120012 2020-06-05 15:41:28 2020-06-05 18:55:01 2020-06-05 19:17:00 0:21:59 0:11:34 0:10:25 smithi py2 ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-stupid openstack settings/optimized ubuntu_latest workloads/fio_4M_rand_write} 1
pass 5119942 2020-06-05 15:40:22 2020-06-05 18:25:44 2020-06-05 18:57:43 0:31:59 0:24:39 0:07:20 smithi py2 centos 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/osd-delay objectstore/filestore-xfs rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=2-m=1} 2
pass 5119873 2020-06-05 15:39:16 2020-06-05 17:56:24 2020-06-05 18:26:23 0:29:59 0:13:23 0:16:36 smithi py2 ubuntu 18.04 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/fastclose objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
pass 5119703 2020-06-05 14:37:09 2020-06-05 15:11:45 2020-06-05 15:33:45 0:22:00 0:09:20 0:12:40 smithi master rgw/multifs/{clusters/fixed-2.yaml frontend/civetweb.yaml objectstore/filestore-xfs.yaml overrides.yaml rgw_pool_type/ec-profile.yaml tasks/rgw_ragweed.yaml} 2
fail 5119652 2020-06-05 14:36:33 2020-06-05 14:41:07 2020-06-05 15:15:07 0:34:00 0:28:23 0:05:37 smithi master centos 8.1 rgw/verify/{centos_latest.yaml clusters/fixed-2.yaml frontend/civetweb.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml overrides.yaml proto/http.yaml rgw_pool_type/ec-profile.yaml sharding$/{default.yaml} striping$/{stripe-equals-chunk.yaml} tasks/{0-install.yaml cls.yaml ragweed.yaml reshard.yaml s3tests-java.yaml s3tests.yaml} validater/lockdep.yaml} 2
Failure Reason:

"2020-06-05T14:54:05.610013+0000 mon.b (mon.0) 153 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

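Note: a PG_AVAILABILITY warning like the one above is often a transient peering blip (for example, right after pool creation); the job typically fails only because the teuthology cluster-log check found a WRN line that the suite's health whitelist does not cover. A minimal triage sketch, assuming admin access to a cluster that is still up — both commands are standard Ceph CLI, and nothing here is taken from the job beyond the warning text:

    # Check whether the inactive/peering PGs persist or were momentary.
    ceph health detail
    # List any PGs still stuck in the inactive state.
    ceph pg dump_stuck inactive
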
pass 5119632 2020-06-05 14:24:54 2020-06-05 14:27:29 2020-06-05 18:03:33 3:36:04 1:43:58 1:52:06 smithi py2 ubuntu 18.04 upgrade:nautilus-x/stress-split-erasure-code/{0-cluster/{openstack start} 1-nautilus-install/nautilus 1.1-pg-log-overrides/short_pg_log 2-partial-upgrade/firsthalf 3-thrash/default 3.1-objectstore/filestore-xfs 4-ec-workload/{rados-ec-workload rbd-ec-workload} 5-finish-upgrade 6-octopus 7-final-workload thrashosds-health ubuntu_latest} 5
fail 5119603 2020-06-05 13:13:43 2020-06-05 13:49:36 2020-06-05 14:13:35 0:23:59 0:09:52 0:14:07 smithi master centos 8.1 rados:cephadm/with-work/{distro/centos_latest fixed-2 mode/root msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi117 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph-ci/ceph:ef66e1bc4d611e10aee43b698f822996673b3fe4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1893cb46-a736-11ea-a06b-001a4aab830c -- ceph orch daemon add osd smithi117:vg_nvme/lv_4'

fail 5119585 2020-06-05 13:13:26 2020-06-05 13:43:21 2020-06-05 13:59:19 0:15:58 0:09:32 0:06:26 smithi master rhel 8.0 rados:cephadm/smoke-roleless/{distro/rhel_8.0 start} 2
Failure Reason:

Command failed on smithi117 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph-ci/ceph:ef66e1bc4d611e10aee43b698f822996673b3fe4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1d229892-a734-11ea-a06b-001a4aab830c -- ceph orch daemon add osd smithi117:vg_nvme/lv_4'

fail 5119560 2020-06-05 13:13:03 2020-06-05 13:23:52 2020-06-05 13:43:51 0:19:59 0:09:16 0:10:43 smithi master centos 7.6 rados:cephadm/smoke/{distro/centos_7 fixed-2 start} 2
Failure Reason:

Command failed on smithi117 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph-ci/ceph:ef66e1bc4d611e10aee43b698f822996673b3fe4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fcbd512a-a731-11ea-a06b-001a4aab830c -- ceph orch daemon add osd smithi117:vg_nvme/lv_4'
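Note: the three cephadm failures above (jobs 5119603, 5119585, 5119560) report the same command exiting with status 22 (likely EINVAL) while adding an OSD on smithi117's vg_nvme/lv_4, which points at the node's disk state rather than at any one suite. A minimal reproduction sketch, assuming the cluster from job 5119560 were still up and you had shell access to smithi117 — the image, fsid, and paths are copied verbatim from the last failure, and `ceph orch device ls` is only a hypothetical first step to see what the orchestrator considers available:

    # Inspect the devices the orchestrator sees on the node, using the same
    # containerized shell the failing command ran in.
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay.io/ceph-ci/ceph:ef66e1bc4d611e10aee43b698f822996673b3fe4 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid fcbd512a-a731-11ea-a06b-001a4aab830c -- ceph orch device ls
    # Re-run the failing OSD add by hand to capture its full error output.
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay.io/ceph-ci/ceph:ef66e1bc4d611e10aee43b698f822996673b3fe4 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid fcbd512a-a731-11ea-a06b-001a4aab830c -- \
        ceph orch daemon add osd smithi117:vg_nvme/lv_4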