Name: smithi148.front.sepia.ceph.com
Machine Type: smithi
Up: True
Locked: True
Locked Since: 2022-01-18 10:59:42.677867
Locked By: scheduled_teuthology@teuthology
OS Type: centos
OS Version: 8
Arch: x86_64
Description: /home/teuthworker/archive/teuthology-2022-01-18_03:15:03-fs-master-distro-default-smithi/6622823
Status Job ID Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 6623901 2022-01-18 09:44:09 2022-01-18 10:12:32 2022-01-18 10:34:20 0:21:48 0:11:05 0:10:43 smithi master centos 8.stream rados:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/basic 3-final} 2
fail 6623835 2022-01-18 08:46:07 2022-01-18 08:57:08 2022-01-18 09:15:12 0:18:04 0:03:54 0:14:10 smithi master ubuntu 20.04 fs/multiclient/{begin clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp overrides/{whitelist_health whitelist_wrongly_marked_down} tasks/cephfs_misc_tests} 5
Failure Reason:

Command failed on smithi148 with status 100: 'sudo DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install ceph=17.0.0-10154-gd2422b0b-1focal cephadm=17.0.0-10154-gd2422b0b-1focal ceph-mds=17.0.0-10154-gd2422b0b-1focal ceph-mgr=17.0.0-10154-gd2422b0b-1focal ceph-common=17.0.0-10154-gd2422b0b-1focal ceph-fuse=17.0.0-10154-gd2422b0b-1focal ceph-test=17.0.0-10154-gd2422b0b-1focal ceph-volume=17.0.0-10154-gd2422b0b-1focal radosgw=17.0.0-10154-gd2422b0b-1focal python3-rados=17.0.0-10154-gd2422b0b-1focal python3-rgw=17.0.0-10154-gd2422b0b-1focal python3-cephfs=17.0.0-10154-gd2422b0b-1focal python3-rbd=17.0.0-10154-gd2422b0b-1focal libcephfs2=17.0.0-10154-gd2422b0b-1focal libcephfs-dev=17.0.0-10154-gd2422b0b-1focal librados2=17.0.0-10154-gd2422b0b-1focal librbd1=17.0.0-10154-gd2422b0b-1focal rbd-fuse=17.0.0-10154-gd2422b0b-1focal python3-cephfs=17.0.0-10154-gd2422b0b-1focal cephfs-shell=17.0.0-10154-gd2422b0b-1focal cephfs-top=17.0.0-10154-gd2422b0b-1focal cephfs-mirror=17.0.0-10154-gd2422b0b-1focal'

fail 6623829 2022-01-18 08:46:02 2022-01-18 08:46:13 2022-01-18 09:00:14 0:14:01 0:03:46 0:10:15 smithi master ubuntu 20.04 fs/multiclient/{begin clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-bitmap overrides/{whitelist_health whitelist_wrongly_marked_down} tasks/cephfs_misc_tests} 4
Failure Reason:

Command failed on smithi148 with status 100: 'sudo DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install ceph=17.0.0-10154-gd2422b0b-1focal cephadm=17.0.0-10154-gd2422b0b-1focal ceph-mds=17.0.0-10154-gd2422b0b-1focal ceph-mgr=17.0.0-10154-gd2422b0b-1focal ceph-common=17.0.0-10154-gd2422b0b-1focal ceph-fuse=17.0.0-10154-gd2422b0b-1focal ceph-test=17.0.0-10154-gd2422b0b-1focal ceph-volume=17.0.0-10154-gd2422b0b-1focal radosgw=17.0.0-10154-gd2422b0b-1focal python3-rados=17.0.0-10154-gd2422b0b-1focal python3-rgw=17.0.0-10154-gd2422b0b-1focal python3-cephfs=17.0.0-10154-gd2422b0b-1focal python3-rbd=17.0.0-10154-gd2422b0b-1focal libcephfs2=17.0.0-10154-gd2422b0b-1focal libcephfs-dev=17.0.0-10154-gd2422b0b-1focal librados2=17.0.0-10154-gd2422b0b-1focal librbd1=17.0.0-10154-gd2422b0b-1focal rbd-fuse=17.0.0-10154-gd2422b0b-1focal python3-cephfs=17.0.0-10154-gd2422b0b-1focal cephfs-shell=17.0.0-10154-gd2422b0b-1focal cephfs-top=17.0.0-10154-gd2422b0b-1focal cephfs-mirror=17.0.0-10154-gd2422b0b-1focal'

pass 6623809 2022-01-18 07:11:51 2022-01-18 07:35:21 2022-01-18 08:18:06 0:42:45 0:32:12 0:10:33 smithi master centos 8.3 rgw:verify/{0-install centos_latest clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability msgr-failures/few objectstore/filestore-xfs overrides proto/http rgw_pool_type/ec sharding$/{default} striping$/{stripe-equals-chunk} tasks/{cls ragweed reshard s3tests-java s3tests} validater/lockdep} 2
pass 6623286 2022-01-18 04:21:27 2022-01-18 10:34:07 2022-01-18 10:59:39 0:25:32 0:18:18 0:07:14 smithi master rhel 8.4 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{frag races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/5 tasks/{1-thrash/mds 2-workunit/suites/iozone}} 2
pass 6623253 2022-01-18 04:21:00 2022-01-18 09:38:08 2022-01-18 10:12:46 0:34:38 0:23:05 0:11:33 smithi master ubuntu 20.04 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/5 tasks/{1-thrash/mon 2-workunit/suites/iozone}} 2
pass 6623223 2022-01-18 04:20:35 2022-01-18 09:15:23 2022-01-18 09:38:42 0:23:19 0:13:27 0:09:52 smithi master centos 8.2 fs/permission/{begin clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{whitelist_health whitelist_wrongly_marked_down} tasks/cfuse_workunit_misc} 2
pass 6623169 2022-01-18 04:19:50 2022-01-18 08:18:04 2022-01-18 08:46:10 0:28:06 0:20:24 0:07:42 smithi master rhel 8.4 fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/5 scrub/no standby-replay tasks/{0-check-counter workunit/suites/fsstress} wsync/{yes}} 3
running 6622823 2022-01-18 03:17:48 2022-01-18 10:58:42 2022-01-18 11:04:54 0:07:27 smithi master centos 8.stream fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8.stream} mount/fuse ms_mode/{secure} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/no standby-replay tasks/{0-check-counter workunit/suites/blogbench} wsync/{no}} 3
fail 6622777 2022-01-18 03:17:15 2022-01-18 03:50:25 2022-01-18 07:36:24 3:45:59 3:34:20 0:11:39 smithi master centos 8.3 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8.stream} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{frag prefetch_dirfrags/yes races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/3 tasks/{1-thrash/osd 2-workunit/fs/snaps}} 2
Failure Reason:

"2022-01-18T05:27:42.249032+0000 mds.e (mds.0) 1 : cluster [WRN] client.4640 isn't responding to mclientcaps(revoke), ino 0x1000000a063 pending pAsLsXsFsc issued pAsLsXsFscb, sent 300.013836 seconds ago" in cluster log

pass 6622730 2022-01-18 03:16:38 2022-01-18 03:17:04 2022-01-18 03:50:24 0:33:20 0:22:05 0:11:15 smithi master rhel 8.4 fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8.stream} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/{legacy} objectstore-ec/bluestore-comp omap_limit/10 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/no standby-replay tasks/{0-check-counter workunit/suites/fsstress} wsync/{yes}} 3
pass 6622655 2022-01-17 20:17:59 2022-01-17 20:47:39 2022-01-17 21:09:52 0:22:13 0:12:08 0:10:05 smithi master centos 8.stream rados:cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
pass 6622628 2022-01-17 20:17:32 2022-01-17 20:17:55 2022-01-17 20:47:56 0:30:01 0:17:35 0:12:26 smithi master centos 8.2 rados:cephadm/smoke-roleless/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
pass 6622570 2022-01-17 17:16:21 2022-01-17 18:27:33 2022-01-17 18:55:10 0:27:37 0:13:00 0:14:37 smithi master centos 8.stream rados/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/rgw 3-final} 2
pass 6622525 2022-01-17 17:07:33 2022-01-17 17:32:47 2022-01-17 18:31:35 0:58:48 0:50:02 0:08:46 smithi master rhel 8.4 rados/cephadm/thrash/{0-distro/rhel_8.4_container_tools_rhel8 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root} 2
pass 6622471 2022-01-17 16:30:16 2022-01-17 16:51:53 2022-01-17 17:33:06 0:41:13 0:31:52 0:09:21 smithi master centos 8.3 rgw:verify/{0-install centos_latest clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability msgr-failures/few objectstore/bluestore-bitmap overrides proto/http rgw_pool_type/ec-profile sharding$/{single} striping$/{stripe-equals-chunk} tasks/{cls ragweed reshard s3tests-java s3tests} validater/lockdep} 2
fail 6622385 2022-01-17 14:56:23 2022-01-17 16:06:20 2022-01-17 16:51:56 0:45:36 0:38:15 0:07:21 smithi master rhel 8.4 rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} msgr-failures/few objectstore/bluestore-low-osd-mem-target supported-random-distro$/{rhel_8} workloads/rbd-mirror-workunit-policy-simple} 2
Failure Reason:

Command failed (workunit test rbd/rbd_mirror_journal.sh) on smithi148 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.cluster1.mirror/client.mirror/tmp && cd -- /home/ubuntu/cephtest/mnt.cluster1.mirror/client.mirror/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d238de862197118eb4dfe9e422168d942c06f08c TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster cluster1" CEPH_ID="mirror" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.cluster1.client.mirror CEPH_ROOT=/home/ubuntu/cephtest/clone.cluster1.client.mirror CEPH_MNT=/home/ubuntu/cephtest/mnt.cluster1.mirror CEPH_ARGS=\'\' RBD_MIRROR_INSTANCES=4 RBD_MIRROR_USE_EXISTING_CLUSTER=1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.cluster1.client.mirror/qa/workunits/rbd/rbd_mirror_journal.sh'

pass 6622324 2022-01-17 13:44:11 2022-01-17 15:03:53 2022-01-17 15:40:50 0:36:57 0:26:00 0:10:57 smithi master ubuntu 20.04 rados:cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
pass 6622234 2022-01-17 13:40:54 2022-01-17 14:20:42 2022-01-17 15:04:17 0:43:35 0:34:26 0:09:09 smithi master centos 8.2 rados:cephadm/with-work/{0-distro/centos_8.2_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
pass 6622174 2022-01-17 13:39:57 2022-01-17 13:46:17 2022-01-17 14:20:52 0:34:35 0:23:48 0:10:47 smithi master centos 8.stream rados:cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-start-upgrade 4-wait 5-upgrade-ls mon_election/classic} 2