Name: smithi109.front.sepia.ceph.com
Machine Type: smithi
Up: True
Locked: True
Locked Since: 2022-10-12 02:00:34.290552
Locked By: scheduled_teuthology@teuthology
OS Type: ubuntu
OS Version: 20.04
Arch: x86_64
Description: /home/teuthworker/archive/teuthology-2022-10-11_04:17:02-fs-pacific-distro-default-smithi/7062792

Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 7063102 2022-10-11 15:02:35 2022-10-11 16:25:45 2022-10-11 21:38:17 5:12:32 5:02:48 0:09:44 smithi main ubuntu 20.04 upgrade:quincy-p2p/quincy-p2p-stress-split/{0-cluster/{openstack start} 1-ceph-install/quincy 1.1.short_pg_log 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{fsx radosbench rbd-cls rbd-import-export rbd_api readwrite snaps-few-objects} 5-finish-upgrade 6-final-workload/{rbd-python snaps-many-objects} objectstore/bluestore-bitmap supported-all-distro/ubuntu_latest thrashosds-health} 3
Failure Reason:

"2022-10-11T17:40:39.714974+0000 mon.a (mon.0) 7163 : cluster [WRN] pool 'test-librbd-smithi169-14140-7' is full (reached quota's max_bytes: 10 MiB)" in cluster log

pass 7063056 2022-10-11 11:13:20 2022-10-11 11:14:42 2022-10-11 11:37:42 0:23:00 0:17:18 0:05:42 smithi main centos 8.stream fs:upgrade/upgraded_client/from_pacific/{bluestore-bitmap centos_latest clusters/{1a3s-mds-2c-client} conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-client-upgrade 2-workload/fsstress}} 2
fail 7062992 2022-10-11 07:05:04 2022-10-11 07:07:03 2022-10-11 07:55:24 0:48:21 0:33:17 0:15:04 smithi main ubuntu 20.04 upgrade:quincy-p2p:quincy-p2p-parallel/{point-to-point-upgrade supported-all-distro/ubuntu_latest} 3
Failure Reason:

Command failed on smithi109 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph pg dump --format=json'
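
Exit status 124 is what timeout(1) returns when the wrapped command exceeds its limit, so `ceph pg dump` did not fail outright here; it simply ran past the 120-second budget. A minimal sketch of the same check without the teuthology wrappers (adjust-ulimits and ceph-coverage are harness-specific; the 120 s limit is taken from the failure line above):

  # Re-run the dump under the same 120 s cap; an exit status of 124 means
  # timeout(1) killed it.
  sudo timeout 120 ceph --cluster ceph pg dump --format=json > /tmp/pg_dump.json
  echo $?
  # If it keeps timing out, cluster state is the next thing to check; pg dump is
  # served by the mgr, so a slow or overloaded mgr is a common suspect.
  ceph -s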

pass 7062973 2022-10-11 05:46:35 2022-10-11 05:56:56 2022-10-11 06:18:15 0:21:19 0:13:52 0:07:27 smithi main centos 8.stream rados:thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/admin_socket_objecter_requests} 2
pass 7062942 2022-10-11 05:01:00 2022-10-11 06:18:24 2022-10-11 06:45:31 0:27:07 0:20:48 0:06:19 smithi main centos 8.stream smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{centos_8} tasks/{0-install test/rgw_s3tests}} 3
fail 7062882 2022-10-11 04:27:27 2022-10-11 14:12:27 2022-10-11 14:30:41 0:18:14 0:09:15 0:08:59 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi083 with status 22: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:791e341cf964b37e08609aaabf8bf023a2136675 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 948868d6-4970-11ed-8437-001a4aab830c -- ceph orch device zap smithi083 /dev/vg_nvme/lv_4 --force'
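
This failure and the next one (7062855) are the same pattern: `ceph orch device zap` exiting with status 22, which the ceph CLI generally uses for EINVAL, suggesting the orchestrator rejected the device argument rather than hitting an I/O error partway through. A hedged sketch of how the device state would normally be checked, using a plain packaged cephadm shell instead of the test's explicit image/FSID flags; host and LV path are the ones from the failing command:

  # What devices the orchestrator currently knows about on that host.
  sudo cephadm shell -- ceph orch device ls smithi083 --refresh
  # The failing call, repeated from the job's failure reason.
  sudo cephadm shell -- ceph orch device zap smithi083 /dev/vg_nvme/lv_4 --force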

fail 7062855 2022-10-11 04:26:56 2022-10-11 13:56:35 2022-10-11 14:13:46 0:17:11 0:09:29 0:07:42 smithi main centos 8.stream orch:cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi083 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:791e341cf964b37e08609aaabf8bf023a2136675 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 34b642c2-496e-11ed-8437-001a4aab830c -- ceph orch device zap smithi083 /dev/vg_nvme/lv_4 --force'

fail 7062821 2022-10-11 04:26:21 2022-10-11 13:41:23 2022-10-11 13:56:46 0:15:23 0:05:40 0:09:43 smithi main centos 8.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi083 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f6a2fa4a-496b-11ed-8437-001a4aab830c -- ceph orch daemon add osd smithi083:vg_nvme/lv_4'
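
Again exit status 22, this time from `ceph orch daemon add osd`, which takes a host:device (or host:vg/lv) spec; the status again points at the spec being rejected as invalid rather than the OSD add failing midway. A minimal sketch of the usual sequence, under the same assumption of a plain cephadm shell rather than the test's image/FSID flags:

  # Confirm the LV is visible and available to the orchestrator first.
  sudo cephadm shell -- ceph orch device ls smithi083
  # The call from the failure line: host:vg/lv spec for the new OSD.
  sudo cephadm shell -- ceph orch daemon add osd smithi083:vg_nvme/lv_4
  # Verify the daemon actually came up.
  sudo cephadm shell -- ceph orch ps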

fail 7062813 2022-10-11 04:26:12 2022-10-11 13:34:09 2022-10-11 13:43:43 0:09:34 0:03:02 0:06:32 smithi main centos 8.stream orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}} 1
Failure Reason:

Command failed on smithi109 with status 1: 'TESTDIR=/home/ubuntu/cephtest bash -s'

running 7062792 2022-10-11 04:23:36 2022-10-12 02:00:24 2022-10-12 14:04:24 48 days, 14:54:43 smithi main ubuntu 20.04 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag prefetch_dirfrags/no races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/5 tasks/{1-thrash/mds 2-workunit/suites/ffsb}} 2
pass 7062767 2022-10-11 04:23:05 2022-10-12 01:30:18 2022-10-12 02:00:32 0:30:14 0:14:55 0:15:19 smithi main ubuntu 20.04 fs/multiclient/{begin clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{whitelist_health whitelist_wrongly_marked_down} tasks/mdtest} 4
pass 7062745 2022-10-11 04:22:38 2022-10-12 00:58:25 2022-10-12 01:31:36 0:33:11 0:17:39 0:15:32 smithi main ubuntu 20.04 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag prefetch_dirfrags/no races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/3 tasks/{1-thrash/osd 2-workunit/fs/trivial_sync}} 2
pass 7062697 2022-10-11 04:21:39 2022-10-12 00:06:47 2022-10-12 01:03:59 0:57:12 0:50:24 0:06:48 smithi main rhel 8.4 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp-ec-root overrides/{frag prefetch_dirfrags/yes races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/3 tasks/{1-thrash/mon 2-workunit/fs/snaps}} 2
pass 7062670 2022-10-11 04:21:06 2022-10-11 23:32:22 2022-10-12 00:06:51 0:34:29 0:24:28 0:10:01 smithi main ubuntu 20.04 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-ec-root overrides/{frag prefetch_dirfrags/yes races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/1 tasks/{1-thrash/osd 2-workunit/suites/fsstress}} 2
pass 7062641 2022-10-11 04:20:31 2022-10-11 23:04:16 2022-10-11 23:32:12 0:27:56 0:21:25 0:06:31 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{multimds/yes pg-warn whitelist_health whitelist_wrongly_marked_down} tasks/{0-nautilus 1-client 2-upgrade 3-compat_client/pacific}} 3
pass 7062622 2022-10-11 04:20:08 2022-10-11 22:48:06 2022-10-11 23:05:06 0:17:00 0:10:38 0:06:22 smithi main centos 8.stream fs/permission/{begin clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{whitelist_health whitelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
pass 7062600 2022-10-11 04:19:41 2022-10-11 22:20:20 2022-10-11 22:47:57 0:27:37 0:18:04 0:09:33 smithi main centos 8.stream fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{multimds/no pg-warn whitelist_health whitelist_wrongly_marked_down} tasks/{0-nautilus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}} 3
pass 7062557 2022-10-11 04:18:48 2022-10-11 21:39:47 2022-10-11 22:22:43 0:42:56 0:36:53 0:06:03 smithi main rhel 8.4 fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{frag prefetch_dirfrags/yes races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/3 tasks/{1-thrash/mds 2-workunit/fs/snaps}} 2
fail 7062404 2022-10-11 04:01:35 2022-10-11 04:55:46 2022-10-11 05:57:23 1:01:37 0:50:59 0:10:38 smithi main ubuntu 20.04 fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
Failure Reason:

"1665465546.9398797 mon.a (mon.0) 188 : cluster [WRN] Health check failed: 1 MDSs behind on trimming (MDS_TRIM)" in cluster log

dead 7062029 2022-10-10 20:40:19 2022-10-10 20:49:30 2022-10-10 21:01:37 0:12:07 0:04:27 0:07:40 smithi main rhel 8.6 fs:upgrade/upgraded_client/from_pacific/{bluestore-bitmap clusters/{1-mds-2-client} conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-client-upgrade 2-workload/kernel_cfuse_workunits_dbench_iozone}} 4
Failure Reason:

{'smithi109.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['krb5-workstation'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': True}}, 'msg': "Loading repository 'rhel-8-for-x86_64-baseos-rpms' has failed", 'rc': 1, 'results': []},
 'smithi120.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['krb5-workstation'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': True}}, 'msg': "Loading repository 'rhel-8-for-x86_64-baseos-rpms' has failed", 'rc': 1, 'results': []},
 'smithi071.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['krb5-workstation'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': True}}, 'msg': "Loading repository 'rhel-8-for-x86_64-baseos-rpms' has failed", 'rc': 1, 'results': []},
 'smithi097.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['krb5-workstation'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': True}}, 'msg': "Loading repository 'rhel-8-for-x86_64-baseos-rpms' has failed", 'rc': 1, 'results': []}}
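
All four nodes failed the same Ansible task: installing krb5-workstation with dnf, with the RHEL 8 BaseOS repository refusing to load, so this is a provisioning/environment failure rather than a Ceph one. A minimal sketch of how the repo problem would typically be reproduced by hand on one of the nodes (the job row says rhel 8.6, so subscription-manager is assumed to manage the repos):

  # Is the BaseOS repo enabled on this node?
  sudo subscription-manager repos --list-enabled
  # Try to load just that repository's metadata; this should surface the same error.
  sudo dnf --disablerepo='*' --enablerepo=rhel-8-for-x86_64-baseos-rpms makecache
  # The package the playbook was installing when it failed.
  sudo dnf install -y krb5-workstation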