Name:          smithi114.front.sepia.ceph.com
Machine Type:  smithi
Up:            True
Locked:        False
Locked Since:
Locked By:
OS Type:       centos
OS Version:    9
Arch:          x86_64
Description:   /home/teuthworker/archive/pdonnell-2024-05-08_22:06:20-fs-wip-pdonnell-testing-20240508.183908-debug-distro-default-smithi/7699062
Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 7699532 2024-05-09 03:11:05 2024-05-09 03:57:07 2024-05-09 04:18:26 0:21:19 0:11:28 0:09:51 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_ca_signed_key} 2
pass 7699062 2024-05-08 22:10:24 2024-05-09 04:17:20 2024-05-09 04:43:43 0:26:23 0:15:30 0:10:53 smithi main centos 9.stream fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-2c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down multifs pg_health session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_pjd}} 2
pass 7699005 2024-05-08 22:09:23 2024-05-09 02:09:31 2024-05-09 03:57:14 1:47:43 1:37:56 0:09:47 smithi main centos 9.stream fs/volumes/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} tasks/volumes/{overrides test/basic}} 2
pass 7698952 2024-05-08 22:08:28 2024-05-09 01:16:29 2024-05-09 02:10:03 0:53:34 0:43:13 0:10:21 smithi main ubuntu 22.04 fs/verify/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a5s-mds-1c-client conf/{client mds mgr mon osd} distro/{ubuntu/{overrides ubuntu_latest}} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug pg_health session_timeout} ranks/3 tasks/dbench validater/lockdep} 2
fail 7698897 2024-05-08 22:07:29 2024-05-09 00:24:28 2024-05-09 01:09:32 0:45:04 0:35:48 0:09:16 smithi main centos 9.stream fs/cephadm/multivolume/{0-start 1-mount 2-workload/dbench distro/single-container-host overrides/{ignorelist_health pg_health}} 2
Failure Reason:

Command failed (workunit test suites/dbench.sh) on smithi153 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=c9da4049f638c0f84bdd9b4e33c42e86046ac554 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 CEPH_MNT=/home/ubuntu/cephtest/mnt.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/suites/dbench.sh'

pass 7698867 2024-05-08 22:06:57 2024-05-08 23:51:17 2024-05-09 00:24:20 0:33:03 0:23:28 0:09:35 smithi main centos 9.stream fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-2c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down multifs pg_health session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/ffsb}} 2
pass 7698213 2024-05-08 20:12:11 2024-05-08 20:17:46 2024-05-08 21:02:17 0:44:31 0:33:57 0:10:34 smithi main centos 9.stream fs:upgrade:mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7698113 2024-05-08 19:25:34 2024-05-08 22:09:58 2024-05-08 23:51:16 1:41:18 1:28:04 0:13:14 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/fs/misc}} 3
fail 7698049 2024-05-08 19:24:19 2024-05-08 21:01:51 2024-05-08 21:55:41 0:53:50 0:44:12 0:09:38 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (51) after waiting for 300 seconds

pass 7697985 2024-05-08 19:23:07 2024-05-08 19:49:11 2024-05-08 20:19:03 0:29:52 0:20:17 0:09:35 smithi main ubuntu 22.04 fs/traceless/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress traceless/50pc} 2
pass 7697912 2024-05-08 17:45:13 2024-05-08 18:14:05 2024-05-08 18:38:56 0:24:51 0:13:35 0:11:16 smithi main ubuntu 22.04 rgw/service-token/{clusters/fixed-1 frontend/beast ignore-pg-availability overrides tasks/service-token ubuntu_latest} 1
pass 7697871 2024-05-08 17:44:21 2024-05-08 17:46:49 2024-05-08 18:15:14 0:28:25 0:15:57 0:12:28 smithi main ubuntu 22.04 rgw/singleton/{all/radosgw-admin frontend/beast ignore-pg-availability objectstore/bluestore-bitmap overrides rgw_pool_type/ec supported-random-distro$/{ubuntu_latest}} 2
pass 7697804 2024-05-08 15:06:54 2024-05-08 17:22:23 2024-05-08 17:48:21 0:25:58 0:14:55 0:11:03 smithi main ubuntu 22.04 rados/multimon/{clusters/6 mon_election/classic msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest} tasks/mon_recovery} 2
pass 7697769 2024-05-08 15:06:11 2024-05-08 17:04:46 2024-05-08 17:22:24 0:17:38 0:08:55 0:08:43 smithi main ubuntu 22.04 rados/objectstore/{backends/objectstore-bluestore-b supported-random-distro$/{ubuntu_latest}} 1
pass 7697706 2024-05-08 15:04:56 2024-05-08 16:28:14 2024-05-08 17:04:42 0:36:28 0:26:19 0:10:09 smithi main ubuntu 22.04 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-comp-zstd rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2} 3
pass 7697678 2024-05-08 15:04:23 2024-05-08 16:05:07 2024-05-08 16:28:14 0:23:07 0:12:44 0:10:23 smithi main centos 9.stream rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/repave-all} 2
pass 7697629 2024-05-08 15:03:24 2024-05-08 15:40:04 2024-05-08 16:05:07 0:25:03 0:15:21 0:09:42 smithi main centos 9.stream rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
pass 7697542 2024-05-08 15:01:35 2024-05-08 15:02:36 2024-05-08 15:40:01 0:37:25 0:25:32 0:11:53 smithi main centos 9.stream rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-stupid rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_latest} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
fail 7697482 2024-05-08 13:39:38 2024-05-08 14:09:30 2024-05-08 14:28:28 0:18:58 0:07:27 0:11:31 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi114 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:7c8f650b36e258f639fa4a83becade57cbfd2009-aarch64 pull'

fail 7697372 2024-05-08 05:25:59 2024-05-08 06:55:30 2024-05-08 07:11:10 0:15:40 0:04:50 0:10:50 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 3
Failure Reason:

Command failed on smithi114 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:7c8f650b36e258f639fa4a83becade57cbfd2009 pull'