Name:         smithi190.front.sepia.ceph.com
Machine Type: smithi
Up:           True
Locked:       True
Locked Since: 2024-05-27 14:16:11.469907
Locked By:    scheduled_teuthology@teuthology
OS Type:      centos
OS Version:   9
Arch:         x86_64
Description:  /home/teuthworker/archive/teuthology-2024-05-20_20:08:15-orch-main-distro-default-smithi/7716417
Status | Job ID | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
pass 7727732 2024-05-27 06:48:21 2024-05-27 10:42:05 2024-05-27 11:43:10 1:01:05 0:51:37 0:09:28 smithi main centos 9.stream fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-2c-client conf/{client mds mgr mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down multifs pg_health session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_snaptests}} 2
fail 7727702 2024-05-27 06:47:48 2024-05-27 09:28:41 2024-05-27 10:41:55 1:13:14 1:01:04 0:12:10 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-quiesce/with-quiesce 6-workunit/kernel_untar_build}} 3
Failure Reason:

Command failed (workunit test kernel_untar_build.sh) on smithi031 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a6507388dd5057528934822c0163b0c347ef1d5d TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/kernel_untar_build.sh'

dead 7727697 2024-05-27 06:47:42 2024-05-27 09:15:47 2024-05-27 09:17:51 0:02:04 smithi main centos 9.stream fs/verify/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a5s-mds-1c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug pg_health session_timeout} ranks/3 tasks/dbench validater/valgrind} 2
Failure Reason:

Error reimaging machines: Failed to power on smithi190

fail 7727660 2024-05-27 05:54:08 2024-05-27 07:08:36 2024-05-27 09:16:44 2:08:08 1:59:01 0:09:07 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-quiesce/no 6-workunit/fs/misc}} 3
Failure Reason:

Command failed on smithi031 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:38f9d900c83c63799fbdbe61acc9a11b0d3554a6 shell --fsid 1d08f042-1bfa-11ef-bc9b-c7b262605968 -- ceph daemon mds.a perf dump'

fail 7727615 2024-05-27 05:53:20 2024-05-27 06:15:11 2024-05-27 07:09:58 0:54:47 0:42:27 0:12:20 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/suites/fsstress}} 3
Failure Reason:

Command failed on smithi007 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:38f9d900c83c63799fbdbe61acc9a11b0d3554a6 shell --fsid 2fe68fe6-1bf3-11ef-bc9b-c7b262605968 -- ceph daemon mds.a perf dump'

pass 7727545 2024-05-27 00:24:37 2024-05-27 02:55:04 2024-05-27 04:41:10 1:46:06 1:33:29 0:12:37 smithi main ubuntu 20.04 upgrade:octopus-x/stress-split-erasure-code-no-cephadm/{0-cluster/{openstack start} 1-octopus-install/octopus 1.1-pg-log-overrides/short_pg_log 2-partial-upgrade/firsthalf 3-thrash/default 3.1-objectstore/filestore-xfs 4-ec-workload/{rados-ec-workload rbd-ec-workload} 5-finish-upgrade 6-quincy 7-final-workload mon_election/classic thrashosds-health ubuntu_20.04} 5
pass 7727505 2024-05-26 22:06:05 2024-05-27 12:53:06 2024-05-27 13:27:48 0:34:42 0:21:31 0:13:11 smithi main centos 8.stream rados/multimon/{clusters/21 mon_election/connectivity msgr-failures/many msgr/async-v1only no_pools objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} tasks/mon_recovery} 3
pass 7727484 2024-05-26 22:05:44 2024-05-27 12:31:24 2024-05-27 12:56:39 0:25:15 0:18:26 0:06:49 smithi main rhel 8.6 rados/cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
pass 7727409 2024-05-26 22:04:30 2024-05-27 11:50:21 2024-05-27 12:31:22 0:41:01 0:32:01 0:09:00 smithi main centos 9.stream rados/monthrash/{ceph clusters/3-mons mon_election/connectivity msgr-failures/mon-delay msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{centos_latest} thrashers/one workloads/rados_api_tests} 2
dead 7727398 2024-05-26 22:04:19 2024-05-27 11:43:13 2024-05-27 11:44:17 0:01:04 smithi main centos 8.stream rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-bitmap rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
Failure Reason:

Error reimaging machines: Failed to power on smithi060

pass 7727296 2024-05-26 22:02:36 2024-05-27 05:50:23 2024-05-27 06:15:31 0:25:08 0:15:32 0:09:36 smithi main centos 8.stream rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
pass 7727240 2024-05-26 22:01:39 2024-05-27 05:16:27 2024-05-27 05:50:29 0:34:02 0:23:23 0:10:39 smithi main ubuntu 20.04 rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
fail 7727065 2024-05-26 21:25:35 2024-05-26 22:19:26 2024-05-27 01:10:33 2:51:07 2:39:59 0:11:08 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/kernel_untar_build}} 3
Failure Reason:

"2024-05-26T22:54:01.680172+0000 mds.b (mds.0) 59 : cluster [WRN] Scrub error on inode 0x10000002baa (/volumes/qa/sv_0/6093c496-f50e-420f-a543-e21bbf5d9c72/client.0/tmp/t/linux-6.5.11/arch/arm/boot/dts) see mds.b log and `damage ls` output for details" in cluster log

pass 7727030 2024-05-26 21:24:59 2024-05-26 21:45:27 2024-05-26 22:21:18 0:35:51 0:25:02 0:10:49 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/fs/norstats}} 3
fail 7726917 2024-05-26 21:05:33 2024-05-27 02:14:43 2024-05-27 02:56:55 0:42:12 0:31:04 0:11:08 smithi main centos 8.stream rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/reef backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/connectivity msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/cache-snaps} 3
Failure Reason:

"2024-05-27T02:37:14.486934+0000 mon.a (mon.0) 208 : cluster [WRN] Health detail: HEALTH_WARN 1/3 mons down, quorum a,c" in cluster log

pass 7726828 2024-05-26 21:04:07 2024-05-27 01:37:15 2024-05-27 02:15:40 0:38:25 0:27:51 0:10:34 smithi main ubuntu 22.04 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2} 3
pass 7726774 2024-05-26 21:03:12 2024-05-27 01:10:31 2024-05-27 01:37:35 0:27:04 0:17:24 0:09:40 smithi main centos 9.stream rados/valgrind-leaks/{1-start 2-inject-leak/osd centos_latest} 1
pass 7726710 2024-05-26 21:02:09 2024-05-26 21:10:36 2024-05-26 21:46:14 0:35:38 0:25:49 0:09:49 smithi main ubuntu 22.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/classic random-objectstore$/{bluestore-low-osd-mem-target} supported-random-distro$/{ubuntu_latest} tasks/module_selftest} 2
pass 7726370 2024-05-26 14:12:51 2024-05-26 14:20:14 2024-05-26 15:20:13 0:59:59 0:47:22 0:12:37 smithi main centos 9.stream fs:functional/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} subvol_versions/create_subvol_version_v1 tasks/quiesce} 2
fail 7726260 2024-05-26 05:24:47 2024-05-26 07:24:23 2024-05-26 07:59:41 0:35:18 0:27:11 0:08:07 smithi main rhel 8.6 smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{rhel_8} tasks/{0-install test/rados_python}} 3
Failure Reason:

Command failed (workunit test rados/test_python.sh) on smithi190 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=02dd4a0049517eb4889baa50cc36ffa32d7c2440 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_python.sh'