Name Machine Type Up Locked Locked Since Locked By OS Type OS Version Arch Description
smithi096.front.sepia.ceph.com smithi True True 2024-04-27 00:16:20.201810 scheduled_yuriw@teuthology ubuntu 22.04 x86_64 /home/teuthworker/archive/yuriw-2024-04-26_18:18:24-rados-wip-yuri3-testing-2024-04-05-0825-distro-default-smithi/7675069
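
The lock above can be cross-checked from a teuthology admin host. A minimal sketch, assuming the standard teuthology CLI is installed and configured for the Sepia lab (flag names as I recall them from teuthology-lock --help, so treat them as an assumption):

    # Show lock state, owner, and description for this node
    teuthology-lock --list smithi096

    # Per-owner lock counts for the whole smithi pool
    teuthology-lock --summary --machine-type smithi
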
Status Job ID Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 7675301 2024-04-26 19:35:24 2024-04-26 23:10:27 2024-04-26 23:59:45 0:49:18 0:38:44 0:10:34 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/postgres}} 3
Failure Reason:

Command failed on smithi096 with status 110: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph fs subvolumegroup pin cephfs qa random 0.10'"
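
Status 110 is ETIMEDOUT: the mon/mgr never answered, rather than the pin being rejected. A hedged triage sketch, re-using the command from the log plus two stock ceph CLI checks:

    # Confirm the cluster and the subvolume group are reachable at all
    ceph status
    ceph fs subvolumegroup ls cephfs

    # Then re-issue the failing pin by hand
    sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph fs subvolumegroup pin cephfs qa random 0.10'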

running 7675069 2024-04-26 18:22:32 2024-04-27 00:16:20 2024-04-27 01:23:50 1:09:16 smithi main ubuntu 22.04 rados/upgrade/parallel/{0-random-distro$/{ubuntu_22.04} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}} 2
fail 7675045 2024-04-26 18:22:07 2024-04-27 00:00:31 2024-04-27 00:11:26 0:10:55 0:04:55 0:06:00 smithi main centos 9.stream rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed on smithi096 with status 1: 'sudo yum -y install ceph-mgr-dashboard'
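
An install failure like this is usually a repo problem (for example, the shaman build repo missing the package for this distro) rather than a test bug. A sketch for checking from the node, using only stock yum commands; the same checks apply to the ceph-radosgw failure further down:

    # Is the package visible in any enabled repo?
    yum info ceph-mgr-dashboard

    # Are the Ceph repos enabled and reachable?
    yum repolist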

dead 7674685 2024-04-26 10:55:04 2024-04-26 10:59:29 2024-04-26 23:12:25 12:12:56 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-quiesce/with-quiesce 6-workunit/postgres}} 3
Failure Reason:

hit max job timeout
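
Teuthology marks a job dead when it exceeds the run's maximum job time and reaps it. If the worker does not clean up, the job can also be killed by hand; a sketch assuming the stock teuthology-kill tool (the run name is a placeholder, it is not given above):

    # Reap a hung job by run name and job id
    teuthology-kill -r <run-name> -j 7674685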

fail 7674600 2024-04-26 02:15:15 2024-04-26 02:16:23 2024-04-26 02:39:20 0:22:57 0:16:14 0:06:43 smithi main centos 9.stream crimson-rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} clusters/{fixed-2} crimson-supported-all-distro/centos_latest crimson_qa_overrides deploy/ceph objectstore/bluestore thrashers/simple thrashosds-health workloads/pool-snaps-few-objects} 2
Failure Reason:

Command failed on smithi096 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph pg dump --format=json'
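
Status 124 is the timeout(1) wrapper expiring: ceph pg dump did not return within the 120 s cap. Since the mgr serves pg dump, a stalled mgr is the usual suspect on a thrash run; a hedged sketch:

    # How long does the dump actually take without the cap?
    time ceph --cluster ceph pg dump --format=json > /dev/null

    # Is a mgr active at all?
    ceph mgr stat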

fail 7674593 2024-04-26 02:09:30 2024-04-26 04:36:01 2024-04-26 08:23:15 3:47:14 3:37:04 0:10:10 smithi main centos 9.stream upgrade/cephfs/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi130 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=b22e2ebdeb24376882b7bda2a7329c8cccc2276a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 CEPH_MNT=/home/ubuntu/cephtest/mnt.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/suites/fsstress.sh'
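
Status 124 here is the workunit's own timeout 3h cap expiring: fsstress ran but never finished within three hours of the staggered mgr upgrade. A sketch for reproducing by hand on the client, re-using the paths from the command above (the looser cap is an arbitrary choice; the CEPH_* environment from the original command is omitted and may need to be restored):

    # Run from the same tmp dir on the mount, with a looser cap
    mkdir -p /home/ubuntu/cephtest/mnt.1/client.1/tmp
    cd /home/ubuntu/cephtest/mnt.1/client.1/tmp
    timeout 6h /home/ubuntu/cephtest/clone.client.1/qa/workunits/suites/fsstress.sh

    # A wedged kclient mount usually leaves traces in the kernel log
    dmesg | grep -i ceph | tail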

fail 7674524 2024-04-26 01:30:04 2024-04-26 04:02:33 2024-04-26 04:22:08 0:19:35 0:06:41 0:12:54 smithi main centos 8.stream rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/quincy backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/radosbench} 3
Failure Reason:

Command failed on smithi106 with status 1: 'sudo yum -y install ceph-radosgw'
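
Same failure class as the ceph-mgr-dashboard install above; the repo checks sketched there apply here as well, against the quincy packages for centos 8.stream.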

pass 7674458 2024-04-26 01:28:49 2024-04-26 03:31:03 2024-04-26 04:03:03 0:32:00 0:24:14 0:07:46 smithi main centos 9.stream rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-comp-zstd rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2} 3
pass 7674419 2024-04-26 01:28:07 2024-04-26 03:12:36 2024-04-26 03:31:50 0:19:14 0:12:26 0:06:48 smithi main centos 9.stream rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}} 1
pass 7674365 2024-04-26 01:27:09 2024-04-26 02:47:01 2024-04-26 03:12:36 0:25:35 0:15:58 0:09:37 smithi main ubuntu 22.04 rados/perf/{ceph mon_election/classic objectstore/bluestore-bitmap openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/radosbench_4M_rand_read} 1
fail 7674242 2024-04-26 01:03:32 2024-04-26 01:12:17 2024-04-26 02:15:24 1:03:07 0:50:54 0:12:13 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/dbench}} 3
Failure Reason:

Command failed (workunit test suites/dbench.sh) on smithi072 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=afa1933b0fdbb6c99c947d1eda34d661d23cd327 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh'
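
Unlike the fsstress case above, the exit status here is 1, not 124: dbench itself failed rather than timing out. A sketch for reproducing it in isolation, re-using the paths from the command above (again omitting the CEPH_* environment, which may need to be restored):

    # Run from the same tmp dir on the mount, outside the harness
    mkdir -p /home/ubuntu/cephtest/mnt.0/client.0/tmp
    cd /home/ubuntu/cephtest/mnt.0/client.0/tmp
    /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh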

pass 7674125 2024-04-25 22:33:20 2024-04-26 10:23:23 2024-04-26 10:59:47 0:36:24 0:24:57 0:11:27 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-comp-lz4 powercycle/default supported-all-distro/centos_latest tasks/snaps-many-objects thrashosds-health} 4
pass 7674097 2024-04-25 22:32:52 2024-04-26 09:46:26 2024-04-26 10:26:23 0:39:57 0:27:34 0:12:23 smithi main ubuntu 20.04 powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-hybrid powercycle/default supported-all-distro/ubuntu_20.04 tasks/snaps-many-objects thrashosds-health} 4
pass 7674081 2024-04-25 21:33:11 2024-04-25 22:47:32 2024-04-25 23:17:27 0:29:55 0:22:35 0:07:20 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-low-osd-mem-target powercycle/default supported-distros/centos_latest tasks/snaps-many-objects thrashosds-health} 4
pass 7674057 2024-04-25 21:32:47 2024-04-25 22:17:08 2024-04-25 22:48:08 0:31:00 0:23:11 0:07:49 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-low-osd-mem-target powercycle/default supported-distros/centos_latest tasks/cfuse_workunit_suites_ffsb thrashosds-health} 4
pass 7674039 2024-04-25 21:32:29 2024-04-25 21:48:46 2024-04-25 22:18:56 0:30:10 0:22:22 0:07:48 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-comp-zstd powercycle/default supported-distros/centos_latest tasks/snaps-many-objects thrashosds-health} 4
pass 7673976 2024-04-25 21:05:38 2024-04-26 00:47:46 2024-04-26 01:13:30 0:25:44 0:13:22 0:12:22 smithi main ubuntu 22.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/connectivity random-objectstore$/{bluestore-low-osd-mem-target} supported-random-distro$/{ubuntu_latest} tasks/crash} 2
pass 7673925 2024-04-25 21:04:43 2024-04-26 00:24:33 2024-04-26 00:48:41 0:24:08 0:13:43 0:10:25 smithi main ubuntu 22.04 rados/singleton/{all/max-pg-per-osd.from-replica mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest}} 1
pass 7673889 2024-04-25 21:04:06 2024-04-26 00:05:16 2024-04-26 00:24:53 0:19:37 0:09:39 0:09:58 smithi main ubuntu 22.04 rados/objectstore/{backends/objectstore-memstore supported-random-distro$/{ubuntu_latest}} 1
pass 7673851 2024-04-25 21:03:29 2024-04-25 23:43:25 2024-04-26 00:05:16 0:21:51 0:15:19 0:06:32 smithi main centos 9.stream rados/cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/classic task/test_set_mon_crush_locations} 3