Name:         smithi111.front.sepia.ceph.com
Machine Type: smithi
Up:           True
Locked:       False
Locked Since:
Locked By:
OS Type:      ubuntu
OS Version:   22.04
Arch:         x86_64
Description:  /home/teuthworker/archive/teuthology-2024-04-23_05:00:14-smoke-main-distro-default-smithi/7669469
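
The lock record above can also be pulled programmatically. A minimal sketch, assuming the upstream paddles REST layout (a JSON document served under /nodes/<name>/) and that paddles.front.sepia.ceph.com is reachable from your network; the field names mirror the columns shown above:

    import json
    import urllib.request

    # Assumed paddles deployment for the Sepia lab; adjust to your instance.
    PADDLES = "http://paddles.front.sepia.ceph.com"
    NODE = "smithi111.front.sepia.ceph.com"

    # Upstream paddles serves a node's lock record as JSON at /nodes/<name>/.
    with urllib.request.urlopen(f"{PADDLES}/nodes/{NODE}/") as resp:
        node = json.load(resp)

    # Print the same fields the lock record above reports.
    for key in ("name", "machine_type", "up", "locked", "locked_since",
                "locked_by", "os_type", "os_version", "arch", "description"):
        print(f"{key}: {node.get(key)}")
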
Status Job ID Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 7669469 2024-04-23 05:01:17 2024-04-23 05:01:19 2024-04-23 05:35:40 0:34:21 0:22:07 0:12:14 smithi main ubuntu 22.04 smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{ubuntu_latest} tasks/{0-install test/cfuse_workunit_suites_blogbench}} 3
pass 7669450 2024-04-23 01:24:04 2024-04-23 01:25:38 2024-04-23 02:01:26 0:35:48 0:27:09 0:08:39 smithi main centos 9.stream crimson-rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} clusters/{fixed-2} crimson-supported-all-distro/centos_latest crimson_qa_overrides deploy/ceph objectstore/bluestore thrashers/simple thrashosds-health workloads/radosbench} 2
pass 7669130 2024-04-22 22:10:52 2024-04-23 02:00:47 2024-04-23 02:52:42 0:51:55 0:45:08 0:06:47 smithi main rhel 8.6 orch/cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
pass 7669038 2024-04-22 22:09:26 2024-04-23 00:54:43 2024-04-23 01:25:50 0:31:07 0:19:26 0:11:41 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
pass 7669001 2024-04-22 22:08:51 2024-04-23 00:26:07 2024-04-23 00:55:19 0:29:12 0:13:28 0:15:44 smithi main centos 8.stream orch/cephadm/smoke-small/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/classic start} 3
pass 7668976 2024-04-22 21:33:06 2024-04-23 00:10:05 2024-04-23 00:30:16 0:20:11 0:11:53 0:08:18 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-bitmap powercycle/default supported-distros/centos_latest tasks/cfuse_workunit_suites_pjd thrashosds-health} 4
pass 7668705 2024-04-22 20:12:52 2024-04-23 02:51:20 2024-04-23 03:14:26 0:23:06 0:13:35 0:09:31 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
fail 7668555 2024-04-22 20:10:31 2024-04-22 20:12:00 2024-04-23 00:01:38 3:49:38 3:36:54 0:12:44 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi138 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8254cb5a247caa8e3d35e534a1fefe2362a0a4b8 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 CEPH_MNT=/home/ubuntu/cephtest/mnt.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/suites/fsstress.sh'

pass 7668489 2024-04-22 19:31:40 2024-04-22 19:32:36 2024-04-22 20:03:53 0:31:17 0:18:43 0:12:34 smithi main ubuntu 22.04 rgw/thrash/{clusters/fixed-2 frontend/beast ignore-pg-availability install objectstore/bluestore-bitmap s3tests-branch thrasher/default thrashosds-health ubuntu_latest workload/rgw_bucket_quota} 2
fail 7668433 2024-04-22 18:21:30 2024-04-22 18:22:02 2024-04-22 19:23:42 1:01:40 0:52:34 0:09:06 smithi main centos 9.stream rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind} 2
Failure Reason:

"2024-04-22T18:54:53.261384+0000 mon.a (mon.0) 851 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

pass 7668315 2024-04-22 13:45:53 2024-04-22 13:46:49 2024-04-22 15:53:10 2:06:21 1:56:47 0:09:34 smithi main ubuntu 22.04 rgw/verify/{0-install accounts$/{main-tenant} clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability inline-data$/{on} msgr-failures/few objectstore/bluestore-bitmap overrides proto/https rgw_pool_type/ec s3tests-branch sharding$/{default} striping$/{stripe-greater-than-chunk} supported-random-distro$/{ubuntu_latest} tasks/{bucket-check cls mp_reupload ragweed reshard s3tests-java s3tests versioning} validater/valgrind} 2
pass 7668268 2024-04-22 12:37:35 2024-04-22 12:58:39 2024-04-22 13:39:57 0:41:18 0:31:25 0:09:53 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/fsync-tester}} 3
fail 7668221 2024-04-22 09:46:33 2024-04-22 09:46:42 2024-04-22 10:17:01 0:30:19 0:22:40 0:07:39 smithi main centos 9.stream rbd/iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
Failure Reason:

"2024-04-22T10:00:26.087943+0000 mon.a (mon.0) 207 : cluster [WRN] Health check failed: 1/3 mons down, quorum a,c (MON_DOWN)" in cluster log

pass 7668060 2024-04-22 00:24:45 2024-04-22 03:49:26 2024-04-22 07:30:15 3:40:49 3:29:15 0:11:34 smithi main ubuntu 20.04 upgrade:octopus-x/stress-split-no-cephadm/{0-cluster/{openstack start} 1-ceph-install/octopus 1.1-pg-log-overrides/normal_pg_log 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{radosbench rbd-cls rbd-import-export rbd_api readwrite rgw_ragweed_prepare snaps-few-objects} 5-finish-upgrade 6-quincy 8-final-workload/{rbd-python snaps-many-objects} mon_election/connectivity objectstore/filestore-xfs thrashosds-health ubuntu_20.04} 5
pass 7667989 2024-04-21 22:05:42 2024-04-22 09:13:04 2024-04-22 09:47:19 0:34:15 0:24:26 0:09:49 smithi main centos 8.stream rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/crush-compat mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} thrashers/mapgap thrashosds-health workloads/small-objects-balanced} 2
pass 7667929 2024-04-21 22:04:39 2024-04-22 08:39:10 2024-04-22 09:13:36 0:34:26 0:22:01 0:12:25 smithi main ubuntu 20.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/connectivity random-objectstore$/{bluestore-comp-zlib} supported-random-distro$/{ubuntu_20.04} tasks/progress} 2
fail 7667858 2024-04-21 22:03:28 2024-04-22 07:56:06 2024-04-22 08:28:01 0:31:55 0:24:38 0:07:17 smithi main centos 9.stream rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_latest}} 1
Failure Reason:

Command failed (workunit test cephtool/test.sh) on smithi111 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=c3a345d31ea77c01771428ab80e1cf32c9b05875 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

pass 7667818 2024-04-21 22:02:48 2024-04-22 07:29:12 2024-04-22 07:56:29 0:27:17 0:15:40 0:11:37 smithi main ubuntu 22.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/connectivity random-objectstore$/{bluestore-bitmap} supported-random-distro$/{ubuntu_latest} tasks/failover} 2
pass 7667718 2024-04-21 21:28:11 2024-04-22 00:45:43 2024-04-22 01:19:53 0:34:10 0:25:14 0:08:56 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/iozone}} 3
pass 7667643 2024-04-21 21:26:54 2024-04-21 23:38:10 2024-04-22 00:47:34 1:09:24 1:02:59 0:06:25 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/ffsb}} 3
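
Two of the failures above follow recurring patterns: exit status 124 on the fsstress workunit is the GNU coreutils timeout wrapper (the "timeout 3h" in the command line) killing a test that overran its cap, while the OSD_DOWN and MON_DOWN failures were raised by cluster-log scraping, which fails a job when an unignored health warning appears in the log. A rough triage sketch over failure-reason strings shaped like the ones above (the classify helper and its heuristics are illustrative, not part of teuthology):

    import re

    def classify(reason: str) -> str:
        """Rough bucket for a teuthology failure reason (heuristics only)."""
        # GNU coreutils `timeout` exits 124 when the wrapped command runs
        # past its limit, so "status 124" on a workunit wrapped in
        # `timeout 3h` means the test hit its 3-hour cap.
        if "status 124" in reason:
            return "workunit timeout"
        # Jobs also fail when an unignored WRN/ERR line such as OSD_DOWN
        # or MON_DOWN turns up in the cluster log.
        m = re.search(r'\((\w+)\)" in cluster log', reason)
        if m:
            return f"cluster log health warning: {m.group(1)}"
        if "Command failed" in reason:
            return "command failure"
        return "unclassified"

    # Applied to the failure reasons above:
    print(classify("Command failed ... with status 124: ..."))
    # -> workunit timeout
    print(classify('"... Health check failed: 1 osds down (OSD_DOWN)" in cluster log'))
    # -> cluster log health warning: OSD_DOWN
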