Name Machine Type Up Locked Locked Since Locked By OS Type OS Version Arch Description
smithi189.front.sepia.ceph.com smithi True True 2024-05-10 21:18:17.054475 scheduled_teuthology@teuthology centos 9 x86_64 /home/teuthworker/archive/teuthology-2024-05-10_21:08:03-orch-squid-distro-default-smithi/7701705
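
The lock record above comes from the lab's paddles database, which also backs the job listing below. For checking a node's state without the web UI, a minimal sketch follows; the teuthology-lock invocation and the paddles endpoint are assumptions from typical sepia usage, not confirmed by this page:

    # Hedged sketch: inspect a node's lock state from a lab shell.
    # teuthology-lock ships with teuthology; --list prints the node record.
    teuthology-lock --list smithi189
    # Alternatively, query paddles directly (endpoint assumed):
    curl -s http://paddles.front.sepia.ceph.com/nodes/smithi189.front.sepia.ceph.com/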
Status Job ID Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 7701705 2024-05-10 21:08:49 2024-05-10 21:16:16 2024-05-10 21:33:33 0:17:17 0:04:20 0:12:57 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi105 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:f72fecff68e1d400c4568684327c900485c20d6a pull'
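
The same signature ('cephadm ... pull' exiting 1) also accounts for jobs 7700815, 7700771, 7700729, and 7700694 below, all pulling from the same in-lab registry, which points at a registry or image-tag problem rather than at the individual suites. A hedged triage sketch, to be run on the failing node; only the image name is taken from the log, the rest assumes podman and registry access:

    # Triage a failed 'cephadm pull' (image copied from the failure reason):
    IMG=quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:f72fecff68e1d400c4568684327c900485c20d6a
    # Is the registry answering at all? /v2/ normally returns 200 or 401.
    curl -sk -o /dev/null -w '%{http_code}\n' "https://quay-quay-quay.apps.os.sepia.ceph.com/v2/"
    # Does a direct pull reproduce it with a fuller error message?
    sudo podman pull "$IMG"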

pass 7701363 2024-05-10 15:47:58 2024-05-10 20:52:40 2024-05-10 21:18:14 0:25:34 0:14:54 0:10:40 smithi main centos 9.stream fs/functional/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} subvol_versions/create_subvol_version_v1 tasks/workunit/quota} 2
fail 7701228 2024-05-10 13:34:00 2024-05-10 13:46:32 2024-05-10 14:13:37 0:27:05 0:14:50 0:12:15 smithi main ubuntu 22.04 rgw/multifs/{clusters/fixed-2 frontend/beast ignore-pg-availability objectstore/bluestore-bitmap overrides rgw_pool_type/replicated s3tests-branch tasks/rgw_ragweed ubuntu_latest} 2
Failure Reason:

Command failed (ragweed tests against rgw) on smithi174 with status 1: "source /home/ubuntu/cephtest/tox-venv/bin/activate && cd /home/ubuntu/cephtest/ragweed.client.0 && RAGWEED_CONF=/home/ubuntu/cephtest/archive/ragweed.client.0.conf RAGWEED_STAGES=prepare BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg tox --sitepackages -- -v -m 'not fails_on_rgw'"
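
RAGWEED_STAGES=prepare means the run died while ragweed was still staging buckets and objects against RGW, before any checks ran. The stage can be replayed on the test node with the exact environment from the log; everything in this sketch is copied from the failure reason above:

    # Re-run only the failing ragweed prepare stage (all paths from the log):
    source /home/ubuntu/cephtest/tox-venv/bin/activate
    cd /home/ubuntu/cephtest/ragweed.client.0
    RAGWEED_CONF=/home/ubuntu/cephtest/archive/ragweed.client.0.conf \
    RAGWEED_STAGES=prepare \
    BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg \
    tox --sitepackages -- -v -m 'not fails_on_rgw'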

pass 7701183 2024-05-10 11:39:36 2024-05-10 12:10:41 2024-05-10 12:36:59 0:26:18 0:15:17 0:11:01 smithi main ubuntu 22.04 rgw/multifs/{clusters/fixed-2 frontend/beast ignore-pg-availability objectstore/bluestore-bitmap overrides rgw_pool_type/replicated s3tests-branch tasks/rgw_multipart_upload ubuntu_latest} 2
fail 7700922 2024-05-10 07:36:23 2024-05-10 10:36:41 2024-05-10 11:56:20 1:19:39 1:06:46 0:12:53 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/suites/dbench}} 3
Failure Reason:

Command failed (workunit test suites/dbench.sh) on smithi027 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d39fa08867836ac070c47cb66c782fa559fd480b TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh'
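
Workunits are plain shell scripts kept in ceph.git under qa/workunits/, so this failure can usually be reproduced outside teuthology against any mounted CephFS client. A minimal sketch, assuming a client mount at /mnt/cephfs and the dbench package installed (both assumptions):

    # Standalone run of the dbench workunit (mount point assumed):
    git clone --depth 1 https://github.com/ceph/ceph.git ~/ceph
    cd /mnt/cephfs            # run inside the CephFS mount, as teuthology does
    sh ~/ceph/qa/workunits/suites/dbench.sh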

fail 7700815 2024-05-10 05:15:00 2024-05-10 07:15:48 2024-05-10 07:49:07 0:33:19 0:06:53 0:26:26 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/suites/fsstress}} 3
Failure Reason:

Command failed on smithi098 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:25eb20a061356442d4a1c711818ce2e5848c382d pull'

fail 7700771 2024-05-10 05:14:04 2024-05-10 07:00:06 2024-05-10 07:24:03 0:23:57 0:08:31 0:15:26 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/with-quiesce 6-workunit/fs/misc}} 3
Failure Reason:

Command failed on smithi031 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:25eb20a061356442d4a1c711818ce2e5848c382d pull'

fail 7700729 2024-05-10 05:13:10 2024-05-10 06:29:04 2024-05-10 06:46:54 0:17:50 0:06:28 0:11:22 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-quiesce/no 6-workunit/fs/test_o_trunc}} 3
Failure Reason:

Command failed on smithi047 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:25eb20a061356442d4a1c711818ce2e5848c382d pull'

fail 7700694 2024-05-10 05:12:25 2024-05-10 06:03:25 2024-05-10 06:25:13 0:21:48 0:08:33 0:13:15 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/pjd}} 3
Failure Reason:

Command failed on smithi031 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:25eb20a061356442d4a1c711818ce2e5848c382d pull'

pass 7700616 2024-05-10 02:37:38 2024-05-10 05:06:10 2024-05-10 06:07:51 1:01:41 0:55:15 0:06:26 smithi main rhel 8.6 fs/multifs/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_20.04} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover} 2
pass 7700513 2024-05-10 02:35:52 2024-05-10 03:43:53 2024-05-10 05:06:18 1:22:25 1:15:31 0:06:54 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/fs/misc}} 3
pass 7700409 2024-05-10 02:34:07 2024-05-10 02:37:40 2024-05-10 03:44:42 1:07:02 0:54:57 0:12:05 smithi main ubuntu 20.04 fs/multifs/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_20.04} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover} 2
fail 7700383 2024-05-10 02:08:43 2024-05-10 08:01:56 2024-05-10 10:30:13 2:28:17 2:17:56 0:10:21 smithi main centos 9.stream upgrade/quincy-x/stress-split/{0-distro/centos_9.stream 0-roles 1-start 2-first-half-tasks/rbd_api 3-stress-tasks/{radosbench rbd-cls rbd-import-export rbd_api readwrite snaps-few-objects} 4-second-half-tasks/rbd-import-export mon_election/classic} 2
Failure Reason:

"1715329460.0713909 mon.a (mon.0) 516 : cluster [WRN] Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)" in cluster log

pass 7700097 2024-05-09 21:05:55 2024-05-10 20:18:41 2024-05-10 20:52:51 0:34:10 0:23:20 0:10:50 smithi main centos 9.stream rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 7700061 2024-05-09 21:05:20 2024-05-10 19:56:24 2024-05-10 20:19:23 0:22:59 0:11:26 0:11:33 smithi main centos 9.stream rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-zlib rados recovery-overrides/{default} supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 7700001 2024-05-09 21:04:24 2024-05-10 19:33:35 2024-05-10 19:57:42 0:24:07 0:13:21 0:10:46 smithi main centos 9.stream rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
pass 7699937 2024-05-09 21:03:22 2024-05-10 19:10:26 2024-05-10 19:34:21 0:23:55 0:12:19 0:11:36 smithi main ubuntu 22.04 rados/singleton-nomsgr/{all/large-omap-object-warnings mon_election/classic rados supported-random-distro$/{ubuntu_latest}} 1
pass 7699893 2024-05-09 21:02:38 2024-05-10 13:11:22 2024-05-10 13:47:37 0:36:15 0:24:44 0:11:31 smithi main centos 8.stream rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/pacific backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/rbd_cls} 3
pass 7699850 2024-05-09 21:01:55 2024-05-10 12:37:01 2024-05-10 13:11:22 0:34:21 0:22:49 0:11:32 smithi main ubuntu 22.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-1} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/read mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/cache-snaps} 2
pass 7699649 2024-05-09 09:15:58 2024-05-09 09:42:55 2024-05-09 10:08:14 0:25:19 0:13:57 0:11:22 smithi main ubuntu 22.04 rgw/multifs/{clusters/fixed-2 frontend/beast ignore-pg-availability objectstore/bluestore-bitmap overrides rgw_pool_type/replicated s3tests-branch tasks/rgw_multipart_upload ubuntu_latest} 2