Name:          smithi173.front.sepia.ceph.com
Machine Type:  smithi
Up:            True
Locked:        True
Locked Since:  2024-04-23 16:09:05.038229
Locked By:     scheduled_yuriw@teuthology
OS Type:       (not set)
OS Version:    (not set)
Arch:          x86_64
Description:   /home/teuthworker/archive/yuriw-2024-04-23_14:14:08-rados-wip-yuri3-testing-2024-04-05-0825-distro-default-smithi/7669767
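The Description is the teuthology archive path of the job holding the lock; the last path component is the job ID (7669767, the waiting job in the table below) and the component before it is the run name. A minimal shell sketch of splitting such a path (the variable name is illustrative):

    # Split a teuthology archive path into run name and job ID.
    desc=/home/teuthworker/archive/yuriw-2024-04-23_14:14:08-rados-wip-yuri3-testing-2024-04-05-0825-distro-default-smithi/7669767
    job_id=$(basename "$desc")                 # -> 7669767
    run_name=$(basename "$(dirname "$desc")")  # -> yuriw-2024-04-23_14:14:08-rados-...
    echo "$run_name $job_id"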
Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
(Runtime = Duration + In Waiting; the Links column has no text content in this export.)
waiting 7669767 2024-04-23 14:18:25 2024-04-23 16:08:44 2024-04-23 16:09:06 0:01:28 0:01:28 smithi main centos 9.stream rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/osd-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 7669707 2024-04-23 14:17:21 2024-04-23 15:38:07 2024-04-23 16:08:36 0:30:29 0:17:47 0:12:42 smithi main ubuntu 22.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-1} backoff/peering_and_degraded ceph clusters/{fixed-4 openstack} crc-failures/default d-balancer/crush-compat mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/set-chunks-read} 4
pass 7669657 2024-04-23 14:16:29 2024-04-23 15:15:04 2024-04-23 15:40:32 0:25:28 0:13:53 0:11:35 smithi main ubuntu 22.04 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
fail 7669569 2024-04-23 14:04:47 2024-04-23 14:05:35 2024-04-23 15:10:10 1:04:35 0:43:24 0:21:11 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/ffsb}} 3
Failure Reason:

Command failed (workunit test suites/ffsb.sh) on smithi083 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=d44fddd910b5e94da0f2357af4be37d7bd495ae0 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/ffsb.sh'

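The failed command above is teuthology's standard workunit wrapper: create a scratch directory on the client mount, export the CEPH_* environment, and run the named script from a clone of ceph.git under adjust-ulimits, ceph-coverage, and a 3h timeout. Exit status 1 here usually means the script itself failed rather than the harness. A stripped-down manual re-run, assuming a hypothetical CephFS mount at /mnt/cephfs and a ceph checkout at ~/ceph:

    # Re-run the ffsb workunit by hand (paths are assumptions, not the job's).
    mkdir -p /mnt/cephfs/client.0/tmp && cd /mnt/cephfs/client.0/tmp
    CEPH_ARGS="--cluster ceph" CEPH_ID=0 \
        timeout 3h ~/ceph/qa/workunits/suites/ffsb.sh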
pass 7669504 2024-04-23 09:50:07 2024-04-23 09:50:58 2024-04-23 10:38:12 0:47:14 0:40:22 0:06:52 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-quiesce/with-quiesce 6-workunit/postgres}} 3
pass 7669473 2024-04-23 05:01:21 2024-04-23 05:01:21 2024-04-23 05:26:58 0:25:37 0:14:47 0:10:50 smithi main ubuntu 22.04 smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{ubuntu_latest} tasks/{0-install test/kclient_workunit_direct_io}} 3
fail 7669257 2024-04-22 22:47:13 2024-04-22 23:39:17 2024-04-23 00:06:46 0:27:29 0:15:59 0:11:30 smithi main ubuntu 22.04 orch:cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

"2024-04-23T00:03:20.892648+0000 mon.a (mon.0) 101 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

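CEPHADM_REFRESH_FAILED is raised by the cephadm mgr module when its periodic refresh of host daemons or devices fails on some host. A first triage step, using standard commands (the second is the documented way to read the cephadm log channel):

    # Show which host/probe failed, then read recent cephadm module activity.
    ceph health detail
    ceph log last 100 debug cephadm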
fail 7669231 2024-04-22 22:46:46 2024-04-22 23:23:45 2024-04-22 23:36:41 0:12:56 0:06:29 0:06:27 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final} 2
Failure Reason:

Command failed on smithi173 with status 125: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a06f0fb2-0100-11ef-bc93-c7b262605968 -- ceph orch apply mon '2;smithi094:172.21.15.94=smithi094;smithi173:172.21.15.173=smithi173'"

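Exit status 125 typically comes from the container runtime rather than from ceph: `cephadm shell` failed to start its container, so the wrapped `ceph orch apply mon` never ran. For reference, the quoted placement argument packs a mon count plus explicit per-host addresses; its general shape, with hypothetical hosts:

    # Placement spec form: "<count>;<host>:<addr>=<name>;..." (hosts invented here).
    ceph orch apply mon '2;hostA:10.0.0.1=hostA;hostB:10.0.0.2=hostB'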
fail 7669205 2024-04-22 22:46:18 2024-04-22 23:08:13 2024-04-22 23:24:09 0:15:56 0:08:23 0:07:33 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi178 with status 125: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a2f0a036-00fe-11ef-bc93-c7b262605968 -- ceph orch apply mon '2;smithi173:172.21.15.173=smithi173;smithi178:172.21.15.178=smithi178'"

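Same failure mode as job 7669231 above: the same mirrored CI image and another `cephadm shell ... ceph orch apply mon` step exiting 125, on a different service stack, suggesting the shared environment rather than the rgw service under test.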
pass 7669088 2024-04-22 22:10:13 2024-04-23 01:35:38 2024-04-23 02:28:30 0:52:52 0:41:53 0:10:59 smithi main centos 8.stream orch/cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
pass 7669046 2024-04-22 22:09:33 2024-04-23 00:59:09 2024-04-23 01:37:10 0:38:01 0:26:44 0:11:17 smithi main centos 8.stream orch/cephadm/no-agent-workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_orch_cli_mon} 5
pass 7668970 2024-04-22 21:33:00 2024-04-23 00:10:02 2024-04-23 00:59:51 0:49:49 0:41:59 0:07:50 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-comp-snappy powercycle/default supported-distros/centos_latest tasks/cfuse_workunit_kernel_untar_build thrashosds-health} 4
pass 7668927 2024-04-22 21:32:18 2024-04-22 22:37:15 2024-04-22 23:09:14 0:31:59 0:18:34 0:13:25 smithi main ubuntu 22.04 powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-stupid powercycle/default supported-distros/ubuntu_latest tasks/admin_socket_objecter_requests thrashosds-health} 4
fail 7668885 2024-04-22 21:11:11 2024-04-22 22:21:47 2024-04-22 22:35:33 0:13:46 0:03:46 0:10:00 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi090 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:430e09df97c8fc7dc2b2ae424f68ed11366c540f pull'

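`cephadm --image <img> pull` simply pulls that image through the host's container runtime, so this step can be retried outside teuthology. A hedged reproduction (the registry is internal to the Sepia lab, so this only resolves from a lab node):

    # Retry the exact pull by hand from a Sepia node.
    sudo podman pull quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:430e09df97c8fc7dc2b2ae424f68ed11366c540f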
fail 7668816 2024-04-22 21:10:00 2024-04-22 21:50:17 2024-04-22 22:13:13 0:22:56 0:16:15 0:06:41 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi137 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ea8b395c-00f3-11ef-bc93-c7b262605968 -e sha1=430e09df97c8fc7dc2b2ae424f68ed11366c540f -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

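Exit status 22 is EINVAL from the mon command. The job boots the cluster from the quincy image and then asks for a staggered upgrade of only the mgr daemons; one plausible reading is that the running mgr predates staggered-upgrade support and rejects `--daemon-types`. The intended sequence looks like this sketch (the image tag is a placeholder):

    # Staggered upgrade: mgrs first, then the remaining daemons.
    ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:<tag> --daemon-types mgr
    ceph orch upgrade status
    ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:<tag>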
fail 7668769 2024-04-22 21:09:13 2024-04-22 21:29:47 2024-04-22 21:42:50 0:13:03 0:03:40 0:09:23 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
Failure Reason:

Command failed on smithi027 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:430e09df97c8fc7dc2b2ae424f68ed11366c540f pull'

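As with job 7668885 above: the same `cephadm pull` of the same image failing with status 1 on a different host and service, so the two failures likely share one root cause (the CI image or the mirror registry at the time of the run).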
pass 7668694 2024-04-22 20:12:42 2024-04-23 02:46:55 2024-04-23 03:26:19 0:39:24 0:32:06 0:07:18 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root} 2
pass 7668666 2024-04-22 20:12:16 2024-04-23 02:28:31 2024-04-23 02:47:06 0:18:35 0:10:52 0:07:43 smithi main centos 9.stream orch/cephadm/orchestrator_cli/{0-random-distro$/{centos_9.stream} 2-node-mgr agent/off orchestrator_cli} 2
pass 7668636 2024-04-22 20:11:46 2024-04-22 20:57:48 2024-04-22 21:30:19 0:32:31 0:18:39 0:13:52 smithi main ubuntu 22.04 orch/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/deploy-raw} 2
pass 7668600 2024-04-22 20:11:12 2024-04-22 20:34:31 2024-04-22 20:59:02 0:24:31 0:15:16 0:09:15 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
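For a quick overall read of a listing like this one, the status column can be tallied straight from a saved text copy; a minimal sketch, assuming the dump is in jobs.txt:

    # Count jobs per outcome; the first whitespace-separated field is the status.
    awk '/^(pass|fail|dead|running|waiting) /{c[$1]++} END{for (s in c) print s, c[s]}' jobs.txt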