Name | Machine Type | Up | Locked | Locked Since | Locked By | OS Type | OS Version | Arch | Description
smithi029.front.sepia.ceph.com | smithi | True | False | - | - | ubuntu | 22.04 | x86_64 | /home/teuthworker/archive/leonidus-2024-04-23_09:49:14-fs-wip-lusov-quiescer-distro-default-smithi/7669532
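
This node summary comes from the lab's lock server. A minimal sketch of pulling the same state from a teuthology checkout (assumes Sepia access and a lock server configured in ~/.teuthology.yaml; exact flags and host spelling per teuthology-lock --help):

    # Show lock state for a specific machine
    teuthology-lock --list smithi029
    # One line per node, for a quick overview
    teuthology-lock --brief
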
Status | Job ID | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
pass | 7669532 | 2024-04-23 09:50:15 | 2024-04-23 09:51:19 | 2024-04-23 10:32:39 | 0:41:20 | 0:27:24 | 0:13:56 | smithi | main | ubuntu | 22.04 | fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a5s-mds-1c-client conf/{client mds mgr mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down pg_health prefetch_dirfrags/no prefetch_dirfrags/yes prefetch_entire_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/with-quiesce 2-workunit/suites/fsstress}} | 2
pass | 7669453 | 2024-04-23 01:24:06 | 2024-04-23 01:25:59 | 2024-04-23 02:01:27 | 0:35:28 | 0:27:58 | 0:07:30 | smithi | main | centos | 9.stream | crimson-rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} clusters/{fixed-2} crimson-supported-all-distro/centos_latest crimson_qa_overrides deploy/ceph objectstore/bluestore thrashers/simple thrashosds-health workloads/snaps-few-objects-balanced} | 2
fail | 7669242 | 2024-04-22 22:46:57 | 2024-04-22 23:23:59 | 2024-04-22 23:38:06 | 0:14:07 | 0:07:03 | 0:07:04 | smithi | main | centos | 9.stream | orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag} | 2
Failure Reason: Command failed on smithi052 with status 125: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid dde0908c-0100-11ef-bc93-c7b262605968 -- ceph orch apply mon '2;smithi029:172.21.15.29=smithi029;smithi052:172.21.15.52=smithi052'"
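
Status 125 here is typically not a Ceph exit code: podman and docker reserve 125 for errors in the container runtime itself (an image pull failure, for instance), so the wrapped ceph orch apply mon command probably never ran. A reproduction sketch on the affected node, assuming the job's cephadm binary is still under /home/ubuntu/cephtest (the rmdir-reactivate failure below has the same shape):

    # Re-run the image pull by hand to surface the runtime error
    sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5 pull
    # Or ask podman directly, bypassing cephadm
    sudo podman pull quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5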

fail | 7669199 | 2024-04-22 22:46:12 | 2024-04-22 23:08:01 | 2024-04-22 23:21:18 | 0:13:17 | 0:06:36 | 0:06:41 | smithi | main | centos | 9.stream | orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate} | 2
Failure Reason: Command failed on smithi191 with status 125: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 93431434-00fe-11ef-bc93-c7b262605968 -- ceph orch apply mon '2;smithi029:172.21.15.29=smithi029;smithi191:172.21.15.191=smithi191'"

fail | 7669179 | 2024-04-22 22:45:51 | 2024-04-22 22:52:41 | 2024-04-22 23:05:52 | 0:13:11 | 0:06:34 | 0:06:37 | smithi | main | centos | 9.stream | orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream} 1-start 2-services/basic 3-final} | 1
Failure Reason: Command failed on smithi029 with status 2: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:43be020184947e53516056c9931e1ac5bdbbb1a5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 587fd4e2-00fc-11ef-bc93-c7b262605968 -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
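
ceph-volume lvm zap wipes LVM metadata and filesystem signatures from a device so it can be redeployed as an OSD; here it returned a generic status 2 inside the cephadm shell. For reference, the usual zap forms (destructive; whether this test phase intends --destroy is an assumption):

    # DESTRUCTIVE: clear signatures on the logical volume
    sudo ceph-volume lvm zap /dev/vg_nvme/lv_4
    # Also tear down the LVs/VG/partitions themselves
    sudo ceph-volume lvm zap --destroy /dev/vg_nvme/lv_4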

pass | 7669132 | 2024-04-22 22:10:54 | 2024-04-23 02:00:58 | 2024-04-23 02:34:26 | 0:33:28 | 0:27:09 | 0:06:19 | smithi | main | rhel | 8.6 | orch/cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.6_container_tools_3.0} 2-node-mgr agent/on orchestrator_cli} | 2
pass | 7669004 | 2024-04-22 22:08:54 | 2024-04-23 00:35:29 | 2024-04-23 01:26:06 | 0:50:37 | 0:39:28 | 0:11:09 | smithi | main | centos | 8.stream | orch/cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} | 2
pass | 7668968 | 2024-04-22 21:32:58 | 2024-04-23 00:03:20 | 2024-04-23 00:36:04 | 0:32:44 | 0:22:26 | 0:10:18 | smithi | main | centos | 9.stream | powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-bitmap powercycle/default supported-distros/centos_latest tasks/snaps-many-objects thrashosds-health} | 4
pass | 7668949 | 2024-04-22 21:32:40 | 2024-04-22 23:39:22 | 2024-04-23 00:06:21 | 0:26:59 | 0:14:53 | 0:12:06 | smithi | main | ubuntu | 22.04 | powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-hybrid powercycle/default supported-distros/ubuntu_latest tasks/cfuse_workunit_suites_truncate_delay thrashosds-health} | 4
fail | 7668909 | 2024-04-22 21:11:36 | 2024-04-22 22:21:55 | 2024-04-22 22:48:03 | 0:26:08 | 0:17:00 | 0:09:08 | smithi | main | centos | 9.stream | orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} | 2
Failure Reason: Command failed on smithi029 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c6260dda-00f8-11ef-bc93-c7b262605968 -e sha1=430e09df97c8fc7dc2b2ae424f68ed11366c540f -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"
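
Status 22 maps to EINVAL, i.e. the cluster rejected the arguments; one plausible cause is that the freshly bootstrapped quincy mgr predates staggered-upgrade support and does not accept --daemon-types. For context, the documented staggered flow this task drives looks roughly like this (the image is this job's; the later steps are illustrative):

    # Phase 1: upgrade only the mgr daemons
    ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
    # Check progress
    ceph orch upgrade status
    # Later: widen the upgrade to the remaining daemons
    ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1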

fail | 7668834 | 2024-04-22 21:10:19 | 2024-04-22 21:50:24 | 2024-04-22 22:09:09 | 0:18:45 | 0:07:47 | 0:10:58 | smithi | main | ubuntu | 22.04 | orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/mirror 3-final} | 2
Failure Reason: Command failed on smithi029 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:430e09df97c8fc7dc2b2ae424f68ed11366c540f pull'
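
This job and the rm-zap-wait job below failed on the same cephadm pull from the lab's internal Quay mirror, which points at the registry rather than at the tests themselves. A quick reachability sketch using the Docker Registry HTTP API v2 (any HTTP answer on /v2/, even 401, means the registry is up; the hostname is taken from the log above):

    # A healthy v2 registry answers /v2/ with 200 or 401
    curl -sk -o /dev/null -w '%{http_code}\n' https://quay-quay-quay.apps.os.sepia.ceph.com/v2/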

fail | 7668795 | 2024-04-22 21:09:39 | 2024-04-22 21:34:47 | 2024-04-22 21:46:46 | 0:11:59 | 0:03:51 | 0:08:08 | smithi | main | centos | 9.stream | orch/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-wait} | 2
Failure Reason: Command failed on smithi029 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:430e09df97c8fc7dc2b2ae424f68ed11366c540f pull'

fail | 7668677 | 2024-04-22 20:12:27 | 2024-04-23 02:34:46 | 2024-04-23 03:09:46 | 0:35:00 | 0:23:40 | 0:11:20 | smithi | main | ubuntu | 22.04 | orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} | 2
Failure Reason: "2024-04-23T03:04:18.607793+0000 mon.smithi029 (mon.0) 859 : cluster [WRN] Health check failed: Failed to place 1 daemon(s) (CEPHADM_DAEMON_PLACE_FAIL)" in cluster log

fail | 7668645 | 2024-04-22 20:11:55 | 2024-04-22 21:03:32 | 2024-04-22 21:26:44 | 0:23:12 | 0:15:09 | 0:08:03 | smithi | main | centos | 9.stream | orch/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} | 2
Failure Reason: "2024-04-22T21:18:59.678570+0000 mon.a (mon.0) 655 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

fail | 7668566 | 2024-04-22 20:10:41 | 2024-04-22 20:12:05 | 2024-04-22 20:57:29 | 0:45:24 | 0:38:26 | 0:06:58 | smithi | main | centos | 9.stream | orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.0} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} | 2
Failure Reason: reached maximum tries (51) after waiting for 300 seconds
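
This is teuthology's generic polling timeout rather than a command failure: a waiter probed roughly every 6 seconds, gave up after 51 tries (~300 s), and the awaited post-upgrade condition never became true. The shape of such a loop, as an illustrative shell sketch (not teuthology's actual code; the status check is an assumption about what this job waits on):

    tries=0
    until ceph orch upgrade status | grep -q '"in_progress": false'; do
        tries=$((tries + 1))
        [ "$tries" -ge 51 ] && { echo "reached maximum tries ($tries)"; exit 1; }
        sleep 6
    done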

pass | 7668518 | 2024-04-22 19:32:02 | 2024-04-22 19:32:46 | 2024-04-22 20:07:42 | 0:34:56 | 0:18:26 | 0:16:30 | smithi | main | ubuntu | 22.04 | rgw/hadoop-s3a/{clusters/fixed-2 hadoop/v32 ignore-pg-availability overrides s3a-hadoop supported-random-distro$/{ubuntu_latest}} | 2
pass | 7668472 | 2024-04-22 18:22:14 | 2024-04-22 18:22:16 | 2024-04-22 18:48:15 | 0:25:59 | 0:18:05 | 0:07:54 | smithi | main | centos | 9.stream | rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-stupid rados tasks/rados_cls_all validater/lockdep} | 2
fail | 7668141 | 2024-04-22 05:53:48 | 2024-04-22 06:20:52 | 2024-04-22 07:01:31 | 0:40:39 | 0:31:54 | 0:08:45 | smithi | main | centos | 9.stream | fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/pjd}} | 3
Failure Reason: error during quiesce thrashing: Expecting value: line 1 column 1 (char 0)
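
"Expecting value: line 1 column 1 (char 0)" is Python's JSON decoder rejecting empty (or non-JSON) input, so the quiesce thrasher most likely received an empty reply where it expected JSON. The error text is easy to reproduce:

    # json.loads on an empty string raises exactly this message
    python3 -c 'import json; json.loads("")'
    # -> json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)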

pass | 7668069 | 2024-04-22 00:24:54 | 2024-04-22 03:54:21 | 2024-04-22 05:13:56 | 1:19:35 | 1:07:49 | 0:11:46 | smithi | main | centos | 8.stream | upgrade:octopus-x/parallel/{0-distro/centos_8.stream_container_tools_crun 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api}} | 2
pass | 7668030 | 2024-04-21 22:06:24 | 2024-04-22 09:39:53 | 2024-04-22 10:27:43 | 0:47:50 | 0:40:05 | 0:07:45 | smithi | main | rhel | 8.6 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/crush-compat mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/snaps-few-objects-localized} | 2