Name: smithi171.front.sepia.ceph.com
Machine Type: smithi
Up: True
Locked: False
Locked Since:
Locked By:
OS Type: centos
OS Version: 9
Arch: x86_64
Description: /home/teuthworker/archive/teuthology-2024-04-15_20:08:15-orch-main-distro-default-smithi/7657404
Status Job ID Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 7657849 2024-04-15 22:11:33 2024-04-16 01:31:40 2024-04-16 01:58:58 0:27:18 0:20:16 0:07:02 smithi main rhel 8.6 orch/cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
pass 7657787 2024-04-15 22:10:29 2024-04-16 00:51:10 2024-04-16 01:30:29 0:39:19 0:33:02 0:06:17 smithi main rhel 8.6 orch/cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/root mon_election/classic msgr/async-v1only start tasks/rotate-keys} 2
pass 7657722 2024-04-15 22:09:26 2024-04-16 00:06:03 2024-04-16 00:49:47 0:43:44 0:34:20 0:09:24 smithi main centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{v18.2.0} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7657670 2024-04-15 22:08:37 2024-04-15 23:32:54 2024-04-16 00:05:13 0:32:19 0:21:33 0:10:46 smithi main ubuntu 20.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
pass 7657625 2024-04-15 21:32:28 2024-04-15 22:32:27 2024-04-15 23:12:58 0:40:31 0:28:22 0:12:09 smithi main centos 9.stream powercycle/osd/{clusters/3osd-1per-target ignorelist_health objectstore/bluestore-hybrid powercycle/default supported-distros/centos_latest tasks/snaps-many-objects thrashosds-health} 4
fail 7657595 2024-04-15 21:11:23 2024-04-15 22:16:50 2024-04-15 22:30:29 0:13:39 0:04:38 0:09:01 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi106 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull'
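Every "pull" failure in this run points at the same ceph-ci image on the lab registry. A minimal reproduction sketch, assuming shell access to the affected smithi node and that podman is the container runtime on it (both assumptions, not taken from the log):

    # Re-run the exact step that failed, verbatim from the failure reason above
    sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull

    # Pull directly, bypassing cephadm, to separate registry reachability from cephadm issues (assumes podman)
    sudo podman pull quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765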

fail 7657501 2024-04-15 21:09:48 2024-04-15 21:39:08 2024-04-15 22:12:48 0:33:40 0:18:04 0:15:36 smithi main centos 9.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi106 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 528464ce-fb73-11ee-bc8f-c7b262605968 -e sha1=a9a752df26c63acad72e1b3569fd79a515ca0765 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''
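The nested quoting above is teuthology's escaping of a shell-in-shell command. Unwound, the check that ran inside the cephadm shell container reduces to the pipeline below; status 1 most likely means grep found no match, i.e. the expected build sha1 was not among the cluster's reported versions ($sha1 is exported into the container by the -e flag shown above).

    # Reduced form of the failed check; exits non-zero when the expected sha1
    # does not appear among the keys of `ceph versions` .overall
    ceph versions | jq -e '.overall | keys' | grep $sha1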

pass 7657404 2024-04-15 20:12:28 2024-04-16 01:59:31 2024-04-16 02:22:48 0:23:17 0:14:06 0:09:11 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/mirror 3-final} 2
pass 7657352 2024-04-15 20:11:41 2024-04-15 21:44:19 2100 smithi main centos 9.stream orch/cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
fail 7657304 2024-04-15 20:10:55 2024-04-15 20:11:43 2024-04-15 20:48:57 0:37:14 0:25:52 0:11:22 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

"2024-04-15T20:43:30.090655+0000 mon.smithi106 (mon.0) 756 : cluster [WRN] Health check failed: Failed to place 1 daemon(s) (CEPHADM_DAEMON_PLACE_FAIL)" in cluster log

pass 7657201 2024-04-15 17:33:02 2024-04-15 17:50:05 2024-04-15 18:55:46 1:05:41 0:55:11 0:10:30 smithi main centos 9.stream rgw/verify/{0-install clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability inline-data$/{on} msgr-failures/few objectstore/bluestore-bitmap overrides proto/http rgw_pool_type/ec s3tests-branch sharding$/{default} striping$/{stripe-equals-chunk} supported-random-distro$/{centos_latest} tasks/{bucket-check cls mp_reupload ragweed reshard s3tests-java s3tests versioning} validater/valgrind} 2
dead 7657012 2024-04-15 03:19:34 2024-04-15 03:26:47 2024-04-15 15:36:06 12:09:19 smithi main centos 9.stream rgw/notifications/{beast bluestore-bitmap fixed-2 ignore-pg-availability overrides tasks/others/{0-install supported-distros/{centos_latest} test_others}} 2
Failure Reason:

hit max job timeout

fail 7656970 2024-04-15 00:24:48 2024-04-15 02:09:38 2024-04-15 03:15:41 1:06:03 0:56:39 0:09:24 smithi main centos 8.stream upgrade:octopus-x/parallel/{0-distro/centos_8.stream_container_tools 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api}} 2
Failure Reason:

Command failed on smithi106 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:octopus shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c9451350-facf-11ee-bc8e-c7b262605968 -e sha1=4f113f5455944ac06c5d93df586ceb9be0f9dfff -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''
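Same quoting pattern as the version check above. Unwound, the post-upgrade assertion inside the container is the pipeline below; jq -e exits non-zero when the expression is false, so status 1 most likely indicates the cluster still reported more than one version in .overall, i.e. the octopus-x upgrade did not converge.

    # Reduced form of the failed assertion: all daemons must report a single version
    ceph versions | jq -e '.overall | length == 1'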

pass 7656643 2024-04-14 21:28:39 2024-04-15 00:07:26 2024-04-15 00:33:35 0:26:09 0:15:56 0:10:13 smithi main ubuntu 22.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1-mds-2-client conf/{client mds mgr mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest} 4
fail 7656599 2024-04-14 21:27:53 2024-04-14 23:34:32 2024-04-14 23:53:33 0:19:01 0:08:32 0:10:29 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/suites/dbench}} 3
Failure Reason:

Command failed on smithi087 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull'

fail 7656561 2024-04-14 21:27:12 2024-04-14 23:10:51 2024-04-14 23:29:30 0:18:39 0:06:50 0:11:49 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/fs/test_o_trunc}} 3
Failure Reason:

Command failed on smithi049 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull'

pass 7656523 2024-04-14 21:26:30 2024-04-14 22:41:41 2024-04-14 23:10:46 0:29:05 0:17:44 0:11:21 smithi main ubuntu 22.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1-mds-3-client conf/{client mds mgr mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest} 5
pass 7656477 2024-04-14 21:25:40 2024-04-14 22:06:16 2024-04-14 22:38:26 0:32:10 0:21:04 0:11:06 smithi main centos 8.stream fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_8.stream clusters/1-mds-2-client-micro conf/{client mds mgr mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}} 3
fail 7656426 2024-04-14 21:24:46 2024-04-14 21:36:01 2024-04-14 21:53:44 0:17:43 0:07:37 0:10:06 smithi main centos 9.stream fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/dbench}} 3
Failure Reason:

Command failed on smithi038 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull'

pass 7656341 2024-04-14 21:05:53 2024-04-15 01:38:27 2024-04-15 02:09:34 0:31:07 0:21:17 0:09:50 smithi main centos 9.stream rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-5} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/small-objects} 2