Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 7778294 2024-06-28 21:08:39 2024-06-29 05:28:31 2024-06-29 05:45:23 0:16:52 0:07:43 0:09:09 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/classic task/test_set_mon_crush_locations} 3
Failure Reason:

Command failed on smithi047 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778295 2024-06-28 21:08:40 2024-06-29 05:28:31 2024-06-29 05:43:55 0:15:24 0:04:25 0:10:59 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi087 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778296 2024-06-28 21:08:41 2024-06-29 05:28:32 2024-06-29 05:47:01 0:18:29 0:07:23 0:11:06 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi022 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778297 2024-06-28 21:08:42 2024-06-29 05:29:02 2024-06-29 05:55:49 0:26:47 0:17:08 0:09:39 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi028 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 9ce6fbde-35da-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

fail 7778298 2024-06-28 21:08:43 2024-06-29 05:29:03 2024-06-29 05:46:47 0:17:44 0:07:20 0:10:24 smithi main ubuntu 22.04 orch/rook/smoke/{0-distro/ubuntu_22.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/calico rook/master} 1
Failure Reason:

Command failed on smithi106 with status 100: "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && echo 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list && sudo apt update && sudo apt install -y kubelet kubeadm kubectl bridge-utils"

fail 7778299 2024-06-28 21:08:44 2024-06-29 05:29:03 2024-06-29 06:14:07 0:45:04 0:31:35 0:13:29 smithi main centos 9.stream orch/cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi066 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f057d554-35da-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778300 2024-06-28 21:08:45 2024-06-29 05:32:04 2024-06-29 05:46:54 0:14:50 0:06:41 0:08:09 smithi main centos 9.stream orch/cephadm/nfs/{cluster/{1-node} conf/{client mds mgr mon osd} overrides/{ignore_mgr_down ignorelist_health pg_health} supported-random-distros$/{centos_latest} tasks/nfs} 1
Failure Reason:

Command failed on smithi096 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

pass 7778301 2024-06-28 21:08:46 2024-06-29 05:32:04 2024-06-29 05:51:07 0:19:03 0:09:19 0:09:44 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/classic task/test_adoption} 1
pass 7778302 2024-06-28 21:08:47 2024-06-29 05:32:04 2024-06-29 05:53:23 0:21:19 0:11:29 0:09:50 smithi main centos 9.stream orch/cephadm/orchestrator_cli/{0-random-distro$/{centos_9.stream_runc} 2-node-mgr agent/off orchestrator_cli} 2
fail 7778303 2024-06-28 21:08:48 2024-06-29 05:32:25 2024-06-29 05:45:38 0:13:13 0:04:31 0:08:42 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi026 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778304 2024-06-28 21:08:49 2024-06-29 05:32:25 2024-06-29 05:51:04 0:18:39 0:04:38 0:14:01 smithi main centos 9.stream orch/cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
Failure Reason:

Command failed on smithi139 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778305 2024-06-28 21:08:50 2024-06-29 05:35:46 2024-06-29 05:52:54 0:17:08 0:07:10 0:09:58 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_basic} 2
Failure Reason:

Command failed on smithi001 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778306 2024-06-28 21:08:51 2024-06-29 05:35:56 2024-06-29 05:51:00 0:15:04 0:04:39 0:10:25 smithi main centos 9.stream orch/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi063 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778307 2024-06-28 21:08:53 2024-06-29 05:35:57 2024-06-29 05:50:11 0:14:14 0:04:30 0:09:44 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi038 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778308 2024-06-28 21:08:54 2024-06-29 05:37:17 2024-06-29 05:54:49 0:17:32 0:07:05 0:10:27 smithi main ubuntu 22.04 orch/cephadm/smoke-singlehost/{0-random-distro$/{ubuntu_22.04} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi181 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778309 2024-06-28 21:08:55 2024-06-29 05:37:18 2024-06-29 05:51:03 0:13:45 0:04:33 0:09:12 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/classic start} 3
Failure Reason:

Command failed on smithi117 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778310 2024-06-28 21:08:56 2024-06-29 05:38:08 2024-06-29 05:57:33 0:19:25 0:09:01 0:10:24 smithi main ubuntu 22.04 orch/cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi043 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778311 2024-06-28 21:08:57 2024-06-29 05:38:08 2024-06-29 06:16:49 0:38:41 0:25:29 0:13:12 smithi main ubuntu 22.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5ff2c260-35dc-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778312 2024-06-28 21:08:58 2024-06-29 05:41:29 2024-06-29 05:58:42 0:17:13 0:06:41 0:10:32 smithi main centos 9.stream orch/cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/connectivity msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi099 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778313 2024-06-28 21:08:59 2024-06-29 05:41:30 2024-06-29 05:59:15 0:17:45 0:06:59 0:10:46 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/connectivity task/test_ca_signed_key} 2
Failure Reason:

Command failed on smithi016 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778314 2024-06-28 21:09:00 2024-06-29 05:42:30 2024-06-29 05:58:17 0:15:47 0:04:49 0:10:58 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi060 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778315 2024-06-28 21:09:01 2024-06-29 05:42:31 2024-06-29 06:00:36 0:18:05 0:07:20 0:10:45 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi097 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778316 2024-06-28 21:09:02 2024-06-29 05:42:31 2024-06-29 06:05:25 0:22:54 0:13:22 0:09:32 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/classic task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi110 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=c39e712d7c0a32002068c7313f129279cce6c132 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 7778317 2024-06-28 21:09:03 2024-06-29 05:42:31 2024-06-29 05:55:44 0:13:13 0:04:30 0:08:43 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
Failure Reason:

Command failed on smithi044 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778318 2024-06-28 21:09:04 2024-06-29 05:42:32 2024-06-29 05:56:15 0:13:43 0:04:30 0:09:13 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi003 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778319 2024-06-28 21:09:05 2024-06-29 05:43:12 2024-06-29 06:58:48 1:15:36 1:03:07 0:12:29 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi152 with status 1: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v18.2.1 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ac2c8e68-35dc-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch ps'"

fail 7778320 2024-06-28 21:09:06 2024-06-29 05:43:13 2024-06-29 06:00:20 0:17:07 0:06:39 0:10:28 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/connectivity task/test_cephadm_timeout} 1
Failure Reason:

Command failed on smithi150 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778321 2024-06-28 21:09:07 2024-06-29 05:43:13 2024-06-29 05:59:07 0:15:54 0:05:29 0:10:25 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi012 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778322 2024-06-28 21:09:08 2024-06-29 05:44:03 2024-06-29 05:57:16 0:13:13 0:04:26 0:08:47 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi070 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778323 2024-06-28 21:09:09 2024-06-29 05:44:04 2024-06-29 06:04:51 0:20:47 0:08:48 0:11:59 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi049 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=c39e712d7c0a32002068c7313f129279cce6c132 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 7778324 2024-06-28 21:09:10 2024-06-29 05:44:04 2024-06-29 06:01:09 0:17:05 0:07:03 0:10:02 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi087 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778325 2024-06-28 21:09:11 2024-06-29 05:44:04 2024-06-29 06:00:13 0:16:09 0:06:30 0:09:39 smithi main centos 9.stream orch/cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/classic msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778326 2024-06-28 21:09:12 2024-06-29 05:45:15 2024-06-29 06:02:52 0:17:37 0:07:20 0:10:17 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final} 2
Failure Reason:

Command failed on smithi081 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778327 2024-06-28 21:09:13 2024-06-29 05:45:15 2024-06-29 06:00:28 0:15:13 0:04:46 0:10:27 smithi main centos 9.stream orch/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi047 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778328 2024-06-28 21:09:14 2024-06-29 05:45:26 2024-06-29 05:58:31 0:13:05 0:04:22 0:08:43 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi100 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778329 2024-06-28 21:09:15 2024-06-29 05:45:36 2024-06-29 06:01:34 0:15:58 0:04:35 0:11:23 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 3
Failure Reason:

Command failed on smithi018 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778330 2024-06-28 21:09:16 2024-06-29 05:45:36 2024-06-29 06:02:41 0:17:05 0:07:19 0:09:46 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/classic task/test_extra_daemon_features} 2
Failure Reason:

Command failed on smithi046 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778331 2024-06-28 21:09:17 2024-06-29 05:45:37 2024-06-29 06:04:08 0:18:31 0:07:22 0:11:09 smithi main ubuntu 22.04 orch/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi039 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778332 2024-06-28 21:09:18 2024-06-29 05:45:37 2024-06-29 06:16:47 0:31:10 0:21:44 0:09:26 smithi main centos 9.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
Failure Reason:

Command failed on smithi026 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 9bc9105a-35dc-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7778333 2024-06-28 21:09:19 2024-06-29 05:45:47 2024-06-29 05:59:52 0:14:05 0:04:39 0:09:26 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi096 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778334 2024-06-28 21:09:20 2024-06-29 05:46:58 2024-06-29 06:14:00 0:27:02 0:17:37 0:09:25 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi022 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2be7ac78-35dd-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

fail 7778335 2024-06-28 21:09:21 2024-06-29 05:47:08 2024-06-29 06:09:53 0:22:45 0:08:59 0:13:46 smithi main ubuntu 22.04 orch/cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi038 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778336 2024-06-28 21:09:22 2024-06-29 05:50:19 2024-06-29 06:10:27 0:20:08 0:09:08 0:11:00 smithi main ubuntu 22.04 orch/cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_basic} 2
Failure Reason:

Command failed on smithi114 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778337 2024-06-28 21:09:23 2024-06-29 05:50:40 2024-06-29 06:09:17 0:18:37 0:07:29 0:11:08 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi008 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778338 2024-06-28 21:09:24 2024-06-29 05:50:40 2024-06-29 06:11:51 0:21:11 0:08:28 0:12:43 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/connectivity task/test_host_drain} 3
Failure Reason:

Command failed on smithi007 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778339 2024-06-28 21:09:25 2024-06-29 05:50:40 2024-06-29 06:07:38 0:16:58 0:07:16 0:09:42 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778340 2024-06-28 21:09:26 2024-06-29 05:50:41 2024-06-29 06:05:09 0:14:28 0:04:40 0:09:48 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi063 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778341 2024-06-28 21:09:27 2024-06-29 05:51:01 2024-06-29 06:09:36 0:18:35 0:08:56 0:09:39 smithi main ubuntu 22.04 orch/cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/packaged mon_election/connectivity msgr/async start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi117 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778342 2024-06-28 21:09:28 2024-06-29 05:51:11 2024-06-29 06:10:04 0:18:53 0:07:32 0:11:21 smithi main ubuntu 22.04 orch/rook/smoke/{0-distro/ubuntu_22.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/flannel rook/1.7.2} 3
Failure Reason:

Command failed on smithi139 with status 100: "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && echo 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list && sudo apt update && sudo apt install -y kubelet kubeadm kubectl bridge-utils"

fail 7778343 2024-06-28 21:09:29 2024-06-29 05:51:12 2024-06-29 06:08:02 0:16:50 0:04:41 0:12:09 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi173 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778344 2024-06-28 21:09:30 2024-06-29 05:51:12 2024-06-29 06:08:35 0:17:23 0:04:36 0:12:47 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} 2
Failure Reason:

Command failed on smithi027 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778345 2024-06-28 21:09:31 2024-06-29 05:52:43 2024-06-29 06:09:53 0:17:10 0:07:10 0:10:00 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}} 1
Failure Reason:

Command failed on smithi040 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778346 2024-06-28 21:09:32 2024-06-29 05:52:43 2024-06-29 06:11:18 0:18:35 0:07:23 0:11:12 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi017 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778347 2024-06-28 21:09:33 2024-06-29 05:52:43 2024-06-29 06:11:32 0:18:49 0:07:21 0:11:28 smithi main ubuntu 22.04 orch/cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi050 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778348 2024-06-28 21:09:34 2024-06-29 05:52:44 2024-06-29 06:25:04 0:32:20 0:22:21 0:09:59 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi001 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:reef shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 234609f6-35de-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778349 2024-06-28 21:09:35 2024-06-29 05:53:04 2024-06-29 06:14:40 0:21:36 0:09:20 0:12:16 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi042 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

pass 7778350 2024-06-28 21:09:36 2024-06-29 05:55:45 2024-06-29 06:16:40 0:20:55 0:12:15 0:08:40 smithi main centos 9.stream orch/cephadm/orchestrator_cli/{0-random-distro$/{centos_9.stream} 2-node-mgr agent/on orchestrator_cli} 2
fail 7778351 2024-06-28 21:09:37 2024-06-29 05:55:55 2024-06-29 06:11:19 0:15:24 0:05:36 0:09:48 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778352 2024-06-28 21:09:38 2024-06-29 05:56:26 2024-06-29 06:10:21 0:13:55 0:04:29 0:09:26 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi070 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778353 2024-06-28 21:09:39 2024-06-29 05:57:26 2024-06-29 06:10:21 0:12:55 0:04:29 0:08:26 smithi main centos 9.stream orch/cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream_runc} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi115 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778354 2024-06-28 21:09:40 2024-06-29 05:57:27 2024-06-29 06:13:25 0:15:58 0:04:50 0:11:08 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/classic start} 3
Failure Reason:

Command failed on smithi043 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778355 2024-06-28 21:09:41 2024-06-29 05:58:17 2024-06-29 06:15:32 0:17:15 0:07:22 0:09:53 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/connectivity task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi100 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778356 2024-06-28 21:09:42 2024-06-29 05:58:38 2024-06-29 06:14:21 0:15:43 0:04:35 0:11:08 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi016 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778357 2024-06-28 21:09:43 2024-06-29 05:58:48 2024-06-29 06:17:57 0:19:09 0:09:05 0:10:04 smithi main ubuntu 22.04 orch/cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi012 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778358 2024-06-28 21:09:44 2024-06-29 05:58:48 2024-06-29 06:14:28 0:15:40 0:04:49 0:10:51 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nvmeof 3-final} 2
Failure Reason:

Command failed on smithi099 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778359 2024-06-28 21:09:45 2024-06-29 05:58:49 2024-06-29 06:35:35 0:36:46 0:26:35 0:10:11 smithi main ubuntu 22.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
Failure Reason:

Command failed on smithi078 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fd08e910-35de-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778360 2024-06-28 21:09:46 2024-06-29 05:59:59 2024-06-29 06:17:54 0:17:55 0:07:17 0:10:38 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778361 2024-06-28 21:09:47 2024-06-29 06:00:20 2024-06-29 06:17:48 0:17:28 0:07:20 0:10:08 smithi main centos 9.stream orch/cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/root mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi047 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778362 2024-06-28 21:09:48 2024-06-29 06:00:30 2024-06-29 06:20:24 0:19:54 0:08:23 0:11:31 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/classic task/test_rgw_multisite} 3
Failure Reason:

Command failed on smithi097 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778363 2024-06-28 21:09:49 2024-06-29 06:00:40 2024-06-29 06:16:34 0:15:54 0:04:30 0:11:24 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi087 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778364 2024-06-28 21:09:50 2024-06-29 06:01:11 2024-06-29 06:28:52 0:27:41 0:17:11 0:10:30 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi062 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3fa533c8-35df-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

pass 7778365 2024-06-28 21:09:51 2024-06-29 06:01:21 2024-06-29 06:20:39 0:19:18 0:09:22 0:09:56 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/classic task/test_adoption} 1
fail 7778366 2024-06-28 21:09:52 2024-06-29 06:01:21 2024-06-29 06:20:06 0:18:45 0:07:16 0:11:29 smithi main ubuntu 22.04 orch/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi111 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778367 2024-06-28 21:09:53 2024-06-29 06:01:42 2024-06-29 06:18:42 0:17:00 0:08:04 0:08:56 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_basic} 2
Failure Reason:

Command failed on smithi018 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778368 2024-06-28 21:09:54 2024-06-29 06:01:42 2024-06-29 06:17:25 0:15:43 0:04:36 0:11:07 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi059 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778369 2024-06-28 21:09:55 2024-06-29 06:02:13 2024-06-29 06:22:40 0:20:27 0:09:18 0:11:09 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/connectivity task/test_set_mon_crush_locations} 3
Failure Reason:

Command failed on smithi037 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778370 2024-06-28 21:09:56 2024-06-29 06:02:23 2024-06-29 06:21:09 0:18:46 0:07:27 0:11:19 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi046 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778371 2024-06-28 21:09:57 2024-06-29 06:02:43 2024-06-29 06:20:47 0:18:04 0:07:17 0:10:47 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi081 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778372 2024-06-28 21:09:58 2024-06-29 06:02:54 2024-06-29 06:20:15 0:17:21 0:04:37 0:12:44 smithi main centos 9.stream orch/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi039 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778373 2024-06-28 21:09:59 2024-06-29 06:04:14 2024-06-29 06:19:53 0:15:39 0:04:40 0:10:59 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi138 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778374 2024-06-28 21:10:00 2024-06-29 06:04:45 2024-06-29 06:19:49 0:15:04 0:04:47 0:10:17 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 3
Failure Reason:

Command failed on smithi005 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778375 2024-06-28 21:10:01 2024-06-29 06:04:45 2024-06-29 06:24:52 0:20:07 0:09:10 0:10:57 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/classic task/test_ca_signed_key} 2
Failure Reason:

Command failed on smithi063 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778376 2024-06-28 21:10:02 2024-06-29 06:05:16 2024-06-29 06:23:36 0:18:20 0:06:15 0:12:05 smithi main centos 9.stream orch/cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/packaged mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi049 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778377 2024-06-28 21:10:03 2024-06-29 06:05:26 2024-06-29 06:22:15 0:16:49 0:04:36 0:12:13 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
Failure Reason:

Command failed on smithi067 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778378 2024-06-28 21:10:04 2024-06-29 06:07:07 2024-06-29 06:22:07 0:15:00 0:04:32 0:10:28 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi045 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778379 2024-06-28 21:10:05 2024-06-29 06:07:07 2024-06-29 06:38:52 0:31:45 0:20:58 0:10:47 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi133 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v18.2.1 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 11dc8062-35e0-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778380 2024-06-28 21:10:06 2024-06-29 06:07:48 2024-06-29 06:25:33 0:17:45 0:08:49 0:08:56 smithi main ubuntu 22.04 orch/cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/connectivity task/test_cephadm_timeout} 1
Failure Reason:

Command failed on smithi173 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778381 2024-06-28 21:10:07 2024-06-29 06:08:08 2024-06-29 06:29:03 0:20:55 0:08:15 0:12:40 smithi main ubuntu 22.04 orch/cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi027 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778382 2024-06-28 21:10:08 2024-06-29 06:08:38 2024-06-29 06:28:04 0:19:26 0:07:24 0:12:02 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi040 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

pass 7778383 2024-06-28 21:10:09 2024-06-29 06:09:19 2024-06-29 06:32:08 0:22:49 0:14:37 0:08:12 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/connectivity task/test_cephadm} 1
fail 7778384 2024-06-28 21:10:10 2024-06-29 06:09:19 2024-06-29 06:41:06 0:31:47 0:20:36 0:11:11 smithi main centos 9.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
Failure Reason:

Command failed on smithi038 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1724f220-35e0-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7778385 2024-06-28 21:10:11 2024-06-29 06:09:19 2024-06-29 06:25:30 0:16:11 0:04:37 0:11:34 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final} 2
Failure Reason:

Command failed on smithi006 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

dead 7778386 2024-06-28 21:10:12 2024-06-29 06:09:20 2024-06-29 06:14:55 0:05:35 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
Failure Reason:

Error reimaging machines: Expected smithi148's OS to be centos 9.stream but found ubuntu 22.04

fail 7778387 2024-06-28 21:10:13 2024-06-29 06:09:20 2024-06-29 06:26:57 0:17:37 0:07:28 0:10:09 smithi main ubuntu 22.04 orch/rook/smoke/{0-distro/ubuntu_22.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/host rook/master} 1
Failure Reason:

Command failed on smithi148 with status 100: "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && echo 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list && sudo apt update && sudo apt install -y kubelet kubeadm kubectl bridge-utils"

fail 7778388 2024-06-28 21:10:14 2024-06-29 06:09:21 2024-06-29 06:25:24 0:16:03 0:05:20 0:10:43 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi117 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

pass 7778389 2024-06-28 21:10:15 2024-06-29 06:09:41 2024-06-29 06:27:30 0:17:49 0:06:14 0:11:35 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/classic task/test_cephadm_repos} 1
fail 7778390 2024-06-28 21:10:16 2024-06-29 06:10:12 2024-06-29 06:25:57 0:15:45 0:04:33 0:11:12 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi139 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778391 2024-06-28 21:10:17 2024-06-29 06:10:12 2024-06-29 06:28:47 0:18:35 0:07:20 0:11:15 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi070 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778392 2024-06-28 21:10:18 2024-06-29 06:10:22 2024-06-29 06:26:21 0:15:59 0:04:33 0:11:26 smithi main centos 9.stream orch/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi114 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778393 2024-06-28 21:10:19 2024-06-29 06:10:33 2024-06-29 06:29:23 0:18:50 0:08:58 0:09:52 smithi main ubuntu 22.04 orch/cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/classic msgr/async-v2only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi183 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778394 2024-06-28 21:10:20 2024-06-29 06:11:23 2024-06-29 06:38:31 0:27:08 0:17:29 0:09:39 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi003 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b6c1f8f0-35e0-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

fail 7778395 2024-06-28 21:10:21 2024-06-29 06:11:24 2024-06-29 06:53:14 0:41:50 0:30:43 0:11:07 smithi main centos 9.stream orch/cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi050 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 72727ac6-35e0-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778396 2024-06-28 21:10:22 2024-06-29 06:11:34 2024-06-29 06:29:37 0:18:03 0:08:53 0:09:10 smithi main ubuntu 22.04 orch/cephadm/nfs/{cluster/{1-node} conf/{client mds mgr mon osd} overrides/{ignore_mgr_down ignorelist_health pg_health} supported-random-distros$/{ubuntu_latest} tasks/nfs} 1
Failure Reason:

Command failed on smithi115 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778397 2024-06-28 21:10:23 2024-06-29 06:11:34 2024-06-29 06:29:32 0:17:58 0:07:03 0:10:55 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi118 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

pass 7778398 2024-06-28 21:10:24 2024-06-29 06:11:55 2024-06-29 06:36:16 0:24:21 0:14:01 0:10:20 smithi main ubuntu 22.04 orch/cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_22.04} 2-node-mgr agent/off orchestrator_cli} 2
fail 7778399 2024-06-28 21:10:25 2024-06-29 06:11:55 2024-06-29 06:28:34 0:16:39 0:04:47 0:11:52 smithi main centos 9.stream orch/cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
Failure Reason:

Command failed on smithi043 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778400 2024-06-28 21:10:26 2024-06-29 06:13:36 2024-06-29 06:31:06 0:17:30 0:07:27 0:10:03 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_basic} 2
Failure Reason:

Command failed on smithi022 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778401 2024-06-28 21:10:27 2024-06-29 06:14:06 2024-06-29 06:30:18 0:16:12 0:04:38 0:11:34 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi066 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778402 2024-06-28 21:10:28 2024-06-29 06:14:16 2024-06-29 06:27:26 0:13:10 0:04:33 0:08:37 smithi main centos 9.stream orch/cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream_runc} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi190 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778403 2024-06-28 21:10:29 2024-06-29 06:14:27 2024-06-29 06:30:21 0:15:54 0:04:58 0:10:56 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/classic start} 3
Failure Reason:

Command failed on smithi016 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778404 2024-06-28 21:10:30 2024-06-29 06:14:37 2024-06-29 06:32:32 0:17:55 0:08:59 0:08:56 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/connectivity task/test_extra_daemon_features} 2
Failure Reason:

Command failed on smithi092 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778405 2024-06-28 21:10:31 2024-06-29 06:14:48 2024-06-29 06:30:07 0:15:19 0:04:53 0:10:26 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi044 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778406 2024-06-28 21:10:32 2024-06-29 06:14:48 2024-06-29 06:33:09 0:18:21 0:07:15 0:11:06 smithi main ubuntu 22.04 orch/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi100 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778407 2024-06-28 21:10:33 2024-06-29 06:15:38 2024-06-29 06:32:48 0:17:10 0:07:10 0:10:00 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi042 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778408 2024-06-28 21:10:33 2024-06-29 06:15:39 2024-06-29 06:34:20 0:18:41 0:07:18 0:11:23 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} 2
Failure Reason:

Command failed on smithi087 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778409 2024-06-28 21:10:35 2024-06-29 06:16:39 2024-06-29 06:33:40 0:17:01 0:07:30 0:09:31 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/classic task/test_host_drain} 3
Failure Reason:

Command failed on smithi028 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778410 2024-06-28 21:10:36 2024-06-29 06:16:50 2024-06-29 06:29:30 0:12:40 0:04:31 0:08:09 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi026 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778411 2024-06-28 21:10:37 2024-06-29 06:16:50 2024-06-29 06:35:05 0:18:15 0:07:29 0:10:46 smithi main ubuntu 22.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi059 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull'

fail 7778412 2024-06-28 21:10:38 2024-06-29 06:17:31 2024-06-29 06:50:46 0:33:15 0:22:22 0:10:53 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi047 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:reef shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a15b1cb6-35e1-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778413 2024-06-28 21:10:39 2024-06-29 06:17:51 2024-06-29 06:35:20 0:17:29 0:07:28 0:10:01 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi012 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778414 2024-06-28 21:10:40 2024-06-29 06:18:01 2024-06-29 06:33:37 0:15:36 0:05:16 0:10:20 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi132 with status 125: 'sudo podman pull quay.io/samba.org/samba-ad-server:latest'

fail 7778415 2024-06-28 21:10:40 2024-06-29 06:18:52 2024-06-29 06:34:51 0:15:59 0:04:28 0:11:31 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi138 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778416 2024-06-28 21:10:41 2024-06-29 06:19:42 2024-06-29 06:38:21 0:18:39 0:07:33 0:11:06 smithi main centos 9.stream orch/cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi111 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778417 2024-06-28 21:10:42 2024-06-29 06:19:43 2024-06-29 06:36:37 0:16:54 0:06:42 0:10:12 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/connectivity task/test_iscsi_container/{centos_9.stream test_iscsi_container}} 1
Failure Reason:

Command failed on smithi097 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778418 2024-06-28 21:10:43 2024-06-29 06:19:43 2024-06-29 06:35:27 0:15:44 0:04:27 0:11:17 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi145 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778419 2024-06-28 21:10:44 2024-06-29 06:19:44 2024-06-29 06:38:28 0:18:44 0:07:20 0:11:24 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nvmeof 3-final} 2
Failure Reason:

Command failed on smithi088 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778420 2024-06-28 21:10:45 2024-06-29 06:19:44 2024-06-29 06:38:51 0:19:07 0:07:22 0:11:45 smithi main ubuntu 22.04 orch/cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi039 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778421 2024-06-28 21:10:46 2024-06-29 06:19:44 2024-06-29 06:39:34 0:19:50 0:09:01 0:10:49 smithi main ubuntu 22.04 orch/cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778422 2024-06-28 21:10:47 2024-06-29 06:19:45 2024-06-29 06:34:25 0:14:40 0:04:24 0:10:16 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

Command failed on smithi081 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778423 2024-06-28 21:10:48 2024-06-29 06:20:55 2024-06-29 06:36:29 0:15:34 0:05:05 0:10:29 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 3
Failure Reason:

Command failed on smithi046 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778424 2024-06-28 21:10:49 2024-06-29 06:21:16 2024-06-29 06:42:13 0:20:57 0:09:12 0:11:45 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/classic task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi076 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778425 2024-06-28 21:10:50 2024-06-29 06:22:06 2024-06-29 06:37:18 0:15:12 0:04:47 0:10:25 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw 3-final} 2
Failure Reason:

Command failed on smithi067 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778426 2024-06-28 21:10:51 2024-06-29 06:22:17 2024-06-29 06:49:26 0:27:09 0:17:19 0:09:50 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi159 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0b1b80dc-35e2-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

pass 7778427 2024-06-28 21:10:52 2024-06-29 06:22:17 2024-06-29 06:44:28 0:22:11 0:12:23 0:09:48 smithi main ubuntu 22.04 orch/cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/classic task/test_adoption} 1
fail 7778428 2024-06-28 21:10:53 2024-06-29 06:22:17 2024-06-29 06:38:27 0:16:10 0:04:52 0:11:18 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/repave-all} 2
Failure Reason:

Command failed on smithi037 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778429 2024-06-28 21:10:54 2024-06-29 06:22:48 2024-06-29 06:43:27 0:20:39 0:09:19 0:11:20 smithi main ubuntu 22.04 orch/cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_basic} 2
Failure Reason:

Command failed on smithi110 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778430 2024-06-28 21:10:55 2024-06-29 06:23:38 2024-06-29 06:42:35 0:18:57 0:07:25 0:11:32 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/basic 3-final} 2
Failure Reason:

Command failed on smithi063 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778431 2024-06-28 21:10:56 2024-06-29 06:24:59 2024-06-29 06:45:11 0:20:12 0:08:13 0:11:59 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/connectivity task/test_rgw_multisite} 3
Failure Reason:

Command failed on smithi001 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778432 2024-06-28 21:10:57 2024-06-29 06:25:09 2024-06-29 06:42:54 0:17:45 0:07:27 0:10:18 smithi main ubuntu 22.04 orch/rook/smoke/{0-distro/ubuntu_22.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/calico rook/1.7.2} 1
Failure Reason:

Command failed on smithi117 with status 100: "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && echo 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list && sudo apt update && sudo apt install -y kubelet kubeadm kubectl bridge-utils"

fail 7778433 2024-06-28 21:10:59 2024-06-29 06:25:30 2024-06-29 06:38:49 0:13:19 0:04:35 0:08:44 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
Failure Reason:

Command failed on smithi123 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778434 2024-06-28 21:11:00 2024-06-29 06:25:40 2024-06-29 06:43:15 0:17:35 0:06:21 0:11:14 smithi main centos 9.stream orch/cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/classic msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi006 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778435 2024-06-28 21:11:01 2024-06-29 06:25:40 2024-06-29 06:41:24 0:15:44 0:04:34 0:11:10 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
Failure Reason:

Command failed on smithi139 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778436 2024-06-28 21:11:02 2024-06-29 06:26:01 2024-06-29 06:43:40 0:17:39 0:07:01 0:10:38 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi114 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778437 2024-06-28 21:11:03 2024-06-29 06:26:31 2024-06-29 06:56:46 0:30:15 0:19:06 0:11:09 smithi main centos 9.stream orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
Failure Reason:

Command failed on smithi186 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 791f3682-35e2-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778438 2024-06-28 21:11:04 2024-06-29 06:27:32 2024-06-29 06:45:42 0:18:10 0:07:27 0:10:43 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/classic task/test_set_mon_crush_locations} 3
Failure Reason:

Command failed on smithi040 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778439 2024-06-28 21:11:05 2024-06-29 06:28:12 2024-06-29 06:43:40 0:15:28 0:04:51 0:10:37 smithi main centos 9.stream orch/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi060 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778440 2024-06-28 21:11:06 2024-06-29 06:28:43 2024-06-29 06:46:33 0:17:50 0:07:18 0:10:32 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
Failure Reason:

Command failed on smithi070 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778441 2024-06-28 21:11:07 2024-06-29 06:28:53 2024-06-29 06:46:47 0:17:54 0:07:24 0:10:30 smithi main ubuntu 22.04 orch/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi062 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778442 2024-06-28 21:11:08 2024-06-29 06:28:53 2024-06-29 07:03:02 0:34:09 0:21:54 0:12:15 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi027 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v18.2.1 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4d17a33e-35e3-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778443 2024-06-28 21:11:09 2024-06-29 06:29:14 2024-06-29 06:46:03 0:16:49 0:07:04 0:09:45 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/connectivity task/test_cephadm_timeout} 1
Failure Reason:

Command failed on smithi043 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

pass 7778444 2024-06-28 21:11:10 2024-06-29 06:29:14 2024-06-29 06:53:30 0:24:16 0:13:41 0:10:35 smithi main ubuntu 22.04 orch/cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_22.04} 2-node-mgr agent/on orchestrator_cli} 2
fail 7778445 2024-06-28 21:11:11 2024-06-29 06:29:25 2024-06-29 06:45:50 0:16:25 0:05:26 0:10:59 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi026 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778446 2024-06-28 21:11:12 2024-06-29 06:29:35 2024-06-29 06:45:29 0:15:54 0:04:38 0:11:16 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/mirror 3-final} 2
Failure Reason:

Command failed on smithi073 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778447 2024-06-28 21:11:13 2024-06-29 06:29:46 2024-06-29 06:43:25 0:13:39 0:04:33 0:09:06 smithi main centos 9.stream orch/cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream_runc} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi089 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778448 2024-06-28 21:11:14 2024-06-29 06:29:56 2024-06-29 06:47:13 0:17:17 0:04:51 0:12:26 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/classic start} 3
Failure Reason:

Command failed on smithi143 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778449 2024-06-28 21:11:15 2024-06-29 06:30:06 2024-06-29 06:48:07 0:18:01 0:07:30 0:10:31 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/connectivity task/test_ca_signed_key} 2
Failure Reason:

Command failed on smithi031 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778450 2024-06-28 21:11:16 2024-06-29 06:30:07 2024-06-29 06:45:07 0:15:00 0:04:32 0:10:28 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final} 2
Failure Reason:

Command failed on smithi098 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778451 2024-06-28 21:11:17 2024-06-29 06:30:07 2024-06-29 06:52:15 0:22:08 0:09:11 0:12:57 smithi main ubuntu 22.04 orch/cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/packaged mon_election/connectivity msgr/async-v1only start tasks/rotate-keys} 2
Failure Reason:

Command failed on smithi016 with status 1: 'sudo cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778452 2024-06-28 21:11:18 2024-06-29 06:30:07 2024-06-29 06:49:56 0:19:49 0:07:24 0:12:25 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

Command failed on smithi022 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778453 2024-06-28 21:11:19 2024-06-29 06:30:08 2024-06-29 06:46:22 0:16:14 0:04:38 0:11:36 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi099 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778454 2024-06-28 21:11:20 2024-06-29 06:30:08 2024-06-29 06:48:47 0:18:39 0:07:18 0:11:21 smithi main centos 9.stream orch/cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi146 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

pass 7778455 2024-06-28 21:11:21 2024-06-29 06:30:18 2024-06-29 07:02:18 0:32:00 0:19:45 0:12:15 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/classic task/test_cephadm} 1
fail 7778456 2024-06-28 21:11:22 2024-06-29 06:32:09 2024-06-29 06:47:58 0:15:49 0:04:28 0:11:21 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

Command failed on smithi092 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778457 2024-06-28 21:11:23 2024-06-29 06:32:39 2024-06-29 06:59:57 0:27:18 0:17:33 0:09:45 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi042 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:quincy shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 946d8852-35e3-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

fail 7778458 2024-06-28 21:11:24 2024-06-29 06:32:50 2024-06-29 06:50:27 0:17:37 0:07:33 0:10:04 smithi main centos 9.stream orch/cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi100 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778459 2024-06-28 21:11:25 2024-06-29 06:33:10 2024-06-29 06:50:55 0:17:45 0:07:42 0:10:03 smithi main centos 9.stream orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_basic} 2
Failure Reason:

Command failed on smithi194 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778460 2024-06-28 21:11:26 2024-06-29 06:33:41 2024-06-29 06:48:45 0:15:04 0:05:03 0:10:01 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

Command failed on smithi028 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

pass 7778461 2024-06-28 21:11:27 2024-06-29 06:33:41 2024-06-29 06:48:51 0:15:10 0:06:10 0:09:00 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/connectivity task/test_cephadm_repos} 1
fail 7778462 2024-06-28 21:11:28 2024-06-29 06:33:41 2024-06-29 06:49:14 0:15:33 0:04:39 0:10:54 smithi main centos 9.stream orch/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi087 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778463 2024-06-28 21:11:29 2024-06-29 06:34:22 2024-06-29 06:52:10 0:17:48 0:07:18 0:10:30 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi081 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778464 2024-06-28 21:11:30 2024-06-29 06:34:32 2024-06-29 07:14:43 0:40:11 0:28:42 0:11:29 smithi main ubuntu 22.04 orch/cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
Failure Reason:

Command failed on smithi138 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ffbfd8d0-35e3-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 2\'"\'"\'\''

fail 7778465 2024-06-28 21:11:31 2024-06-29 06:34:53 2024-06-29 06:50:07 0:15:14 0:04:48 0:10:26 smithi main centos 9.stream orch/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi059 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778466 2024-06-28 21:11:32 2024-06-29 06:35:13 2024-06-29 06:48:22 0:13:09 0:04:32 0:08:37 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} 2
Failure Reason:

Command failed on smithi033 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778467 2024-06-28 21:11:33 2024-06-29 06:35:23 2024-06-29 06:51:04 0:15:41 0:04:49 0:10:52 smithi main centos 9.stream orch/cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 3
Failure Reason:

Command failed on smithi012 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778468 2024-06-28 21:11:34 2024-06-29 06:35:24 2024-06-29 06:56:03 0:20:39 0:08:55 0:11:44 smithi main ubuntu 22.04 orch/cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi145 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778469 2024-06-28 21:11:35 2024-06-29 06:35:34 2024-06-29 06:54:05 0:18:31 0:08:59 0:09:32 smithi main ubuntu 22.04 orch/cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi078 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778470 2024-06-28 21:11:36 2024-06-29 06:35:45 2024-06-29 06:53:40 0:17:55 0:07:30 0:10:25 smithi main centos 9.stream orch/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/classic task/test_extra_daemon_features} 2
Failure Reason:

Command failed on smithi007 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778471 2024-06-28 21:11:37 2024-06-29 06:36:25 2024-06-29 06:51:48 0:15:23 0:04:33 0:10:50 smithi main centos 9.stream orch/cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

Command failed on smithi046 with status 234: 'sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn'

fail 7778472 2024-06-28 21:11:38 2024-06-29 06:36:35 2024-06-29 07:07:46 0:31:11 0:20:42 0:10:29 smithi main centos 9.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.0} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi106 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v18.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 11e5a562-35e4-11ef-bca8-c7b262605968 -e sha1=c39e712d7c0a32002068c7313f129279cce6c132 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | keys\'"\'"\' | grep $sha1\''

fail 7778473 2024-06-28 21:11:39 2024-06-29 06:36:36 2024-06-29 06:58:35 0:21:59 0:09:45 0:12:14 smithi main ubuntu 22.04 orch/cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/connectivity task/test_orch_cli_mon} 5
Failure Reason:

Command failed on smithi067 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778474 2024-06-28 21:11:40 2024-06-29 06:38:27 2024-06-29 06:59:54 0:21:27 0:08:43 0:12:44 smithi main ubuntu 22.04 orch/cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_domain} 2
Failure Reason:

Command failed on smithi037 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778475 2024-06-28 21:11:41 2024-06-29 06:38:37 2024-06-29 06:56:08 0:17:31 0:07:30 0:10:01 smithi main ubuntu 22.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778476 2024-06-28 21:11:42 2024-06-29 06:38:37 2024-06-29 07:00:49 0:22:12 0:09:17 0:12:55 smithi main ubuntu 22.04 orch/cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/connectivity task/test_host_drain} 3
Failure Reason:

Command failed on smithi094 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'

fail 7778477 2024-06-28 21:11:43 2024-06-29 06:38:58 2024-06-29 06:58:52 0:19:54 0:07:32 0:12:22 smithi main ubuntu 22.04 orch/rook/smoke/{0-distro/ubuntu_22.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/flannel rook/master} 3
Failure Reason:

Command failed on smithi088 with status 100: "sudo apt update && sudo apt install -y apt-transport-https ca-certificates curl && sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && echo 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main' | sudo tee /etc/apt/sources.list.d/kubernetes.list && sudo apt update && sudo apt install -y kubelet kubeadm kubectl bridge-utils"

fail 7778478 2024-06-28 21:11:44 2024-06-29 06:38:58 2024-06-29 06:59:02 0:20:04 0:07:25 0:12:39 smithi main ubuntu 22.04 orch/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi039 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c39e712d7c0a32002068c7313f129279cce6c132 pull'