Status   Job ID   Posted   Started   Updated   Runtime   Duration   In Waiting   Machine   Teuthology Branch   OS Type   OS Version   Description   Nodes
fail 7445532 2023-11-03 14:14:51 2023-11-03 14:15:49 2023-11-03 14:45:58 0:30:09 0:22:41 0:07:28 smithi main rhel 8.6 orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_host_drain} 3
Failure Reason:

Command failed on smithi018 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:054a55d156d3a2f0db54aab582507ae7417905e5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 86dc7ca0-7a56-11ee-8db9-212e2dc638e7 -- bash -c \'set -ex\nHOSTNAMES=$(ceph orch host ls --format json | jq -r \'"\'"\'.[] | .hostname\'"\'"\')\nfor host in $HOSTNAMES; do\n # find the hostname for "host.c" which will have no mgr\n HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq \'"\'"\'any(.daemon_type == "mgr")\'"\'"\')\n if [ "$HAS_MGRS" == "false" ]; then\n HOST_C="${host}"\n fi\ndone\n# now drain that host\nceph orch host drain $HOST_C --zap-osd-devices\n# wait for drain to complete\nHOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\nwhile [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do\n sleep 15\n HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\ndone\n# we want to check the ability to remove the host from\n# the CRUSH map, so we should first verify the host is in\n# the CRUSH map.\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Expected to see $HOST_C in CRUSH map. Saw:\\n\\n$CRUSH_MAP"\n exit 1\nfi\n# If the drain was successful, we should be able to remove the\n# host without force with no issues. If there are still daemons\n# we will get a response telling us to drain the host and a\n# non-zero return code\nceph orch host rm $HOST_C --rm-crush-entry\n# verify we\'"\'"\'ve successfully removed the host from the CRUSH map\nsleep 30\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Saw $HOST_C in CRUSH map after it should have been removed.\\n\\n$CRUSH_MAP"\n exit 1\nfi\n\''
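
The failure reason above packs the test_host_drain check into a single, heavily escaped bash -c string. For readability, here is the same script with the shell quoting undone (reconstructed directly from the command above, not an independent reproduction):

    set -ex
    HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
    for host in $HOSTNAMES; do
      # find the hostname for "host.c" which will have no mgr
      HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
      if [ "$HAS_MGRS" == "false" ]; then
        HOST_C="${host}"
      fi
    done
    # now drain that host
    ceph orch host drain $HOST_C --zap-osd-devices
    # wait for drain to complete
    HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
    while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
      sleep 15
      HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
    done
    # we want to check the ability to remove the host from
    # the CRUSH map, so we should first verify the host is in
    # the CRUSH map.
    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    CRUSH_MAP=$(cat crushmap.txt)
    if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
      printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
      exit 1
    fi
    # If the drain was successful, we should be able to remove the
    # host without force with no issues. If there are still daemons
    # we will get a response telling us to drain the host and a
    # non-zero return code
    ceph orch host rm $HOST_C --rm-crush-entry
    # verify we've successfully removed the host from the CRUSH map
    sleep 30
    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    CRUSH_MAP=$(cat crushmap.txt)
    if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
      printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
      exit 1
    fi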

fail 7445533 2023-11-03 14:14:52 2023-11-03 14:16:39 2023-11-03 14:57:39 0:41:00 0:30:55 0:10:05 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi084 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d46a7d60-7a55-11ee-8db9-212e2dc638e7 -e sha1=054a55d156d3a2f0db54aab582507ae7417905e5 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''
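
The failing assertion here is the post-upgrade version check: 'ceph versions' prints a JSON object whose .overall field maps each distinct version string to a daemon count, and jq -e exits non-zero when the filter evaluates to false. A minimal sketch of the same jq check against hypothetical output (the version strings and counts are illustrative, not taken from this run):

    # Mid-upgrade (or stalled) cluster: two distinct versions under .overall,
    # so the filter prints "false" and jq -e exits with status 1.
    echo '{"overall": {"ceph version 17.2.0 quincy (stable)": 2,
                       "ceph version 18.0.0 reef (dev)": 6}}' |
      jq -e '.overall | length == 1'

    # Once every daemon reports the same build, .overall has a single key,
    # the filter prints "true", and jq -e exits 0, so this check passes.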

pass 7445534 2023-11-03 14:14:52 2023-11-03 14:16:40 2023-11-03 15:09:49 0:53:09 0:42:04 0:11:05 smithi main ubuntu 20.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
pass 7445535 2023-11-03 14:14:53 2023-11-03 14:17:00 2023-11-03 14:45:09 0:28:09 0:19:59 0:08:10 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final} 2
fail 7445536 2023-11-03 14:14:54 2023-11-03 14:18:00 2023-11-03 14:54:41 0:36:41 0:22:44 0:13:57 smithi main ubuntu 20.04 orch:cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_host_drain} 3
Failure Reason:

Command failed on smithi072 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:054a55d156d3a2f0db54aab582507ae7417905e5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid bd0b3be0-7a56-11ee-8db9-212e2dc638e7 -- bash -c \'set -ex\nHOSTNAMES=$(ceph orch host ls --format json | jq -r \'"\'"\'.[] | .hostname\'"\'"\')\nfor host in $HOSTNAMES; do\n # find the hostname for "host.c" which will have no mgr\n HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq \'"\'"\'any(.daemon_type == "mgr")\'"\'"\')\n if [ "$HAS_MGRS" == "false" ]; then\n HOST_C="${host}"\n fi\ndone\n# now drain that host\nceph orch host drain $HOST_C --zap-osd-devices\n# wait for drain to complete\nHOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\nwhile [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do\n sleep 15\n HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\ndone\n# we want to check the ability to remove the host from\n# the CRUSH map, so we should first verify the host is in\n# the CRUSH map.\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Expected to see $HOST_C in CRUSH map. Saw:\\n\\n$CRUSH_MAP"\n exit 1\nfi\n# If the drain was successful, we should be able to remove the\n# host without force with no issues. If there are still daemons\n# we will get a response telling us to drain the host and a\n# non-zero return code\nceph orch host rm $HOST_C --rm-crush-entry\n# verify we\'"\'"\'ve successfully removed the host from the CRUSH map\nsleep 30\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Saw $HOST_C in CRUSH map after it should have been removed.\\n\\n$CRUSH_MAP"\n exit 1\nfi\n\''