Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch:       wip-adk-testing-2023-11-01-0807
Suite Branch:      wip-adk-testing-2023-11-01-0807
Teuthology Branch: main
Machine:           smithi
OS:                rhel 8.6
Description:       orch:cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_host_drain}
Failure Reason:
Command failed on smithi018 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:054a55d156d3a2f0db54aab582507ae7417905e5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 86dc7ca0-7a56-11ee-8db9-212e2dc638e7 -- bash -c \'set -ex\nHOSTNAMES=$(ceph orch host ls --format json | jq -r \'"\'"\'.[] | .hostname\'"\'"\')\nfor host in $HOSTNAMES; do\n # find the hostname for "host.c" which will have no mgr\n HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq \'"\'"\'any(.daemon_type == "mgr")\'"\'"\')\n if [ "$HAS_MGRS" == "false" ]; then\n HOST_C="${host}"\n fi\ndone\n# now drain that host\nceph orch host drain $HOST_C --zap-osd-devices\n# wait for drain to complete\nHOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\nwhile [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do\n sleep 15\n HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\ndone\n# we want to check the ability to remove the host from\n# the CRUSH map, so we should first verify the host is in\n# the CRUSH map.\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Expected to see $HOST_C in CRUSH map. Saw:\\n\\n$CRUSH_MAP"\n exit 1\nfi\n# If the drain was successful, we should be able to remove the\n# host without force with no issues. If there are still daemons\n# we will get a response telling us to drain the host and a\n# non-zero return code\nceph orch host rm $HOST_C --rm-crush-entry\n# verify we\'"\'"\'ve successfully removed the host from the CRUSH map\nsleep 30\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Saw $HOST_C in CRUSH map after it should have been removed.\\n\\n$CRUSH_MAP"\n exit 1\nfi\n\''
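
For readability, here is the embedded test_host_drain script with the shell quoting unwound and the \n separators expanded into real lines (indentation added and comments shortened; the commands themselves are unchanged). It picks the one host running no mgr daemon, drains it with --zap-osd-devices, waits until "ceph orch ps" reports no daemons there, confirms the host is still present in the decompiled CRUSH map, removes it with "ceph orch host rm --rm-crush-entry", and finally asserts the host has disappeared from the CRUSH map:

    set -ex
    HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
    for host in $HOSTNAMES; do
        # find the hostname for "host.c" which will have no mgr
        HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
        if [ "$HAS_MGRS" == "false" ]; then
            HOST_C="${host}"
        fi
    done
    # now drain that host
    ceph orch host drain $HOST_C --zap-osd-devices
    # wait for drain to complete
    HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
    while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
        sleep 15
        HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
    done
    # the host should still be in the CRUSH map before removal
    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    CRUSH_MAP=$(cat crushmap.txt)
    if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
        printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
        exit 1
    fi
    # a fully drained host should be removable without --force
    ceph orch host rm $HOST_C --rm-crush-entry
    # verify the host is gone from the CRUSH map
    sleep 30
    ceph osd getcrushmap -o compiled-crushmap
    crushtool -d compiled-crushmap -o crushmap.txt
    CRUSH_MAP=$(cat crushmap.txt)
    if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
        printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
        exit 1
    fi

Under set -ex the bash -c invocation exits with the status of the first failing command, so status 22 (EINVAL, the usual ceph CLI code for rejected arguments) suggests one of the ceph calls failed rather than the final "exit 1" checks; the exact step would be visible in the job's teuthology.log.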

Ceph Branch:       wip-adk-testing-2023-11-01-0807
Suite Branch:      wip-adk-testing-2023-11-01-0807
Teuthology Branch: main
Machine:           smithi
OS:                centos 8.stream
Description:       orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final}
Failure Reason:
Command failed on smithi084 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d46a7d60-7a55-11ee-8db9-212e2dc638e7 -e sha1=054a55d156d3a2f0db54aab582507ae7417905e5 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''
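
The failing command here is the post-upgrade convergence check: "ceph versions" prints a JSON summary whose "overall" object maps each running version string to a daemon count, and jq -e '.overall | length == 1' exits 0 only when every daemon reports the same version (with -e, jq's exit status follows the boolean result). A cluster still on mixed versions therefore makes the step fail with status 1, matching the status reported above. A minimal sketch of that failure mode, with placeholder version strings rather than output from this run:

    ceph versions | jq '.overall'
    # hypothetical shape while daemons are still on mixed versions:
    # {
    #   "ceph version 17.2.0 (...) quincy (stable)": 2,
    #   "ceph version <build under test>": 6
    # }
    ceph versions | jq -e '.overall | length == 1'
    # prints "false" and exits 1, so the teuthology step fails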

Ceph Branch:       wip-adk-testing-2023-11-01-0807
Suite Branch:      wip-adk-testing-2023-11-01-0807
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 20.04
Description:       orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity}
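
No failure reason is recorded for this upgrade job. For orientation, the facets in the description correspond roughly to the orchestrator calls below; this is a hedged sketch of what the suite exercises, not the actual task yaml, and the image reference is a placeholder:

    # 2-repo_digest/repo_digest: have cephadm resolve container images to repo digests
    ceph config set mgr mgr/cephadm/use_repo_digest true

    # 3-upgrade/simple + 4-wait: start the upgrade to the build under test and poll until it converges
    ceph orch upgrade start --image <image-under-test>
    ceph orch upgrade status
    ceph versions    # should end up reporting a single version across all daemons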

Ceph Branch:       wip-adk-testing-2023-11-01-0807
Suite Branch:      wip-adk-testing-2023-11-01-0807
Teuthology Branch: main
Machine:           smithi
OS:                rhel 8.6
Description:       orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final}

Ceph Branch:       wip-adk-testing-2023-11-01-0807
Suite Branch:      wip-adk-testing-2023-11-01-0807
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 20.04
Description:       orch:cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_host_drain}
Failure Reason:
Command failed on smithi072 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:054a55d156d3a2f0db54aab582507ae7417905e5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid bd0b3be0-7a56-11ee-8db9-212e2dc638e7 -- bash -c \'set -ex\nHOSTNAMES=$(ceph orch host ls --format json | jq -r \'"\'"\'.[] | .hostname\'"\'"\')\nfor host in $HOSTNAMES; do\n # find the hostname for "host.c" which will have no mgr\n HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq \'"\'"\'any(.daemon_type == "mgr")\'"\'"\')\n if [ "$HAS_MGRS" == "false" ]; then\n HOST_C="${host}"\n fi\ndone\n# now drain that host\nceph orch host drain $HOST_C --zap-osd-devices\n# wait for drain to complete\nHOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\nwhile [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do\n sleep 15\n HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)\ndone\n# we want to check the ability to remove the host from\n# the CRUSH map, so we should first verify the host is in\n# the CRUSH map.\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Expected to see $HOST_C in CRUSH map. Saw:\\n\\n$CRUSH_MAP"\n exit 1\nfi\n# If the drain was successful, we should be able to remove the\n# host without force with no issues. If there are still daemons\n# we will get a response telling us to drain the host and a\n# non-zero return code\nceph orch host rm $HOST_C --rm-crush-entry\n# verify we\'"\'"\'ve successfully removed the host from the CRUSH map\nsleep 30\nceph osd getcrushmap -o compiled-crushmap\ncrushtool -d compiled-crushmap -o crushmap.txt\nCRUSH_MAP=$(cat crushmap.txt)\nif grep -q "$HOST_C" <<< "$CRUSH_MAP"; then\n printf "Saw $HOST_C in CRUSH map after it should have been removed.\\n\\n$CRUSH_MAP"\n exit 1\nfi\n\''