Description: rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_host_drain}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-14_20:00:15-rados-main-distro-default-smithi/7655926/teuthology.log

Failure Reason:

"2024-04-15T16:13:31.383950+0000 mon.a (mon.0) 467 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-14_20:00:15-rados-main-distro-default-smithi/7655926/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-04-14_20:00:15-rados-main-distro-default-smithi/7655926
  • description: rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_host_drain}
  • duration: 0:15:48
  • email: ceph-qa@ceph.com
  • failure_reason: "2024-04-15T16:13:31.383950+0000 mon.a (mon.0) 467 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
  • flavor: default
  • job_id: 7655926
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-04-14_20:00:15-rados-main-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: main
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 3
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: True
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • MON_DOWN
        • mons down
        • mon down
        • out of quorum
      • sha1: c2d8c3155d854ee204cb7c7261232571229d1d54
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: c2d8c3155d854ee204cb7c7261232571229d1d54
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: main
      • sha1: 87dbfb632e974cb2c28d17423071d2d833b53b29
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.a', 'osd.0', 'osd.1']
    • ['host.b', 'mon.b', 'mgr.b', 'osd.2', 'osd.3']
    • ['host.c', 'mon.c', 'osd.4', 'osd.5']
  • sentry_event:
  • status: fail
  • success: False
  • branch: main
  • seed: 5211
  • sha1: c2d8c3155d854ee204cb7c7261232571229d1d54
  • subset: 28113/100000
  • suite: rados
  • suite_branch: main
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph_87dbfb632e974cb2c28d17423071d2d833b53b29/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph.git
  • suite_sha1: 87dbfb632e974cb2c28d17423071d2d833b53b29
  • targets:
    • smithi018.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE+LB1aiY9hrRw58AAc0dP3I/AIVbrmJ6cZ8MnldLNX/+i5FXPzHM+DAui8XPql1L5ZPZLtDfQgJDS2yD6UPVzo=
    • smithi066.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPuT6+XPuANTWEnU/hfyVkdGT5rdsq7nlb3HbfX8ZjqPjO4zuvjIxCcKX/pLjy/KOJZpPnDxsNagRT62av1NG4M=
    • smithi116.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC0/nyWsdl9DL1GlKFXZ7Z2vu+gk+6PyBCj5BPyh+6FY4xjg5Mj9WOqTZN4o5JJW9YYg/t01YiWqBddzwimZq5Y=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • pexec:
      • all:
        • sudo dnf install runc -y
        • sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
        • sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
    • install:
    • cephadm:
    • cephadm.shell:
      • host.a:
        • set -ex
          HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
          for host in $HOSTNAMES; do
            # find the hostname for "host.c" which will have no mgr
            HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
            if [ "$HAS_MGRS" == "false" ]; then
              HOST_C="${host}"
            fi
          done
          # One last thing to worry about before draining the host
          # is that the teuthology test tends to put the explicit
          # hostnames in the placement for the mon service.
          # We want to make sure we can drain without providing
          # --force and there is a check for the host being removed
          # being listed explicitly in the placements. Therefore,
          # we should remove it from the mon placement.
          ceph orch ls mon --export > mon.yaml
          sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
          ceph orch apply -i mon_adjusted.yaml
          # now drain that host
          ceph orch host drain $HOST_C --zap-osd-devices
          # wait for drain to complete
          HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
          while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
            sleep 15
            HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
          done
          # we want to check the ability to remove the host from
          # the CRUSH map, so we should first verify the host is in
          # the CRUSH map.
          ceph osd getcrushmap -o compiled-crushmap
          crushtool -d compiled-crushmap -o crushmap.txt
          CRUSH_MAP=$(cat crushmap.txt)
          if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
            printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
            exit 1
          fi
          # If the drain was successful, we should be able to remove the
          # host without force with no issues. If there are still daemons
          # we will get a response telling us to drain the host and a
          # non-zero return code
          ceph orch host rm $HOST_C --rm-crush-entry
          # verify we've successfully removed the host from the CRUSH map
          sleep 30
          ceph osd getcrushmap -o compiled-crushmap
          crushtool -d compiled-crushmap -o crushmap.txt
          CRUSH_MAP=$(cat crushmap.txt)
          if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
            printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
            exit 1
          fi
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 951
  • user: teuthology
  • queue:
  • posted: 2024-04-14 20:05:23
  • started: 2024-04-15 15:51:31
  • updated: 2024-04-15 16:18:34
  • status_class: danger
  • runtime: 0:27:03
  • wait_time: 0:11:15
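
Note (not part of the recorded job): the health check fires while host.c (which carries mon.c, osd.4 and osd.5 per the roles above) is being drained and removed, so a plausible reading is that a daemon on that host was still running, or still being reported, after cephadm stopped tracking it. A hedged follow-up sketch; $HOST_C is the variable from the test script above, and "c" is the id of the mon placed on host.c in this job:

  # Nothing should remain under cephadm's control on the drained host
  ceph orch ps --hostname "$HOST_C"

  # Check whether the mon that lived on host.c lingers in the mon map;
  # removing it by hand is shown only as an illustration, not as the fix
  ceph mon dump
  ceph mon remove c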