Description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/classic task/test_rgw_multisite}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-06-09_21:00:03-rados-squid-distro-default-smithi/7746979/teuthology.log

Failure Reason:

"2024-06-10T04:33:50.663896+0000 mon.a (mon.0) 445 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-06-09_21:00:03-rados-squid-distro-default-smithi/7746979/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-06-09_21:00:03-rados-squid-distro-default-smithi/7746979
  • description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/classic task/test_rgw_multisite}
  • duration: 0:19:22
  • email: ceph-qa@ceph.com
  • failure_reason: "2024-06-10T04:33:50.663896+0000 mon.a (mon.0) 445 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
  • flavor: default
  • job_id: 7746979
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-06-09_21:00:03-rados-squid-distro-default-smithi
  • nuke_on_error:
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: squid
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 1
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: True
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • MON_DOWN
      • log-only-match:
        • CEPHADM_
      • sha1: 18001b18ccec6ef0e131e4537cb87f419f579be0
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
      • install:
        • ceph:
          • flavor: default
          • sha1: 18001b18ccec6ef0e131e4537cb87f419f579be0
      • workunit:
        • branch: squid
        • sha1: 7b7a5d375cf4e4afab96b9d4d229b699ae8304bb
    • owner: scheduled_teuthology@teuthology
    • pid:
    • roles:
      • ['host.a', 'mon.a', 'mgr.a', 'osd.0']
      • ['host.b', 'mon.b', 'mgr.b', 'osd.1']
      • ['host.c', 'mon.c', 'osd.2']
    • sentry_event:
    • status: fail
    • success: False
    • branch: squid
    • seed: 949
    • sha1: 18001b18ccec6ef0e131e4537cb87f419f579be0
    • subset: 24217/100000
    • suite: rados
    • suite_branch: squid
    • suite_path: /home/teuthworker/src/git.ceph.com_ceph_7b7a5d375cf4e4afab96b9d4d229b699ae8304bb/qa
    • suite_relpath: qa
    • suite_repo: https://git.ceph.com/ceph.git
    • suite_sha1: 7b7a5d375cf4e4afab96b9d4d229b699ae8304bb
    • targets:
      • smithi089.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJifvDfogqUAGTJE8NigSNcv5UTzvUY1XunAWv/LsDVA1toAvM1PfTUofq0v6cpel+vRjk0Its6ZD11PdL9V+/Y=
      • smithi138.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNk1bVhzcryE4OD4ooMqxCnwo55c0REiX5kUwd4SAcR7SUOwe5x0Di9JPdppobpJY4xqh8dyLC2YA1qh36cdwQc=
      • smithi174.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN8HeVvd054JYuyr0y5IOvHTaHCCtgtVE6BYNKzuJsHzFtfixpBFMLRf5vrpR6H8CK19VeOc/CvmQQxDeBwrpKs=
    • tasks:
      • internal.check_packages:
      • internal.buildpackages_prep:
      • internal.save_config:
      • internal.check_lock:
      • internal.add_remotes:
      • console_log:
      • internal.connect:
      • internal.push_inventory:
      • internal.serialize_remote_roles:
      • internal.check_conflict:
      • internal.check_ceph_data:
      • internal.vm_setup:
      • kernel:
        • kdb: 1
        • sha1: distro
      • internal.base:
      • internal.archive_upload:
      • internal.archive:
      • internal.coredump:
      • internal.sudo:
      • internal.syslog:
      • internal.timer:
      • pcp:
      • selinux:
      • ansible.cephlab:
      • clock:
      • install:
      • cephadm:
      • cephadm.shell:
        • host.a:
          • ceph mgr module enable rgw
      • rgw_module.apply:
        • specs:
          • rgw_realm: myrealm1
          • rgw_zone: myzone1
          • rgw_zonegroup: myzonegroup1
          • spec:
            • rgw_frontend_port: 5500
      • cephadm.shell:
        • host.a:
          • set -e
            set -x
            while true; do TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done
            TOKENS=$(ceph rgw realm tokens)
            echo $TOKENS | jq --exit-status '.[0].realm == "myrealm1"'
            echo $TOKENS | jq --exit-status '.[0].token'
            TOKEN_JSON=$(ceph rgw realm tokens | jq -r '.[0].token' | base64 --decode)
            echo $TOKEN_JSON | jq --exit-status '.realm_name == "myrealm1"'
            echo $TOKEN_JSON | jq --exit-status '.endpoint | test("http://.+:\\d+")'
            echo $TOKEN_JSON | jq --exit-status '.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")'
            echo $TOKEN_JSON | jq --exit-status '.access_key'
            echo $TOKEN_JSON | jq --exit-status '.secret'
    • teuthology_branch: main
    • verbose: False
    • pcp_grafana_url:
    • priority: 101
    • user: teuthology
    • queue:
    • posted: 2024-06-09 21:03:01
    • started: 2024-06-10 04:06:32
    • updated: 2024-06-10 04:40:30
    • status_class: danger
    • runtime: 0:33:58
    • wait_time: 0:14:36
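
For reference, this job exercises the rgw manager module's realm-token workflow: rgw_module.apply bootstraps realm myrealm1 / zonegroup myzonegroup1 / zone myzone1, and the final cephadm.shell task polls "ceph rgw realm tokens" until the master zone reports an endpoint, then base64-decodes the token and asserts its fields. A minimal sketch of repeating that check by hand, using the same commands as the task (the expected field list is inferred from the task's jq assertions, not from separate documentation):

    # Fetch and decode the first realm token (same commands as the task above).
    TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token')
    echo "$TOKEN" | base64 --decode | jq .
    # Per the task's assertions, the decoded JSON should contain: realm_name,
    # realm_id (a UUID), endpoint (http://<host>:<port>), access_key, secret.

On a second cluster, this token is what the rgw module's documented zone-create workflow consumes to join the realm as a secondary zone; that step is outside the scope of this job's task list.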