Description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic}

Log: http://qa-proxy.ceph.com/teuthology/lflores-2024-04-01_18:07:25-rados-wip-yuri8-testing-2024-03-25-1419-distro-default-smithi/7634096/teuthology.log

Failure Reason:

"2024-04-01T19:43:37.245073+0000 mon.a (mon.0) 441 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/lflores-2024-04-01_18:07:25-rados-wip-yuri8-testing-2024-03-25-1419-distro-default-smithi/7634096/teuthology.log
  • archive_path: /home/teuthworker/archive/lflores-2024-04-01_18:07:25-rados-wip-yuri8-testing-2024-03-25-1419-distro-default-smithi/7634096
  • description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic}
  • duration: 0:25:17
  • email:
  • failure_reason: "2024-04-01T19:43:37.245073+0000 mon.a (mon.0) 441 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
  • flavor: default
  • job_id: 7634096
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: lflores-2024-04-01_18:07:25-rados-wip-yuri8-testing-2024-03-25-1419-distro-default-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: wip-yuri8-testing-2024-03-25-1419
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 3
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: True
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • MON_DOWN
        • mons down
        • mon down
        • out of quorum
      • sha1: e142085d06ebbedb32196116c9f19e2d28f93430
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: e142085d06ebbedb32196116c9f19e2d28f93430
    • workunit:
      • branch: wip-tracker-64864
      • sha1: b64378df0aa36ce626cf358e9d2b6f4658480c2f
  • owner: scheduled_lflores@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.a', 'osd.0']
    • ['host.b', 'mon.b', 'mgr.b', 'osd.1']
    • ['host.c', 'mon.c', 'osd.2']
  • sentry_event:
  • status: fail
  • success: False
  • branch: wip-yuri8-testing-2024-03-25-1419
  • seed: 3833
  • sha1: e142085d06ebbedb32196116c9f19e2d28f93430
  • subset: 111/120000
  • suite: rados
  • suite_branch: wip-tracker-64864
  • suite_path: /home/teuthworker/src/github.com_ljflores_ceph_b64378df0aa36ce626cf358e9d2b6f4658480c2f/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/ljflores/ceph.git
  • suite_sha1: b64378df0aa36ce626cf358e9d2b6f4658480c2f
  • targets:
    • smithi040.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEAjwEQcMOhW7fLKAZn6aeo1fLZ401Lu1yzyz47s5v5r6I67za++XNr/25NHpWPvfkpsUemSvD33ZZnukot4/98=
    • smithi046.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOrW+R40fm1mQwSt/ScXmvoKn+flGmJDD5z+sHlYpn7RsiU9vsE6Yq6xgS+Sy7gla13cy6LotFcZyD/eNVfDjjE=
    • smithi063.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjC/GJw0vX7031xB6/oMOen+3RUMD4pUCrxsNUYJ0L4og5k/aCEbM7WTfqahzsodZUDdcQAWNHArsYD3hVt3+E=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
    • cephadm.shell:
      • host.a:
        • set -e
          set -x
          ceph orch apply node-exporter
          ceph orch apply grafana
          ceph orch apply alertmanager
          ceph orch apply prometheus
          sleep 240
          ceph orch ls
          ceph orch ps
          ceph orch host ls
          MON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r 'last | .daemon_name')
          GRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e '.[]' | jq -r '.hostname')
          PROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e '.[]' | jq -r '.hostname')
          ALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e '.[]' | jq -r '.hostname')
          GRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" '.[] | select(.hostname==$GRAFANA_HOST) | .addr')
          PROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" '.[] | select(.hostname==$PROM_HOST) | .addr')
          ALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" '.[] | select(.hostname==$ALERTM_HOST) | .addr')
          # check each host node-exporter metrics endpoint is responsive
          ALL_HOST_IPS=$(ceph orch host ls -f json | jq -r '.[] | .addr')
          for ip in $ALL_HOST_IPS; do
            curl -s http://${ip}:9100/metric
          done
          # check grafana endpoints are responsive and database health is okay
          curl -k -s https://${GRAFANA_IP}:3000/api/health
          curl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e '.database == "ok"'
          # stop mon daemon in order to trigger an alert
          ceph orch daemon stop $MON_DAEMON
          sleep 120
          # check prometheus endpoints are responsive and mon down alert is firing
          curl -s http://${PROM_IP}:9095/api/v1/status/config
          curl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e '.status == "success"'
          curl -s http://${PROM_IP}:9095/api/v1/alerts
          curl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e '.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"'
          # check alertmanager endpoints are responsive and mon down alert is active
          curl -s http://${ALERTM_IP}:9093/api/v1/status
          curl -s http://${ALERTM_IP}:9093/api/v1/alerts
          curl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e '.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"'
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority: 75
  • user: lflores
  • queue:
  • posted: 2024-04-01 18:09:14
  • started: 2024-04-01 19:14:23
  • updated: 2024-04-01 19:55:16
  • status_class: danger
  • runtime: 0:40:53
  • wait_time: 0:15:36
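
For triage of the CEPHADM_STRAY_DAEMON warning on a live cluster, the usual first step is to compare what cephadm is managing against what the health check reports; a minimal sketch using standard Ceph commands (these are not part of the job above):

  # Names the daemon behind the CEPHADM_STRAY_DAEMON health check.
  ceph health detail
  # Daemons cephadm is actually managing, and the hosts it manages them on.
  ceph orch ps
  ceph orch host ls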