Description: orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_monitoring_stack_basic}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-15_21:08:03-orch-squid-distro-default-smithi/7657546/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=ed3d4d5d6e6d4b8893d603ebb12451a2

Failure Reason:

Command failed on smithi033 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull'
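
The job died in the install/pull phase, before any of the monitoring checks ran. A quick way to narrow down whether the registry, the tag, or the node is at fault is to rerun the pull by hand on the failing host. This is only a diagnostic sketch — the cephadm path and image are copied from the failure above, and the `/v2/` request just checks that the Quay endpoint answers at all:

    # rerun the exact failing command with debug-level output
    sudo /home/ubuntu/cephtest/cephadm --verbose \
        --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull

    # bypass cephadm to surface the raw registry error, if any
    sudo podman pull quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765

    # check that the registry endpoint is reachable from the node
    curl -skI https://quay-quay-quay.apps.os.sepia.ceph.com/v2/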

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-15_21:08:03-orch-squid-distro-default-smithi/7657546/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-04-15_21:08:03-orch-squid-distro-default-smithi/7657546
  • description: orch/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_monitoring_stack_basic}
  • duration: 0:07:27
  • email: ceph-qa@ceph.com
  • failure_reason: Command failed on smithi033 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:a9a752df26c63acad72e1b3569fd79a515ca0765 pull'
  • flavor: default
  • job_id: 7657546
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-04-15_21:08:03-orch-squid-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: squid
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 1
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: False
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • MON_DOWN
      • sha1: a9a752df26c63acad72e1b3569fd79a515ca0765
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: a9a752df26c63acad72e1b3569fd79a515ca0765
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: squid
      • sha1: 461a84e6fbbf089011eafd13b98169fb9e9b8cc9
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.a', 'osd.0']
    • ['host.b', 'mon.b', 'mgr.b', 'osd.1']
    • ['host.c', 'mon.c', 'osd.2']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=ed3d4d5d6e6d4b8893d603ebb12451a2
  • status: fail
  • success: False
  • branch: squid
  • seed: 7775
  • sha1: a9a752df26c63acad72e1b3569fd79a515ca0765
  • subset: 18/64
  • suite: orch
  • suite_branch: squid
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph_461a84e6fbbf089011eafd13b98169fb9e9b8cc9/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph.git
  • suite_sha1: 461a84e6fbbf089011eafd13b98169fb9e9b8cc9
  • targets:
    • smithi033.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBG3ToGhxWTpNvJyoYxjFNzs0KtIX0k0Qv5FT6mzHEP0Rho4Jlz8CGF6tlLedRYoo3/zyMTjwq9J9nnOqxr/OIkk=
    • smithi130.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBANzPhjVY/tFfOSNelN/7jQ9NhklezNDGJQD1y9fSUbiOUVotle1xXOaoO+59253lp1cygMwCMNGAhQCAQxuYgs=
    • smithi153.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLa+A3jXd+oYpgfzcq83X+9prHu0NTwo3hjlhPGHVKmfY28H4nA+UF05lTnw68Z2yzp03MFtDAEJBfXV+n35smc=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
    • cephadm.shell:
      • host.a:
        • set -e
          set -x
          ceph orch apply node-exporter
          ceph orch apply grafana
          ceph orch apply alertmanager
          ceph orch apply prometheus
          sleep 240
          ceph orch ls
          ceph orch ps
          ceph orch host ls
          MON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r 'last | .daemon_name')
          GRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e '.[]' | jq -r '.hostname')
          PROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e '.[]' | jq -r '.hostname')
          ALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e '.[]' | jq -r '.hostname')
          GRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" '.[] | select(.hostname==$GRAFANA_HOST) | .addr')
          PROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" '.[] | select(.hostname==$PROM_HOST) | .addr')
          ALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" '.[] | select(.hostname==$ALERTM_HOST) | .addr')
          # check each host node-exporter metrics endpoint is responsive
          ALL_HOST_IPS=$(ceph orch host ls -f json | jq -r '.[] | .addr')
          for ip in $ALL_HOST_IPS; do
            curl -s http://${ip}:9100/metric
          done
          # check grafana endpoints are responsive and database health is okay
          curl -k -s https://${GRAFANA_IP}:3000/api/health
          curl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e '.database == "ok"'
          # stop mon daemon in order to trigger an alert
          ceph orch daemon stop $MON_DAEMON
          sleep 120
          # check prometheus endpoints are responsive and mon down alert is firing
          curl -s http://${PROM_IP}:9095/api/v1/status/config
          curl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e '.status == "success"'
          curl -s http://${PROM_IP}:9095/api/v1/alerts
          curl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e '.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"'
          # check alertmanager endpoints are responsive and mon down alert is active
          curl -s http://${ALERTM_IP}:9093/api/v1/status
          curl -s http://${ALERTM_IP}:9093/api/v1/alerts
          curl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e '.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"'
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 100
  • user: teuthology
  • queue:
  • posted: 2024-04-15 21:10:33
  • started: 2024-04-15 22:01:17
  • updated: 2024-04-15 22:21:11
  • status_class: danger
  • runtime: 0:19:54
  • wait_time: 0:12:27
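
A note on the cephadm.shell script in the tasks above: every assertion in it is a `jq -e` expression, and `jq -e` derives its exit status from its last output (non-zero when the result is false, null, or absent), so under `set -e` any check that does not hold aborts the script. A standalone sketch of the same pattern, assuming a reachable Prometheus on port 9095 and a `PROM_IP` obtained as in the script:

    # jq -e exits non-zero when the last output is false or null,
    # or when nothing matches at all; under set -e this line
    # therefore fails the script unless CephMonDown is firing
    curl -s http://${PROM_IP}:9095/api/v1/alerts \
        | jq -e '.data.alerts[] | select(.labels.alertname == "CephMonDown") | .state == "firing"'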