Description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-08_22:08:01-orch-reef-distro-default-smithi/7646634/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-08_22:08:01-orch-reef-distro-default-smithi/7646634/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-04-08_22:08:01-orch-reef-distro-default-smithi/7646634
  • description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity}
  • duration: 0:49:36
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor:
  • job_id: 7646634
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-04-08_22:08:01-orch-reef-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: reef
    • ceph:
      • conf:
        • global:
          • mon election default strategy: 3
        • mgr:
          • debug mgr: 20
          • debug ms: 1
          • mgr/cephadm/use_agent: False
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 5108bc67b831101bc32a1061150595384640c56c
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: 5108bc67b831101bc32a1061150595384640c56c
    • workunit:
      • branch: reef
      • sha1: d540ebaca6b131a1dd560e7f69e024b133bbaa42
    • owner: scheduled_teuthology@teuthology
    • pid:
    • roles:
      • ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a']
      • ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b']
    • sentry_event:
    • status: pass
    • success: True
    • branch: reef
    • seed: 4929
    • sha1: 5108bc67b831101bc32a1061150595384640c56c
    • subset: 11/64
    • suite: orch
    • suite_branch: reef
    • suite_path: /home/teuthworker/src/git.ceph.com_ceph_d540ebaca6b131a1dd560e7f69e024b133bbaa42/qa
    • suite_relpath: qa
    • suite_repo: https://git.ceph.com/ceph.git
    • suite_sha1: d540ebaca6b131a1dd560e7f69e024b133bbaa42
    • targets:
      • smithi153.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOLYEpB7TGBEaZfCzv3sjUNcmPUyig4izRu0kXtPKuipQ9ZxLfoEV9Hn11MsiU39oevAqBw32fQw9W6Ur7v87Cg=
      • smithi188.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFqfbigvhJTxALgt37552bB+TZnc3cCezhQosE0cWnTpjlwiMXRD0SccBJRi4krVhjZs5vj70/tfhdrt2l577oc=
    • tasks:
      • internal.check_packages:
      • internal.buildpackages_prep:
      • internal.save_config:
      • internal.check_lock:
      • internal.add_remotes:
      • console_log:
      • internal.connect:
      • internal.push_inventory:
      • internal.serialize_remote_roles:
      • internal.check_conflict:
      • internal.check_ceph_data:
      • internal.vm_setup:
      • kernel:
        • kdb: 1
        • sha1: distro
      • internal.base:
      • internal.archive_upload:
      • internal.archive:
      • internal.coredump:
      • internal.sudo:
      • internal.syslog:
      • internal.timer:
      • pcp:
      • selinux:
      • ansible.cephlab:
      • clock:
      • pexec:
        • all:
          • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
          • sudo dnf -y module reset container-tools
          • sudo dnf -y module install container-tools
          • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
      • cephadm:
        • allow_ptrace: False
        • avoid_pacific_features: True
        • cephadm_branch: v16.2.0
        • cephadm_git_url: https://github.com/ceph/ceph
        • image: quay.io/ceph/ceph:v16.2.0
        • conf:
          • global:
            • mon election default strategy: 3
          • mgr:
            • debug mgr: 20
            • debug ms: 1
            • mgr/cephadm/use_agent: False
          • mon:
            • debug mon: 20
            • debug ms: 1
            • debug paxos: 20
          • osd:
            • debug ms: 1
            • debug osd: 20
        • flavor: default
        • log-ignorelist:
          • \(MDS_ALL_DOWN\)
          • \(MDS_UP_LESS_THAN_MAX\)
        • sha1: 5108bc67b831101bc32a1061150595384640c56c
        • cluster: ceph
        • cephadm_mode: root
      • cephadm.shell:
        • mon.a:
          • ceph config set mgr mgr/cephadm/use_repo_digest false --force
      • cephadm.shell:
        • mon.a:
          • radosgw-admin realm create --rgw-realm=r --default
          • radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
          • radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
          • radosgw-admin period update --rgw-realm=r --commit
          • ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000
          • ceph osd pool create foo
          • rbd pool init foo
          • ceph orch apply iscsi foo u p
          • sleep 180
          • ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
          • ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
          • ceph config set global log_to_journald false --force
          • ceph orch ps
          • ceph versions
          • ceph -s
          • ceph orch ls
          • ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
          • ceph orch ps --refresh
          • sleep 180
          • ceph orch ps
          • ceph versions
          • ceph -s
          • ceph health detail
          • ceph versions | jq -e '.mgr | length == 2'
          • ceph mgr fail
          • sleep 180
          • ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
          • ceph orch ps --refresh
          • sleep 180
          • ceph orch ps
          • ceph versions
          • ceph health detail
          • ceph -s
          • ceph mgr fail
          • sleep 180
          • ceph orch ps
          • ceph versions
          • ceph -s
          • ceph health detail
          • ceph versions | jq -e '.mgr | length == 1'
          • ceph mgr fail
          • sleep 180
          • ceph orch ps
          • ceph orch ls
          • ceph versions
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph versions | jq -e '.mgr | length == 1'
          • ceph versions | jq -e '.mgr | keys' | grep $sha1
          • ceph versions | jq -e '.overall | length == 2'
          • ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '{print $2}')
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph orch ps
          • ceph versions | jq -e '.mon | length == 2'
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '{print $2}')
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph orch ps
          • ceph versions | jq -e '.mon | length == 1'
          • ceph versions | jq -e '.mon | keys' | grep $sha1
          • ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 5'
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph orch ps
          • ceph versions | jq -e '.osd | length == 2'
          • ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph orch ps
          • ceph versions | jq -e '.osd | length == 2'
          • ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8'
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph orch ps
          • ceph versions | jq -e '.osd | length == 1'
          • ceph versions | jq -e '.osd | keys' | grep $sha1
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
          • ceph orch ps
          • ceph versions | jq -e '.rgw | length == 1'
          • ceph versions | jq -e '.rgw | keys' | grep $sha1
          • ceph orch upgrade status
          • ceph health detail
          • ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
      • cephadm.shell:
        • mon.a:
          • while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
          • ceph orch ps
          • ceph versions
          • echo "wait for servicemap items w/ changing names to refresh"
          • sleep 60
          • ceph orch ps
          • ceph versions
          • ceph orch upgrade status
          • ceph health detail
          • ceph versions | jq -e '.overall | length == 1'
          • ceph versions | jq -e '.overall | keys' | grep $sha1
          • ceph orch ls | grep '^osd '
      • cephadm.shell:
        • mon.a:
          • ceph orch upgrade ls
          • ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
          • ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
    • teuthology_branch: main
    • verbose: False
    • pcp_grafana_url:
    • priority: 930
    • user: teuthology
    • queue:
    • posted: 2024-04-08 22:11:21
    • started: 2024-04-13 10:36:45
    • updated: 2024-04-13 11:36:16
    • status_class: success
    • runtime: 0:59:31
    • wait_time: 0:09:55
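
The staggered-upgrade steps in the tasks above poll the orchestrator with a long one-liner after every "ceph orch upgrade start" invocation. Purely as a readability sketch (these are the same commands the job runs, just wrapped across lines; $sha1 stands for the build under test, 5108bc67b831101bc32a1061150595384640c56c):

    # Readability sketch of the polling one-liner used after each upgrade
    # start above; it keeps reporting state until the upgrade is no longer
    # in progress or its status message contains an error.
    while ceph orch upgrade status | jq '.in_progress' | grep true \
          && ! ceph orch upgrade status | jq '.message' | grep Error; do
        ceph orch ps
        ceph versions
        ceph orch upgrade status
        sleep 30
    done

The job repeats this loop after each staggered invocation, narrowing the scope step by step: mgr daemons only, then mon daemons per host, then osd daemons with --limit, then crash,osd, then the rgw.foo service, and finally an unrestricted upgrade of everything remaining.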