Description: rbd:nvmeof/{0-single-container-host base/install cluster/{fixed-3 openstack} workloads/cephadm_nvmeof}

Log: http://qa-proxy.ceph.com/teuthology/baum-2023-07-27_11:09:28-rbd:nvmeof-wip-baum-cephadm-nvmeof-teuthology-38dea7dc-distro-default-smithi/7354515/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=450c421540964b3aa56e5b44f2fb647f

Failure Reason:

{"Creating volume group 'vg_nvme' failed", '/dev/vg_nvme: already exists in filesystem', "Run `vgcreate --help' for more information."}

  • log_href: http://qa-proxy.ceph.com/teuthology/baum-2023-07-27_11:09:28-rbd:nvmeof-wip-baum-cephadm-nvmeof-teuthology-38dea7dc-distro-default-smithi/7354515/teuthology.log
  • archive_path: /home/teuthworker/archive/baum-2023-07-27_11:09:28-rbd:nvmeof-wip-baum-cephadm-nvmeof-teuthology-38dea7dc-distro-default-smithi/7354515
  • description: rbd:nvmeof/{0-single-container-host base/install cluster/{fixed-3 openstack} workloads/cephadm_nvmeof}
  • duration: 576
  • email: aindenba@redhat.com
  • failure_reason: {"Creating volume group 'vg_nvme' failed", '/dev/vg_nvme: already exists in filesystem', "Run `vgcreate --help' for more information."}
  • flavor:
  • job_id: 7354515
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: baum-2023-07-27_11:09:28-rbd:nvmeof-wip-baum-cephadm-nvmeof-teuthology-38dea7dc-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: wip-baum-cephadm-nvmeof-teuthology-38dea7dc
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 38dea7dc2156bd003d2291e9f2d82688490beb53
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • install:
      • ceph:
        • flavor: default
        • sha1: 38dea7dc2156bd003d2291e9f2d82688490beb53
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: wip-baum-cephadm-nvmeof-teuthology
      • sha1: 1f02a5445d0166bd713cdb066fd1a7520e216b9d
  • owner: scheduled_baum@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
    • ['mon.b', 'osd.2', 'osd.3', 'osd.4', 'client.1']
    • ['mon.c', 'osd.5', 'osd.6', 'osd.7', 'client.2']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=450c421540964b3aa56e5b44f2fb647f
  • status: fail
  • success: False
  • branch: wip-baum-cephadm-nvmeof-teuthology-38dea7dc
  • seed:
  • sha1: 38dea7dc2156bd003d2291e9f2d82688490beb53
  • subset:
  • suite:
  • suite_branch: wip-baum-cephadm-nvmeof-teuthology
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 1f02a5445d0166bd713cdb066fd1a7520e216b9d
  • targets:
    • smithi002.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDA3vYWLv2O3IYwK7uTpIJRUbIiR3RL+o5sgCfdc0pgUQrAt0F47/YrrQ74uQAvjbWQq+Y2NBMYtapukEUZKnknSukktAWN1aOxShpc+t46Nx6gGeZiV0UzUCD2z7bkxWjyAUuanUR6+4Ap7rXz1o1M3tOyo5P2Whrx1CvnJVEfuyhJAyCh4ikn1rqvPLjZrIf56Df2EUjgWioaTm0ccKN5ZgNSyU/e8qMngP6tqL5FrojMSQRc1P7ddp17uMFzg1IYSvX1WP2fnS27veg/CKzhBHRd/7uhc81KnQJLuX6v3ZftJoZM7576bgxGhkujEBSnmHl+IIcljAi9LhEtUXiRElX1jsx0k3dwTY8oiXAsLiTKB8Q3zAfM3296JgKhHyOyakorAkiN4FQGiJOv1lkpUDXZSeTQwgRi1esuDnBLNlEQ7onpvKF0iCJhNV3GeYctZe0r1zPyPImR12EkaGV31XSNRnYE/4R8ncXmx8oj2jaKQyqFsgV/m2cb7V58VgE=
    • smithi160.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD6/0/vHggY5XXhOJSeXOLMwG1gBoh8C1P/BGBdCTmjhkVNVbzIunYtY8FzHsmN78ARELQpv+M+hiVXeoYwcYo04fvTsYDt+FJM+HbBdrt/9j0yhvCiKpc6xybj3fXY3NjmaZbTYTkLBt2xCfyAsX5byyCdutn+3elr7fjQNeJRK9u9m0Xhe1ToCSNjIZ+UnW7jP70Mj8paP7RRDUo18gtAj/+kbismgbvHghtGI5fV4IT1CUMCz7yMmZEVkYi4+UwwDzJgWZP+4m/80K0+B8iQ2PJ8KMk3z7kVpL683Cs/0lAL9cP2wFbqK23Yd7VRZ+yWRczQFgpVikeDmwHgI6R/D0gbrBAOkuBa/j3Sjz/uAxtP95CbxudE4uwjfklwZ3hPPDW4LsqFxFyCvvkzDFXF/fwpTQ9koXukoL4D9cnd0zDO5s1d8RHaPcm1O/m//L7tQJ02hrBrvP3FUNC4KZFetyTa5lUFIn+raEqgzlo69kgbcO0RiBnr+Ajo3Np/QdE=
    • smithi193.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCdUJ2wHrtSNJfMhqZdaxHhzAYm+cTuqrwwuGsqtkpfQAsnJs+vZA9XEz9m2AJK2JLSIIPzYJ72n5vB2f0GVZZpF/3OfMUzN0VgBRXKgp5Wa2WJa12w+Bk3vVlYPvGQ9NrOX7jELM0BrlaORO2VTujxvW4LhBSVAhWc9edFe05827YVFFOuEKK95nm1vaF3Ve5xWtzO4dXoJhopAVz2VM7lomTRtPMG/m42opmJcvhBcX5VfO2LlMFE2abIFOF9Be8WDb4MnCl549BkuBhWRXi0E5zCeWvmdVSnAHSvy+Ljl5oFuG0T4mMcgQM1yQ3+QQF9Ohoy77QsE1TZQw6eO1gCgT0IcXwIDY1TpV54vm29iUinqI5fVdRWlaZqafP5L6TkUR6dKYdOsN0fi1c5dI7tooelu1hIspX7AHk8alZcLxRVeZlgZw714z+VSOa1C7kTzNdrYixdA7uI2yM3BrEYSYrPpiK5ZuuuBZQcfzY6vRQ3H0+UGd8Ug6V7WUxW8e0=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • pexec:
      • all:
        • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
        • sudo dnf -y module reset container-tools
        • sudo dnf -y module install container-tools --allowerasing --nobest
        • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
        • sudo sed -i 's/runtime = "runc"/#runtime = "runc"/g' /usr/share/containers/containers.conf
        • sudo sed -i 's/#runtime = "crun"/runtime = "crun"/g' /usr/share/containers/containers.conf
    • cephadm:
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph osd lspools
        • ceph osd pool create mypool
        • ceph osd pool application enable mypool rbd
        • ceph osd lspools
        • ceph osd pool ls detail
        • set -ex
          # ceph config get mgr mgr/cephadm/container_image_nvmeof
          ceph orch apply nvmeof mypool --placement="1 $(hostname)"
          # Per adk: the --image flag on ceph orch redeploy is only really meant
          # for the ceph daemons, which is why it's trying the inspect-image there.
          # The proper way to change the image for the nvme-of daemon would be
          # ceph orch daemon redeploy $d --image quay.io/baum/nvmeof:0.0.1
          #
          # ceph config set mgr mgr/cephadm/container_image_nvmeof
          # ceph orch redeploy nvmeof.mypool # service name reported by 'ceph orch ls'
          # (a sketch of this image-override flow appears after the record below)
    • cephadm.wait_for_service:
      • service: nvmeof.mypool
    • exec:
      • host.a:
        • set -ex
          # print nvmeof start up logs
          s=$(systemctl list-units | grep nvmeof.mypool | awk '{print $1}')
          PAGER=cat journalctl -u $s
    • install:
      • extra_packages:
        • nvme-cli
    • exec:
      • client.0:
        • modprobe nvme-fabrics
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph osd lspools
        • set -ex
          s=$(ceph orch ps | grep nvmeof.mypool | awk '{print $4}')
          echo $s
          [ $s = running ]
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-07-27 11:10:42
  • started:
  • updated: 2023-07-27 11:38:00
  • status_class: danger
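
The image-override flow referenced in the comment inside the first cephadm.shell task above (per adk) would go roughly as follows. This is a hedged sketch, not part of the job: the image tag quay.io/baum/nvmeof:0.0.1, the config key mgr/cephadm/container_image_nvmeof, and the service name nvmeof.mypool are all taken from that comment.

    # Point cephadm at the desired nvmeof container image, then
    # redeploy the whole service so it picks the new image up.
    ceph config set mgr mgr/cephadm/container_image_nvmeof quay.io/baum/nvmeof:0.0.1
    ceph orch redeploy nvmeof.mypool   # service name as reported by 'ceph orch ls'

    # Alternatively, redeploy individual daemons with an explicit image:
    for d in $(ceph orch ps | awk '/nvmeof\.mypool/ {print $1}'); do
        ceph orch daemon redeploy "$d" --image quay.io/baum/nvmeof:0.0.1
    done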