Description: rbd:nvmeof/{0-single-container-host base/install cluster/{fixed-3 openstack} workloads/cephadm_nvmeof}

Log: http://qa-proxy.ceph.com/teuthology/bdavidov-2023-08-31_17:26:01-rbd:nvmeof-wip-barakda-cephadm-nvmeof-distro-default-smithi/7385863/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=0c60af94788a40d0a95193b57db4498a

Failure Reason:

Command failed on smithi012 with status 2:

    sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:dd540f6e24dadebcd77532145470e5f4e055089c shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 95a052b8-483b-11ee-9ab3-7b867c8bd7da -- bash -c '
    set -ex
    #
    ceph config get mgr mgr/cephadm/container_image_nvmeof
    ceph orch apply nvmeof mypool --placement="1 $(hostname)"
    # Per adk: the --image flag on ceph orch redeploy is only really meant for the ceph daemons, which is why it's trying the inspect-image there. The proper way to change the image for the nvme-of daemon would be
    # ceph orch daemon redeploy $d --image quay.io/baum/nvmeof:0.0.1
    #
    # ceph config set mgr mgr/cephadm/container_image_nvmeof <CUSTOM IMAGE>
    # ceph orch redeploy nvmeof.mypool  # service name reported by 'ceph orch ls'
    '
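
The comments embedded in the failed script already point at the fix: the --image flag on "ceph orch redeploy" is only meant for the core Ceph daemons (hence the failing inspect-image step), so the nvmeof image has to be supplied another way. A minimal sketch of the two approaches named in those comments, using quay.io/baum/nvmeof:0.0.1 from the comment as a stand-in for the custom image:

    # Approach 1: redeploy the specific daemon with an explicit image
    # ($d is an nvmeof daemon name as listed by 'ceph orch ps')
    ceph orch daemon redeploy $d --image quay.io/baum/nvmeof:0.0.1

    # Approach 2: set the image in mgr config, then redeploy the whole service
    ceph config set mgr mgr/cephadm/container_image_nvmeof quay.io/baum/nvmeof:0.0.1
    ceph orch redeploy nvmeof.mypool  # service name reported by 'ceph orch ls'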

  • log_href: http://qa-proxy.ceph.com/teuthology/bdavidov-2023-08-31_17:26:01-rbd:nvmeof-wip-barakda-cephadm-nvmeof-distro-default-smithi/7385863/teuthology.log
  • archive_path: /home/teuthworker/archive/bdavidov-2023-08-31_17:26:01-rbd:nvmeof-wip-barakda-cephadm-nvmeof-distro-default-smithi/7385863
  • description: rbd:nvmeof/{0-single-container-host base/install cluster/{fixed-3 openstack} workloads/cephadm_nvmeof}
  • duration: 0:33:22
  • email:
  • failure_reason: Command failed on smithi012 with status 2: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:dd540f6e24dadebcd77532145470e5f4e055089c shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 95a052b8-483b-11ee-9ab3-7b867c8bd7da -- bash -c \'set -ex\n#\nceph config get mgr mgr/cephadm/container_image_nvmeof\nceph orch apply nvmeof mypool --placement="1 $(hostname)"\n# Per adk: the --image flag on ceph orch redeploy is only really meant for the ceph daemons, which is why it\'"\'"\'s trying the inspect-image there. The proper way to change the image for the nvme-of daemon would be\n# ceph orch daemon redeploy $d --image quay.io/baum/nvmeof:0.0.1\n#\n# ceph config set mgr mgr/cephadm/container_image_nvmeof <CUSTOM IMAGE>\n# ceph orch redeploy nvmeof.mypool # service name reported by ‘ceph orch ls’\n\''
  • flavor:
  • job_id: 7385863
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: bdavidov-2023-08-31_17:26:01-rbd:nvmeof-wip-barakda-cephadm-nvmeof-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: wip-barakda-cephadm-nvmeof
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: dd540f6e24dadebcd77532145470e5f4e055089c
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: dd540f6e24dadebcd77532145470e5f4e055089c
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: wip-barakda-cephadm-nvmeof-teuthology
      • sha1: 5976eb7dba22ab5cfb2c0dfde6a32e6ffc729081
  • owner: scheduled_bdavidov@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
    • ['mon.b', 'osd.2', 'osd.3', 'osd.4', 'client.1']
    • ['mon.c', 'osd.5', 'osd.6', 'osd.7', 'client.2']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=0c60af94788a40d0a95193b57db4498a
  • status: fail
  • success: False
  • branch: wip-barakda-cephadm-nvmeof
  • seed:
  • sha1: dd540f6e24dadebcd77532145470e5f4e055089c
  • subset:
  • suite:
  • suite_branch: wip-barakda-cephadm-nvmeof-teuthology
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 5976eb7dba22ab5cfb2c0dfde6a32e6ffc729081
  • targets:
    • smithi012.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDh13dQGckvQjrYYQueByPrQlZb6+tW/8Q+ko/r9/YeKHxmP18aTq4ti+Lc5aEc27WbXTJ9klgoGmvS8g6Fxas0Q3MU8+NeqaHqs7mE3MvKQdm0B/fVUnzvzK4LBmHPkPvbHVt9lthyylglM0dJPyY4HwQptQh3p7OdThEU2CwjOpTTncL3NeO4yjjiJ/QzR54xGIzyZWIkBWkAhmlOcBUi+iBNelPcqqVgmaKbUcJsclGGqcmBy/gU5CgPvWITdTygDYHiVGVhVwCrStK/9TgvWje8tJwIzJmdqegOwk16QmbxqEYX0ZeW3qYnMfkPDgGE/ZRimAAQ4rAaTm3A6QzERqtmKpSyUgBY7HO9abhJWNp/JRNN/Y8VzGjk0/CYz8+L1NPCH4K/IYRtrf3SZ+/SXuhQJJlP0AxBsF97pqBLX4ktez5tCe9iptW9k2Eu7HG3YS2WnkcEJZpPgXFE/F9q1rBucbhH4RGunhWy//693QFIrnECaf8ecXQdMvvDLMU=
    • smithi026.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7PN05p7M4j2rLmjLWujlHRlavK5lCugvvGkklV7qsWudEK3ZPr7OVryKKxYoZ4IHv/pdn2IohWq8LobJ01f3rX8V6yvow6ejiMUTuPn1lgFNhrt6nmoBd3NMHYzTNrV5pOpJ4688EnSJwc0oA+MLc/L8T9Z1DK/a3AUzONh3fv3y9AZd54gkZqWVJr+HztvkeKBTsq0Kj67BKPPmG9wKopmKKPQGq3Wr4++Dd8SIQ3H0QqwdqC3h54fRIEw24zdDNDPR4WH8I//cKhv8vDL85+rnuH3/1m3ykFNAsPAsjhAD+UT0emrXd4CAIJRPi9pChrTc31WqpNjFEbt4JLbwzCU5I64luwNHTUf+CuNgFdeWnwDjP4YB+lW8Sv/prB3/Fvkt7dRHPSNuNq1fh8YGI2q7aKSfOUdRCJAYoMrYJD4D1SU155DQy/+yKwDK+6x7nLy22LECs1HTKsLE97WrekEH3BWxgW67aQQWzedg0JBazm1lunHXTua4omIQUrDc=
    • smithi099.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCzillmyZ0ouJrmrVCK6nnccqB6dDl5WoRYJ5Aood0gE/Yl12pFXzGciywzIvv8erQuv0zz5D/eVkt4+Pdg46ZEGWkdOY27ZQha6FTioE/ISVh8QeLVfXpkxYVDGMFBcN/6E2vwFjOgBDA8ENajEyjbkfEA9wlYv7JueZEmruXUUw7kh4f6IHNkQ+WtTKls4vvuBUtrkzWc1OPAaBvyloOgTd1KoTC0smLfvsvYaQioZ+Yl0wcosYqqs8QyK2peFEygrOqdIxwrwGYnmtt+YXYpSyWY7azyWns9EoVlUinhvuFDZXdp+pBR6Cj5sOXBzRqpFU+cafOCrLcGbJrvsRFjitzHD47A8MviQoXtc2v1Q6WeZW0RSKFmBHAu6UhDgVAt55VArSo49z4lr8dsShNMIbjOKOtjXiekkPj9JJONGEN0lDUmP4L8zACwoUlM0vynByeKicViPS9bUKaqFMxM3WV4+ktviVu5+cnXkL2c1VF0P3zjU+7KJ0Yub8lCu68=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • pexec:
      • all:
        • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
        • sudo dnf -y module reset container-tools
        • sudo dnf -y module install container-tools --allowerasing --nobest
        • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
        • sudo sed -i 's/runtime = "runc"/#runtime = "runc"/g' /usr/share/containers/containers.conf
        • sudo sed -i 's/#runtime = "crun"/runtime = "crun"/g' /usr/share/containers/containers.conf
    • cephadm:
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph osd lspools
        • ceph osd pool create mypool
        • ceph osd pool application enable mypool rbd
        • ceph osd lspools
        • ceph osd pool ls detail
        • set -ex
          #
          ceph config get mgr mgr/cephadm/container_image_nvmeof
          ceph orch apply nvmeof mypool --placement="1 $(hostname)"
          # Per adk: the --image flag on ceph orch redeploy is only really meant for the ceph daemons, which is why it's trying the inspect-image there. The proper way to change the image for the nvme-of daemon would be
          # ceph orch daemon redeploy $d --image quay.io/baum/nvmeof:0.0.1
          #
          # ceph config set mgr mgr/cephadm/container_image_nvmeof <CUSTOM IMAGE>
          # ceph orch redeploy nvmeof.mypool  # service name reported by 'ceph orch ls'
    • cephadm.wait_for_service:
      • service: nvmeof.mypool
    • exec:
      • host.a:
        • set -ex
          # print nvmeof start up logs
          s=$(systemctl list-units | grep nvmeof.mypool | awk '{print $1}')
          PAGER=cat journalctl -u $s
    • install:
      • extra_packages:
        • nvme-cli
    • exec:
      • client.0:
        • modprobe nvme-fabrics
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph osd lspools
        • set -ex
          s=$(ceph orch ps | grep nvmeof.mypool | awk '{print $4}')
          echo $s
          [ $s = running ]
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-08-31 17:26:07
  • started: 2023-08-31 19:33:22
  • updated: 2023-08-31 20:26:26
  • status_class: danger
  • runtime: 0:53:04
  • wait_time: 0:19:42
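
For context on the install/exec steps in the task list above: they stage the initiator side by installing nvme-cli and loading the nvme-fabrics kernel module on client.0. A follow-on workload would typically discover and connect to the nvmeof gateway along these lines (a sketch only; the gateway address, port, and subsystem NQN below are hypothetical, not taken from this job):

    # load the fabrics transport, as the job does on client.0
    sudo modprobe nvme-fabrics
    # discover subsystems exported by the gateway (address/port are hypothetical)
    sudo nvme discover -t tcp -a 10.0.0.1 -s 4420
    # connect to a discovered subsystem (NQN is hypothetical)
    sudo nvme connect -t tcp -a 10.0.0.1 -s 4420 -n nqn.2016-06.io.spdk:cnode1
    sudo nvme list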
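The final cephadm.shell check parses column 4 of the plain "ceph orch ps" output, which is positional and can break if the table layout shifts. A sturdier variant of the same check, as a sketch assuming jq is available on the host (this job does not install it):

    set -ex
    status=$(ceph orch ps --format json | jq -r '.[] | select(.daemon_type == "nvmeof") | .status_desc')
    echo "$status"
    [ "$status" = running ]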