Description: orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_ctdb_res_dom}

Log: http://qa-proxy.ceph.com/teuthology/gabrioux-2024-09-13_12:30:34-orch:cephadm-wip-guits-main-2024-09-13-0840-distro-default-smithi/7903653/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=1c5547f089254e4980e4bd504cb1f7a8

Failure Reason:

Command failed on smithi012 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:b0201b8c79733293453e7f10a10c7fa43119222b shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 56b16aba-71d4-11ef-bceb-c7b262605968 -- bash -c 'ceph smb apply -i -'"
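
Exit status 22 from the ceph CLI maps to EINVAL, i.e. the mgr/smb module rejected the piped-in resource description rather than the command failing to start. A minimal sketch for reproducing the apply by hand on smithi012, assuming the same image and fsid as above and that the embedded YAML from the tasks section below has been saved to a hypothetical /tmp/smb.yaml:

    # Re-run the failing apply inside a cephadm shell (sketch, not verified):
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:b0201b8c79733293453e7f10a10c7fa43119222b \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 56b16aba-71d4-11ef-bceb-c7b262605968 \
        -- bash -c 'ceph smb apply -i -' < /tmp/smb.yaml

Sample ctdb status output matching the grep checks, and a manual verification sketch for the apply/removal steps, follow the job record below.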

  • log_href: http://qa-proxy.ceph.com/teuthology/gabrioux-2024-09-13_12:30:34-orch:cephadm-wip-guits-main-2024-09-13-0840-distro-default-smithi/7903653/teuthology.log
  • archive_path: /home/teuthworker/archive/gabrioux-2024-09-13_12:30:34-orch:cephadm-wip-guits-main-2024-09-13-0840-distro-default-smithi/7903653
  • description: orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_ctdb_res_dom}
  • duration: 0:31:18
  • email: gabrioux@ibm.com
  • failure_reason: Command failed on smithi012 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:b0201b8c79733293453e7f10a10c7fa43119222b shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 56b16aba-71d4-11ef-bceb-c7b262605968 -- bash -c 'ceph smb apply -i -'"
  • flavor:
  • job_id: 7903653
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: gabrioux-2024-09-13_12:30:34-orch:cephadm-wip-guits-main-2024-09-13-0840-distro-default-smithi
  • nuke_on_error:
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: wip-guits-main-2024-09-13-0840
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • log-only-match:
        • CEPHADM_
      • sha1: b0201b8c79733293453e7f10a10c7fa43119222b
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: b0201b8c79733293453e7f10a10c7fa43119222b
    • workunit:
      • branch: wip-guits-main-2024-09-13-0840
      • sha1: b0201b8c79733293453e7f10a10c7fa43119222b
  • owner: scheduled_gabrioux@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
    • ['host.b', 'mon.b', 'osd.2', 'osd.3']
    • ['host.c', 'mon.c', 'osd.4', 'osd.5']
    • ['host.d', 'cephadm.exclude']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=1c5547f089254e4980e4bd504cb1f7a8
  • status: fail
  • success: False
  • branch: wip-guits-main-2024-09-13-0840
  • seed: 4031
  • sha1: b0201b8c79733293453e7f10a10c7fa43119222b
  • subset: 1/8
  • suite: orch:cephadm
  • suite_branch: wip-guits-main-2024-09-13-0840
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph-c_b0201b8c79733293453e7f10a10c7fa43119222b/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph-ci.git
  • suite_sha1: b0201b8c79733293453e7f10a10c7fa43119222b
  • targets:
    • smithi012.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEJvb8tgSyJTyqSBbFnLgq/OwGlPa1yOCq13naG1cy2mJWUNzXDogM/0/+zF3BhEZk6cWUj+pT8zhp1LLsRP5uo=
    • smithi040.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLT0DuuPZ/uNPgcIuG2VrY3+Xv2LsY8Z1BU4+e7/HRHwH/j/i5Nt0ctZi15z3NWOUXEQidbEEe3HI62NAqoVDmA=
    • smithi134.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOa96Cnb9K/A/HwxAt8KKw338eT+cEhMRDTZZIHd7gdanAzxdfGnDg+S1omPIQa3JWptF1aeXhRrBFJOBit+HB0=
    • smithi145.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBD1ZVYKUhTN2J1lAPsyH68Qq9LI9voP91Rc/nP4GrH6LPjt0gWTW8YzY26Udasvn3gAIM6O1qNo9WC5Vw4x+j2g=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • cephadm.deploy_samba_ad_dc:
      • role: host.d
    • cephadm:
    • cephadm.shell:
      • host.a:
        • ceph fs volume create cephfs
    • cephadm.wait_for_service:
      • service: mds.cephfs
    • cephadm.shell:
      • host.a:
        • cmd: ceph fs subvolumegroup create cephfs smb
        • cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
        • cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
        • cmd: ceph mgr module enable smb
        • cmd: sleep 30
        • cmd: ceph smb apply -i -
        • stdin: |
            # --- Begin Embedded YAML
            - resource_type: ceph.smb.cluster
              cluster_id: adctdb1
              auth_mode: active-directory
              domain_settings:
                realm: DOMAIN1.SINK.TEST
                join_sources:
                  - source_type: resource
                    ref: join1-admin
              custom_dns:
                - "172.21.15.145"
              placement:
                count: 3
            - resource_type: ceph.smb.join.auth
              auth_id: join1-admin
              auth:
                username: Administrator
                password: Passw0rd
            - resource_type: ceph.smb.share
              cluster_id: adctdb1
              share_id: share1
              cephfs:
                volume: cephfs
                subvolumegroup: smb
                subvolume: sv1
                path: /
            - resource_type: ceph.smb.share
              cluster_id: adctdb1
              share_id: share2
              cephfs:
                volume: cephfs
                subvolumegroup: smb
                subvolume: sv2
                path: /
            # --- End Embedded YAML
    • cephadm.wait_for_service:
      • service: smb.adctdb1
    • cephadm.shell:
      • host.a:
        • cmd: rados --pool=.smb -N adctdb1 get cluster.meta.json /dev/stdout
    • cephadm.exec:
      • host.d:
        • sleep 30
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls
    • cephadm.exec:
      • host.a:
        • {{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith("smb.adctdb1")))[-1].name' > /tmp/svcname
        • {{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status
        • cat /tmp/ctdb_status
        • grep 'pnn:0 .*OK' /tmp/ctdb_status
        • grep 'pnn:1 .*OK' /tmp/ctdb_status
        • grep 'pnn:2 .*OK' /tmp/ctdb_status
        • grep 'Number of nodes:3' /tmp/ctdb_status
        • rm -rf /tmp/svcname /tmp/ctdb_status
    • cephadm.exec:
      • host.d:
        • sleep 30
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{'host.c'|role_to_remote|attr('ip_address')}}/share1 -c ls
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{'host.c'|role_to_remote|attr('ip_address')}}/share2 -c ls
    • cephadm.shell:
      • host.a:
        • cmd: ceph smb apply -i -
        • stdin: |
            # --- Begin Embedded YAML
            - resource_type: ceph.smb.cluster
              cluster_id: adctdb1
              intent: removed
            - resource_type: ceph.smb.join.auth
              auth_id: join1-admin
              intent: removed
            - resource_type: ceph.smb.share
              cluster_id: adctdb1
              share_id: share1
              intent: removed
            - resource_type: ceph.smb.share
              cluster_id: adctdb1
              share_id: share2
              intent: removed
            # --- End Embedded YAML
    • cephadm.wait_for_service_not_present:
      • service: smb.adctdb1
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority: 70
  • user: gabrioux
  • queue:
  • posted: 2024-09-13 12:32:28
  • started: 2024-09-13 13:11:07
  • updated: 2024-09-13 13:54:30
  • status_class: danger
  • runtime: 0:43:23
  • wait_time: 0:12:05
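
For reference, the grep checks in the ctdb step above expect 'ctdb status' output roughly of the following shape (illustrative only; the node addresses are hypothetical, and real output includes further Generation/Recovery lines):

    Number of nodes:3
    pnn:0 172.21.15.12      OK (THIS NODE)
    pnn:1 172.21.15.40      OK
    pnn:2 172.21.15.134     OK
    Recovery mode:NORMAL (0)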
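The apply and removal steps can also be spot-checked by hand from a cephadm shell; a sketch, assuming 'ceph smb show' is available in this build:

    # Dump the committed smb resources; should list the cluster, join auth
    # and shares after the first apply, and nothing after the removal apply.
    ceph smb show
    # Orchestrator view of the service the test waits on:
    ceph orch ls smb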