Description: orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-19_21:08:02-orch-squid-distro-default-smithi/7663856/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-04-19_21:08:02-orch-squid-distro-default-smithi/7663856/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-04-19_21:08:02-orch-squid-distro-default-smithi/7663856
  • description: orch/cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain}
  • duration: 0:08:45
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor:
  • job_id: 7663856
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-04-19_21:08:02-orch-squid-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: squid
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • log-only-match:
        • CEPHADM_
      • sha1: d0bf63dd179c05751e4be49477389aa290c73089
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: d0bf63dd179c05751e4be49477389aa290c73089
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: squid
      • sha1: e02d68a84525e0fa855f080a47dd96b350c21c42
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
    • ['host.b', 'cephadm.exclude']
  • sentry_event:
  • status: pass
  • success: True
  • branch: squid
  • seed: 5868
  • sha1: d0bf63dd179c05751e4be49477389aa290c73089
  • subset: 38/64
  • suite: orch
  • suite_branch: squid
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph_e02d68a84525e0fa855f080a47dd96b350c21c42/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph.git
  • suite_sha1: e02d68a84525e0fa855f080a47dd96b350c21c42
  • targets:
    • smithi113.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBM6sQ0m+NEpxvRbCfZWR++aAPsB9SHeTLrrQlOM/NNrcM8xc+8SehmKFfvjPyd1xT4r9g0K/4YiLWVEoHnpEKEs=
    • smithi178.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN9jzNW6N2yK5CjPNIwnjzkJMj2bXSYGowCnQfZUColm9Qg9tr3mtuMBRlhQSNfn8vzG5s4DYh/AW0w0zmXu2p0=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • pexec:
      • all:
        • sudo dnf install runc -y
        • sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
        • sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
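
The pexec step above pins podman to the runc OCI runtime: the first sed enables a runtime = "runc" line in /usr/share/containers/containers.conf, the second comments out any remaining crun default. A minimal sketch for checking that the switch took effect on a host (the grep pattern and format query are illustrative, not part of the job):

    # Confirm containers.conf now selects runc in its [engine] table
    grep -n '^runtime = "runc"' /usr/share/containers/containers.conf

    # Ask podman which OCI runtime it will actually use; expect "runc"
    podman info --format '{{.Host.OCIRuntime.Name}}'
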
    • cephadm.deploy_samba_ad_dc:
      • role: host.b
    • cephadm:
    • cephadm.shell:
      • host.a:
        • ceph fs volume create cephfs
    • cephadm.wait_for_service:
      • service: mds.cephfs
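
ceph fs volume create cephfs has the mgr create the filesystem pools and schedule an mds.cephfs service, which cephadm.wait_for_service then polls until a daemon reports running. A hedged sketch of the same check done interactively from host.a:

    # Service-level view: mds.cephfs should appear with running daemons
    ceph orch ls mds

    # Filesystem view: an active MDS rank means the volume is usable
    ceph fs status cephfs
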
    • cephadm.shell:
      • host.a:
        • cmd: ceph fs subvolumegroup create cephfs g1
        • cmd: ceph fs subvolume create cephfs sub1 --group-name=g1 --mode=0777
        • cmd: ceph fs authorize cephfs client.smbdata / rw
        • cmd: ceph osd pool create .smb --yes-i-really-mean-it
        • cmd: ceph osd pool application enable .smb smb
        • cmd: rados --pool=.smb --namespace=admem1 put conf.toml /dev/stdin
        • stdin:
            samba-container-config = "v0"
            [configs.admem1]
            shares = ["share1"]
            globals = ["default", "domain"]
            instance_name = "SAMBA"
            [shares.share1.options]
            "vfs objects" = "ceph"
            path = "/"
            "ceph:config_file" = "/etc/ceph/ceph.conf"
            "ceph:user_id" = "smbdata"
            "kernel share modes" = "no"
            "read only" = "no"
            "browseable" = "yes"
            [globals.default.options]
            "server min protocol" = "SMB2"
            "load printers" = "no"
            "printing" = "bsd"
            "printcap name" = "/dev/null"
            "disable spoolss" = "yes"
            "guest ok" = "no"
            [globals.domain.options]
            security = "ads"
            workgroup = "DOMAIN1"
            realm = "domain1.sink.test"
            "idmap config * : backend" = "autorid"
            "idmap config * : range" = "2000-9999999"
        • cmd: ceph config-key set smb/config/admem1/join1.json -i -
        • stdin: {"username": "Administrator", "password": "Passw0rd"}
    • cephadm.apply:
      • specs:
        • cluster_id: admem1
        • config_uri: rados://.smb/admem1/conf.toml
        • custom_dns:
          • 172.21.15.178
        • features:
          • domain
        • include_ceph_users:
          • client.smbdata
        • join_sources:
          • rados:mon-config-key:smb/config/admem1/join1.json
        • placement:
          • count: 1
        • service_id: admem1
        • service_type: smb
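
cephadm.apply hands that spec to the orchestrator. Outside teuthology the same deployment could be expressed as a service spec piped to the CLI; a sketch of what the equivalent YAML would look like, with every field copied from the spec above:

    cat <<'EOF' | ceph orch apply -i -
    service_type: smb
    service_id: admem1
    cluster_id: admem1
    placement:
      count: 1
    features:
      - domain
    config_uri: rados://.smb/admem1/conf.toml
    join_sources:
      - rados:mon-config-key:smb/config/admem1/join1.json
    custom_dns:
      - 172.21.15.178
    include_ceph_users:
      - client.smbdata
    EOF
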
    • cephadm.wait_for_service:
      • service: smb.admem1
    • cephadm.exec:
      • host.b:
        • sleep 30
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls
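
The closing check drives smbclient from a samba-client container on host.b against the share served from host.a, authenticating as the AD user DOMAIN1\ckent. Expanded by hand, the templated command comes out roughly as below; the container image and the host.a address (172.21.15.113 would be the usual address for smithi113) are inferred rather than taken from the job YAML:

    # Assumed expansion of ctx.samba_client_container_cmd (image name is a guess)
    sudo podman run --rm quay.io/samba.org/samba-client:latest \
        smbclient -U 'DOMAIN1\ckent%1115Rose.' //172.21.15.113/share1 -c ls

A directory listing coming back exercises the whole chain: the AD join from join1.json, the domain-secured globals, and the CephFS-backed share (vfs objects = ceph) as client.smbdata.
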
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 100
  • user: teuthology
  • queue:
  • posted: 2024-04-19 21:10:41
  • started: 2024-04-19 21:58:04
  • updated: 2024-04-19 22:14:35
  • status_class: success
  • runtime: 0:16:31
  • wait_time: 0:07:46