Description: orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_ctdb_res_basic}

Log: http://qa-proxy.ceph.com/teuthology/phlogistonjohn-2024-08-14_22:46:36-orch:cephadm-wip-phlogistonjohn-testing-2024-08-14-1238-distro-default-smithi/7856196/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=9e035a1b4d9e49c6a734e56757fc5de3

Failure Reason:

Failed to download remote objects and refs: remote: Internal Server Error
fatal: unable to access 'https://github.com/ceph/keys/': The requested URL returned error: 500
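
This is an infrastructure failure, not a test regression: the job died while fetching the ceph/keys repository, before any suite task ran (hence the "dead" status below). A quick way to confirm the remote has recovered, as a sketch runnable from any shell:

    # Probe the remote that returned the 500; a zero exit status means
    # GitHub is serving the repo again and the job can simply be rescheduled.
    git ls-remote https://github.com/ceph/keys/ HEAD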

  • log_href: http://qa-proxy.ceph.com/teuthology/phlogistonjohn-2024-08-14_22:46:36-orch:cephadm-wip-phlogistonjohn-testing-2024-08-14-1238-distro-default-smithi/7856196/teuthology.log
  • archive_path: /home/teuthworker/archive/phlogistonjohn-2024-08-14_22:46:36-orch:cephadm-wip-phlogistonjohn-testing-2024-08-14-1238-distro-default-smithi/7856196
  • description: orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_ctdb_res_basic}
  • duration: 0:12:13
  • email: jmulligan@redhat.com
  • failure_reason: ['Failed to download remote objects and refs: remote: Internal Server Error', "fatal: unable to access 'https://github.com/ceph/keys/': The requested URL returned error: 500"]
  • flavor:
  • job_id: 7856196
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: phlogistonjohn-2024-08-14_22:46:36-orch:cephadm-wip-phlogistonjohn-testing-2024-08-14-1238-distro-default-smithi
  • nuke_on_error:
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: wip-phlogistonjohn-testing-2024-08-14-1238
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • log-only-match:
        • CEPHADM_
      • sha1: c80c2f448753ce17d45e4a31131a51ec11baff18
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: c80c2f448753ce17d45e4a31131a51ec11baff18
    • workunit:
      • branch: wip-phlogistonjohn-testing-2024-08-14-1238
      • sha1: c80c2f448753ce17d45e4a31131a51ec11baff18
  • owner: scheduled_phlogistonjohn@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
    • ['host.b', 'mon.b', 'osd.2', 'osd.3']
    • ['host.c', 'mon.c', 'osd.4', 'osd.5']
    • ['host.d', 'cephadm.exclude']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=9e035a1b4d9e49c6a734e56757fc5de3
  • status: dead
  • success: False
  • branch: wip-phlogistonjohn-testing-2024-08-14-1238
  • seed: 3866
  • sha1: c80c2f448753ce17d45e4a31131a51ec11baff18
  • subset:
  • suite: orch:cephadm
  • suite_branch: wip-phlogistonjohn-testing-2024-08-14-1238
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph-c_c80c2f448753ce17d45e4a31131a51ec11baff18/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph-ci.git
  • suite_sha1: c80c2f448753ce17d45e4a31131a51ec11baff18
  • targets:
    • smithi001.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBcz9wShEqLEMDE22pHXv4Xv6b5OaXCqcjcHiHv6/Y7pzz2q+kUaZuZ2dJkjfKKZYSX1jVgSg4s0kj9XpQJ0DfE=
    • smithi017.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDkBG61iTmQvAErYZh8Vgpx0EjoFuUYzwLyZodxvG+Mff/3efK0MFYBlsMqyv9fbixHNXFuL+8UifQ8ea5tIsyA=
    • smithi096.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDWUU+kSqaShA7nKc/eRdfcnwzfpWO4p52JUWyEJ9rItuQSXV5wWlI/BMvn435ngweYpiDI8bISM/SQUb+hIQxk=
    • smithi175.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNLyKtQU+Atrjc6WijbxUfyFxDawE5OAcPW8qX7xHRl6cPzp2jv9kvikObCQp/X8vQ9I7MTRKPGqRbHJy8aVQh4=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • cephadm.configure_samba_client_container:
      • role: host.d
    • cephadm:
    • cephadm.shell:
      • host.a:
        • ceph fs volume create cephfs
    • cephadm.wait_for_service:
      • service: mds.cephfs
    • cephadm.shell:
      • host.a:
        • cmd: ceph fs subvolumegroup create cephfs smb
        • cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
        • cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
        • cmd: ceph mgr module enable smb
        • cmd: sleep 30
        • cmd: ceph smb apply -i -
        • stdin: |
            # --- Begin Embedded YAML
            - resource_type: ceph.smb.cluster
              cluster_id: uctdb1
              auth_mode: user
              user_group_settings:
                - {source_type: resource, ref: ug1}
              placement:
                count: 3
            - resource_type: ceph.smb.usersgroups
              users_groups_id: ug1
              values:
                users:
                  - {name: user1, password: t3stP4ss1}
                  - {name: user2, password: t3stP4ss2}
                groups: []
            - resource_type: ceph.smb.share
              cluster_id: uctdb1
              share_id: share1
              cephfs:
                volume: cephfs
                subvolumegroup: smb
                subvolume: sv1
                path: /
            - resource_type: ceph.smb.share
              cluster_id: uctdb1
              share_id: share2
              cephfs:
                volume: cephfs
                subvolumegroup: smb
                subvolume: sv2
                path: /
            # --- End Embedded YAML
    • cephadm.wait_for_service:
      • service: smb.uctdb1
    • cephadm.shell:
      • host.a:
        • cmd: rados --pool=.smb -N uctdb1 get cluster.meta.json /dev/stdout
    • cephadm.exec:
      • host.d:
        • sleep 30
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user2%t3stP4ss2 //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls
    • cephadm.exec:
      • host.a:
        • {{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith("smb.uctdb1")))[-1].name' > /tmp/svcname
        • {{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status
        • cat /tmp/ctdb_status
        • grep 'pnn:0 .*OK' /tmp/ctdb_status
        • grep 'pnn:1 .*OK' /tmp/ctdb_status
        • grep 'pnn:2 .*OK' /tmp/ctdb_status
        • grep 'Number of nodes:3' /tmp/ctdb_status
        • rm -rf /tmp/svcname /tmp/ctdb_status
    • cephadm.exec:
      • host.d:
        • sleep 30
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.c'|role_to_remote|attr('ip_address')}}/share1 -c ls
        • {{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user2%t3stP4ss2 //{{'host.c'|role_to_remote|attr('ip_address')}}/share2 -c ls
    • cephadm.shell:
      • host.a:
        • cmd: ceph smb apply -i -
        • stdin: |
            # --- Begin Embedded YAML
            - resource_type: ceph.smb.cluster
              cluster_id: uctdb1
              intent: removed
            - resource_type: ceph.smb.usersgroups
              users_groups_id: ug1
              intent: removed
            - resource_type: ceph.smb.share
              cluster_id: uctdb1
              share_id: share1
              intent: removed
            - resource_type: ceph.smb.share
              cluster_id: uctdb1
              share_id: share2
              intent: removed
            # --- End Embedded YAML
    • cephadm.wait_for_service_not_present:
      • service: smb.uctdb1
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 72
  • user: phlogistonjohn
  • queue:
  • posted: 2024-08-14 22:47:09
  • started: 2024-08-14 22:54:56
  • updated: 2024-08-14 23:18:02
  • status_class: danger
  • runtime: 0:23:06
  • wait_time: 0:10:53
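
For reference, the checks this job performs can be reproduced by hand. After the first `ceph smb apply -i -` step in the tasks above, the smb mgr module's view of the declared resources can be read back; a sketch (the exact output shape may vary by release):

    # Dump the smb resources the module currently holds, for review.
    ceph smb show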
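
The `cluster.meta.json` fetch dumps raw JSON describing which nodes have joined the CTDB cluster; piping it through jq (already used by this job for service discovery) makes it readable. A sketch:

    rados --pool=.smb -N uctdb1 get cluster.meta.json /dev/stdout | jq .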
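
The smbclient steps only list the share root; a write/read round trip would additionally exercise the mode=0777 subvolumes. A sketch against share1, run from the host.d client container ("probe.txt" is an arbitrary name and HOST stands for a node running smbd):

    echo smoke > /tmp/probe.txt
    smbclient -U user1%t3stP4ss1 //HOST/share1 -c 'put /tmp/probe.txt probe.txt; ls; del probe.txt'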
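
The grep assertions against /tmp/ctdb_status expect healthy three-node `ctdb status` output of roughly this shape (illustrative only; addresses and node order vary per run):

    Number of nodes:3
    pnn:0 172.21.x.x       OK (THIS NODE)
    pnn:1 172.21.x.x       OK
    pnn:2 172.21.x.x       OK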
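
Finally, after the `intent: removed` apply, `cephadm.wait_for_service_not_present` polls until smb.uctdb1 disappears; the same condition can be checked by hand through the orchestrator. A sketch (an empty listing means teardown completed):

    ceph orch ls smb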