log_href: http://qa-proxy.ceph.com/teuthology/adking-2024-09-10_02:59:23-orch:cephadm-wip-adk-testing-2024-09-09-1904-distro-default-smithi/7898620/teuthology.log
archive_path: /home/teuthworker/archive/adking-2024-09-10_02:59:23-orch:cephadm-wip-adk-testing-2024-09-09-1904-distro-default-smithi/7898620
description: orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_ctdb_res_basic}
duration: 0:15:07
email: adking@redhat.com
failure_reason:
flavor:
job_id: 7898620
kernel:
last_in_suite: False
machine_type: smithi
name: adking-2024-09-10_02:59:23-orch:cephadm-wip-adk-testing-2024-09-09-1904-distro-default-smithi
nuke_on_error:
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: wip-adk-testing-2024-09-09-1904
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    log-only-match:
    sha1: 3a5e3f16720f432aea965c40a8f6eb480095ec6c
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
  install:
    ceph:
      flavor: default
      sha1: 3a5e3f16720f432aea965c40a8f6eb480095ec6c
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    branch: wip-adk-testing-2024-09-09-1904
    sha1: 3a5e3f16720f432aea965c40a8f6eb480095ec6c
owner: scheduled_adking@teuthology
pid:
roles:
- ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0']
- ['host.b', 'mon.b', 'osd.2', 'osd.3']
- ['host.c', 'mon.c', 'osd.4', 'osd.5']
- ['host.d', 'cephadm.exclude']
sentry_event:
status: pass
success: True
branch: wip-adk-testing-2024-09-09-1904
seed: 6964
sha1: 3a5e3f16720f432aea965c40a8f6eb480095ec6c
subset: 1/20
suite: orch:cephadm
suite_branch: wip-adk-testing-2024-09-09-1904
suite_path: /home/teuthworker/src/git.ceph.com_ceph-c_3a5e3f16720f432aea965c40a8f6eb480095ec6c/qa
suite_relpath: qa
suite_repo: https://git.ceph.com/ceph-ci.git
suite_sha1: 3a5e3f16720f432aea965c40a8f6eb480095ec6c
targets:
  smithi003.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEhkn79DlNwBwzHB95gOHwg/ntFgHTNRT9bgDIFgJj9+84bawdDLSG27FlLttSQK5Hop3l/vvh5b8A+tdk8t850=
  smithi097.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJ1PlJUc1HD4hhhZdJKEHJpBUmu+UUQnnJcAfz+Yh4UR7n9jn8OttvAcXm8ON7cUF2qiZ7EQdKPFV9mZ8EYESjY=
  smithi135.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBP/jW4Y71KmsTvW0feyfE8vCk+o8ePcKqAbkjySgy0I6jvXiJxMjg4SnpRaXhde4WzfNU0MdZr4zGnHpnDmIOfY=
  smithi142.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBmP0aHe7CvuKGicenrHdP9uCBxW/5u68i05qZThj+RaigZuXnBroX52l4C3TdESRt0qYLAF1oxhYQujfzAnXnk=
tasks:
- internal.buildpackages_prep:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install runc nvmetcli nvme-cli -y
    - sudo sed -i 's/^#runtime = "crun"/runtime = "runc"/g' /usr/share/containers/containers.conf
    - sudo sed -i 's/runtime = "crun"/#runtime = "crun"/g' /usr/share/containers/containers.conf
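    # The two sed edits above flip podman's default OCI runtime from crun to
    # runc in /usr/share/containers/containers.conf; that is what the
    # "centos_9.stream_runc" distro fragment is for. A hedged way to confirm
    # the switch on a host, assuming podman is installed:
    #   podman info --format '{{.Host.OCIRuntime.Name}}'   # expect: runc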
- cephadm.configure_samba_client_container:
- cephadm:
- cephadm.shell:
    host.a:
    - ceph fs volume create cephfs
- cephadm.wait_for_service:
- cephadm.shell:
    host.a:
    - cmd: ceph fs subvolumegroup create cephfs smb
    - cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
    - cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
    - cmd: ceph mgr module enable smb
    - cmd: sleep 30
    - cmd: ceph smb apply -i -
      stdin: |
        # --- Begin Embedded YAML
        - resource_type: ceph.smb.cluster
          cluster_id: uctdb1
          auth_mode: user
          user_group_settings:
          - {source_type: resource, ref: ug1}
          placement:
            count: 3
        - resource_type: ceph.smb.usersgroups
          users_groups_id: ug1
          values:
            users:
            - {name: user1, password: t3stP4ss1}
            - {name: user2, password: t3stP4ss2}
            groups: []
        - resource_type: ceph.smb.share
          cluster_id: uctdb1
          share_id: share1
          cephfs:
            volume: cephfs
            subvolumegroup: smb
            subvolume: sv1
            path: /
        - resource_type: ceph.smb.share
          cluster_id: uctdb1
          share_id: share2
          cephfs:
            volume: cephfs
            subvolumegroup: smb
            subvolume: sv2
            path: /
        # --- End Embedded YAML
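# The embedded spec above declares one ceph.smb.cluster ('uctdb1', user
# auth_mode, placement count 3, so samba runs on three hosts with CTDB
# clustering), a ceph.smb.usersgroups resource carrying the two test users,
# and two ceph.smb.share resources backed by the CephFS subvolumes created
# earlier. A plausible way to inspect what the smb mgr module accepted
# (output shape may vary by release):
#   ceph smb show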
- cephadm.wait_for_service:
- cephadm.shell:
    host.a:
    - cmd: rados --pool=.smb -N uctdb1 get cluster.meta.json /dev/stdout
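# The smb module keeps per-cluster metadata in the '.smb' rados pool under a
# namespace matching the cluster_id; dumping cluster.meta.json to stdout is a
# cheap sanity check that the module persisted its state for 'uctdb1'.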
- cephadm.exec:
    host.d:
    - sleep 30
    - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
    - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user2%t3stP4ss2 //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"
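# host.d carries the 'cephadm.exclude' role, so it sits outside the cephadm
# cluster and acts as a pure SMB client: the templated command prefix comes
# from the earlier cephadm.configure_samba_client_container task, and
# role_to_remote resolves 'host.a' to its IP so smbclient can list each share
# as user1 and user2.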
- cephadm.exec:
    host.a:
    - "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.uctdb1\")))[-1].name' > /tmp/svcname"
    - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
    - cat /tmp/ctdb_status
    - grep 'pnn:0 .*OK' /tmp/ctdb_status
    - grep 'pnn:1 .*OK' /tmp/ctdb_status
    - grep 'pnn:2 .*OK' /tmp/ctdb_status
    - grep 'Number of nodes:3' /tmp/ctdb_status
    - rm -rf /tmp/svcname /tmp/ctdb_status
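# The greps assert a healthy three-node CTDB cluster inside the deployed
# samba daemon. Illustrative 'ctdb status' output (not captured from this
# run) that would satisfy all four checks:
#   Number of nodes:3
#   pnn:0 172.21.0.1     OK (THIS NODE)
#   pnn:1 172.21.0.2     OK
#   pnn:2 172.21.0.3     OK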
- cephadm.exec:
    host.d:
    - sleep 30
    - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.c'|role_to_remote|attr('ip_address')}}/share1 -c ls"
    - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user2%t3stP4ss2 //{{'host.c'|role_to_remote|attr('ip_address')}}/share2 -c ls"
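# Same client checks as before, but pointed at host.c instead of host.a:
# with placement count 3 every samba/CTDB node should serve both shares, so
# listing them through a second node exercises the clustered deployment.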
- cephadm.shell:
    host.a:
    - cmd: ceph smb apply -i -
      stdin: |
        # --- Begin Embedded YAML
        - resource_type: ceph.smb.cluster
          cluster_id: uctdb1
          intent: removed
        - resource_type: ceph.smb.usersgroups
          users_groups_id: ug1
          intent: removed
        - resource_type: ceph.smb.share
          cluster_id: uctdb1
          share_id: share1
          intent: removed
        - resource_type: ceph.smb.share
          cluster_id: uctdb1
          share_id: share2
          intent: removed
        # --- End Embedded YAML
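# Teardown is declarative too: re-applying each resource with
# 'intent: removed' asks the smb module to delete it, and the
# cephadm.wait_for_service_not_present task below confirms the smb.uctdb1
# service is actually gone.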
- cephadm.wait_for_service_not_present:
teuthology_branch: main
verbose: False
pcp_grafana_url:
priority: 80
user: adking
queue:
posted: 2024-09-10 03:00:52
started: 2024-09-10 03:49:59
updated: 2024-09-10 04:16:14
status_class: success
runtime: 0:26:15
wait_time: 0:11:08