log_href: http://qa-proxy.ceph.com/teuthology/ideepika-2021-06-16_14:36:44-rados:cephadm-wip-yuri7-testing-2021-06-08-0747-octopus-distro-basic-smithi/6175013/teuthology.log
archive_path: /home/teuthworker/archive/ideepika-2021-06-16_14:36:44-rados:cephadm-wip-yuri7-testing-2021-06-08-0747-octopus-distro-basic-smithi/6175013
description: rados:cephadm/with-work/{0-distro/centos_8.2_kubic_stable fixed-2 mode/packaged msgr/async-v2only start tasks/rados_api_tests}
duration: 0:51:16
email:
failure_reason:
flavor: basic
job_id: 6175013
kernel:
last_in_suite: False
machine_type: smithi
name: ideepika-2021-06-16_14:36:44-rados:cephadm-wip-yuri7-testing-2021-06-08-0747-octopus-distro-basic-smithi
nuke_on_error: True
os_type: centos
os_version: 8.2
overrides:
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
  selinux:
    whitelist:
    - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    sha1: 98bd5bed7d730c046de4f79df1015450d9a39bd4
    branch: wip-remove-bionic
  ceph:
    sha1: 8d06216f6dddb80abdd00fdeb5a0fdc62c959b54
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    conf:
      global:
        ms bind msgr1: False
        ms type: async
        ms bind msgr2: True
      mgr:
        debug ms: 1
        debug mgr: 20
      client:
      mon:
        debug paxos: 20
        mon warn on pool no app: False
        debug mon: 20
        debug ms: 1
      osd:
        debug osd: 20
        debug ms: 1
        osd shutdown pgref assert: True
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - reached quota
    - but it is still running
    - overall HEALTH_
    - \(POOL_FULL\)
    - \(SMALLER_PGP_NUM\)
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(CACHE_POOL_NEAR_FULL\)
    - \(POOL_APP_NOT_ENABLED\)
    - \(PG_AVAILABILITY\)
    - \(PG_DEGRADED\)
  cephadm:
    cephadm_mode: cephadm-package
  install:
    ceph:
      sha1: 8d06216f6dddb80abdd00fdeb5a0fdc62c959b54
      extra_packages:
  admin_socket:
    branch: wip-yuri7-testing-2021-06-08-0747-octopus
owner: scheduled_ideepika@teuthology
pid:
roles:
- ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'ceph.rgw.realm.zone.a', 'node-exporter.a', 'alertmanager.a']
- ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b', 'ceph.iscsi.iscsi.a']
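# fixed-2 layout per the job description: each role list above is placed on one of the
# two smithi machines listed under "targets" below.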
sentry_event:
status: pass
success: True
branch: wip-yuri7-testing-2021-06-08-0747-octopus
seed:
sha1: 8d06216f6dddb80abdd00fdeb5a0fdc62c959b54
subset:
suite:
suite_branch: wip-remove-bionic
suite_path:
suite_relpath:
suite_repo:
suite_sha1: 98bd5bed7d730c046de4f79df1015450d9a39bd4
targets:
  smithi068.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDiy1Pl5t/8wdyTPCGndprl/RHS6vLW5LUFmDeTyz2VOEMiIdl8+UUGZqnr5CwRna37LWz7wPU+9R0ZuZzVii+dpErBJPp/JmB1KrrxdPXjHkggCpoOv+w2Zv+1cdYtKMd9MRPVspwdDWhlw/rU/9JTM+yOUJcyFe1JVB34Ke3lRLJKADv6xg0serLQK5WYC2MHqfqPFTqn7lDyrRC96zX8B1LTtel159LR74E1s57jU3hhHfo8RLEbSVBkVqdItYcCGR8jOucA8Uhl5qHR6tJoygznfJqHNjPfoNipiMtUf0zQnDpa1yKNRTfaoiot9tnFYfC3O0ftqjiVJVgsbRvytTenjoVbGxBytvsZzf+k4pQwnyv0FQfPQ3yyYq7ZczMkAI3XmZXiKbnNoTiNegqqEa8L4paEFDUwsyWSvalwHj7F/8u7L+0yQQLeRWOeL/iX/pSYYyUolnHt/Crn30namW2qKtOhNOSia/LpmHhmXNbtH+6sDLH8xrIhF6GMbdk=
  smithi139.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvsvgjUWTbL177PVF3Yyh7mIARKLbTLKh7OUv4gp+jKFphKaFSKdNa9i/Ao6FmVLFXXO1L5BHELDOwk9+2rEyQ0HlCvz9cnMUlCI47tG6di58XsXqG6Ikwyby41e4elGvzxX5O2b2QQUA/gjBgUtv5csdyb6hOwv1LwAVRg4Mr2+/keM+6SLMVxBwVLnF3wqAlPcEC/uweSmACYrpGKTZPResfVuX1JgcoC7FMavnVCid6p4mMmFi4lDGnwISnGojyI764TGIH8Yq6BwBG94HzuSADD72Y9FfOgdKFMQOUNtyJkdIs1/i7Ip+yarJWvZ+eoRRWTPIgEmsWWKVTcujXz4MZX0DMRAZ++kAWHTmAsh962kw2AF7pQI54L1/8VR9x8yjSyXT3R4slL4BV16nMO+SWODdDraoN7EngBworc0mmszJLQ4Keak6Ay3uAZ3Q7qyj4XZRoeBgc0HyDz0HDjH8fac6m7xd0HSmwQq99rGqX178iQjS+BvBgl6JA4Ts=
tasks:
- internal.buildpackages_prep:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
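# The exec task below corresponds to the 0-distro/centos_8.2_kubic_stable facet named in the
# description: it replaces the container-tools podman with the Kubic
# devel:kubic:libcontainers:stable build and preserves the Sepia registry-mirror
# registries.conf across the reinstall.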
- exec:
    all:
    - echo -e "[[registry]]\nlocation = 'docker.io'\n\n[[registry.mirror]]\nlocation='docker-mirror.front.sepia.ceph.com:5000'\n" | sudo tee /etc/containers/registries.conf
    - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
    - sudo dnf -y module disable container-tools
    - sudo dnf -y install 'dnf-command(copr)'
    - sudo dnf -y copr enable rhcontainerbot/container-selinux
    - sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo
    - sudo dnf remove -y podman
    - sudo dnf -y install podman
    - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
- install:
- cephadm:
    sha1: 8d06216f6dddb80abdd00fdeb5a0fdc62c959b54
    cephadm_mode: cephadm-package
    cluster: ceph
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    conf:
      global:
        ms bind msgr1: False
        ms type: async
        ms bind msgr2: True
      mgr:
        debug ms: 1
        debug mgr: 20
      client:
      mon:
        debug paxos: 20
        mon warn on pool no app: False
        debug mon: 20
        debug ms: 1
      osd:
        debug osd: 20
        debug ms: 1
        osd shutdown pgref assert: True
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - reached quota
    - but it is still running
    - overall HEALTH_
    - \(POOL_FULL\)
    - \(SMALLER_PGP_NUM\)
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(CACHE_POOL_NEAR_FULL\)
    - \(POOL_APP_NOT_ENABLED\)
    - \(PG_AVAILABILITY\)
    - \(PG_DEGRADED\)
- workunit:
    clients:
      client.0:
      - rados/test.sh
      - rados/test_pool_quota.sh
    branch: wip-remove-bionic
    sha1: 98bd5bed7d730c046de4f79df1015450d9a39bd4
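# tasks/rados_api_tests facet: the workunit above runs rados/test.sh and
# rados/test_pool_quota.sh on client.0 from the wip-remove-bionic suite branch.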
teuthology_branch: master
verbose: True
pcp_grafana_url:
priority:
user:
queue:
posted: 2021-06-16 14:36:56
started: 2021-06-16 14:36:56
updated: 2021-06-16 15:37:59
status_class: success
runtime: 1:01:03
wait_time: 0:09:47