log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2023-02-20_23:16:20-rados-wip-yuri11-testing-2023-02-20-1329-distro-default-smithi/7181699/teuthology.log
archive_path: /home/teuthworker/archive/yuriw-2023-02-20_23:16:20-rados-wip-yuri11-testing-2023-02-20-1329-distro-default-smithi/7181699
description: rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-comp-zstd} tasks/dashboard}
duration: 4911
email: yweinstein@radhat.com
failure_reason:
flavor: default
job_id: 7181699
kernel:
last_in_suite: False
machine_type: smithi
name: yuriw-2023-02-20_23:16:20-rados-wip-yuri11-testing-2023-02-20-1329-distro-default-smithi
nuke_on_error: True
os_type: centos
os_version: 8.stream
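# overrides: settings that teuthology deep-merges into the like-named tasks
# below (ceph conf, install flavor/sha1, selinux whitelist, thrashosds and
# workunit settings) before the job runs.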
overrides:
  admin_socket:
    branch: wip-yuri11-testing-2023-02-20-1329
  ceph:
    conf:
      client:
        debug client: 20
        debug mgrc: 20
        debug ms: 1
      global:
        mon election default strategy: 3
      mds:
      mgr:
        debug client: 20
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        bluestore block size: 96636764160
        bluestore compression algorithm: zstd
        bluestore compression mode: aggressive
        bluestore fsck on mount: True
        debug bluefs: 1/20
        debug bluestore: 1/20
        debug mgrc: 20
        debug ms: 1
        debug osd: 20
        debug rocksdb: 4/10
        mon osd backfillfull_ratio: 0.85
        mon osd full ratio: 0.9
        mon osd nearfull ratio: 0.8
        osd failsafe full ratio: 0.95
        osd mclock override recovery settings: True
        osd objectstore: bluestore
    flavor: default
    fs: xfs
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    sha1: 16541dad5d08f0ee933bf6631e531036f3f057e4
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
  install:
    ceph:
      flavor: default
      sha1: 16541dad5d08f0ee933bf6631e531036f3f057e4
  selinux:
    whitelist:
    - scontext=system_u:system_r:logrotate_t:s0
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: 0.5
  workunit:
    branch: wip-yuri11-testing-2023-02-20-1329
    sha1: 16541dad5d08f0ee933bf6631e531036f3f057e4
owner: scheduled_yuriw@teuthology
pid: 32053
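# roles: each list is the set of daemon and client roles that teuthology
# places on one test node; this job spans two smithi machines.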
roles:
- ['mgr.x', 'mon.a', 'mon.c', 'mds.a', 'mds.c', 'osd.0', 'client.0']
- ['mgr.y', 'mgr.z', 'mon.b', 'mds.b', 'osd.1', 'osd.2', 'osd.3', 'client.1']
sentry_event:
status: pass
success: True
branch: wip-yuri11-testing-2023-02-20-1329
seed:
sha1: 16541dad5d08f0ee933bf6631e531036f3f057e4
subset:
suite:
suite_branch: wip-yuri11-testing-2023-02-20-1329
suite_path:
suite_relpath:
suite_repo:
suite_sha1: 16541dad5d08f0ee933bf6631e531036f3f057e4
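# targets: the nodes locked for this run, keyed by hostname, each with the
# SSH host public key recorded for it.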
targets:
  smithi161.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD08ZYWpYlCKVZZSYKrEUYc69tSrnftMzpqv5eL75VjQKCqit0sFQ2goNmHS1k2YQurKyB8z8GO4TJs4cXJB94zYvBp2jJ6ZuG1cdrybhuBgeZjl1VF5p8NhiB3oMbHDyRC6MSXTQ4IkDJ6VNCTS4w0VoOsuxtTYmMHVhfzmmYOUJkqKrkZgMPx71Mvg79LzSbh3f6a5ndI67yAYtVOny8FKioR4A7E4QHtXsNh3DFFWg9olPdYGzMpBD6q94OFdq9StvRpQsuaU6lbnbvA/i8WFAfbv4yb9CX/8Wm8hvkAb2vJHhWAuYJjy9OusKSz5Cs6/zuzec710ChLWAH0Pcb0FZpVD/U5Wg2YT3/T0jpOaYb4pz6MdoOeLATae0GrWWG343xHB/RE+5EPs7wUHWnr9ck5nJ47FAOSvTsM9snXNS4sCVm20FNA+q22AauqebVcmQfV9hIanxZFmWzxvnt/1vrVrKCqp2bgYnkvPho6dQaQKVRtU//U7F4oRGH4og8=
  smithi195.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBhd0jsGDPVRp0ajH0OmbByjt3R0m0YJ00A/1mWEe/Juxe79pU6XWqTYHlYFxcfrKYWh/82Pt2t8bfsm+M0OVlY=
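# tasks: run in order. The internal.* entries are teuthology's standard
# per-job housekeeping (lock checks, remote connections, log/coredump
# archiving, timers); the test payload proper starts at pexec/install.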
tasks:
- internal.buildpackages_prep:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
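# pexec executes the listed shell commands on every node ("all"): it backs up
# and restores /etc/containers/registries.conf around a reset/reinstall of the
# container-tools dnf module, then edits /usr/share/containers/containers.conf
# to switch the default container runtime from runc to crun.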
- pexec:
    all:
    - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
    - sudo dnf -y module reset container-tools
    - sudo dnf -y module install container-tools
    - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
    - sudo sed -i 's/runtime = "runc"/#runtime = "runc"/g' /usr/share/containers/containers.conf
    - sudo sed -i 's/#runtime = "crun"/runtime = "crun"/g' /usr/share/containers/containers.conf
- install:
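# The ceph task's log-ignorelist entries are regexes matched against cluster
# log messages; health warnings that match (expected while the dashboard
# tests exercise daemons, flags, and pools) do not fail the run.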
- ceph:
    log-ignorelist:
    - overall HEALTH_
    - \(MGR_DOWN\)
    - \(PG_
    - replacing it with standby
    - No standby daemons available
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(OSD_DOWN\)
    - \(OSD_HOST_DOWN\)
    - \(POOL_APP_NOT_ENABLED\)
    - \(OSDMAP_FLAGS\)
    - \(OSD_FLAGS\)
    - \(TELEMETRY_CHANGED\)
    - pauserd,pausewr flag\(s\) set
    - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running
    - evicting unresponsive client .+
    - MON_DOWN
    wait-for-scrub: False
- rgw:
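# cephfs_test_runner runs the listed Python test modules (the dashboard API
# suites under qa/tasks/mgr) against the live cluster; with
# fail_on_skip: False, skipped tests are reported but do not fail the job.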
- cephfs_test_runner:
    fail_on_skip: False
    modules:
    - tasks.mgr.test_dashboard
    - tasks.mgr.dashboard.test_api
    - tasks.mgr.dashboard.test_auth
    - tasks.mgr.dashboard.test_cephfs
    - tasks.mgr.dashboard.test_cluster
    - tasks.mgr.dashboard.test_cluster_configuration
    - tasks.mgr.dashboard.test_crush_rule
    - tasks.mgr.dashboard.test_erasure_code_profile
    - tasks.mgr.dashboard.test_health
    - tasks.mgr.dashboard.test_host
    - tasks.mgr.dashboard.test_logs
    - tasks.mgr.dashboard.test_mgr_module
    - tasks.mgr.dashboard.test_monitor
    - tasks.mgr.dashboard.test_motd
    - tasks.mgr.dashboard.test_orchestrator
    - tasks.mgr.dashboard.test_osd
    - tasks.mgr.dashboard.test_perf_counters
    - tasks.mgr.dashboard.test_pool
    - tasks.mgr.dashboard.test_rbd
    - tasks.mgr.dashboard.test_rbd_mirroring
    - tasks.mgr.dashboard.test_requests
    - tasks.mgr.dashboard.test_rgw
    - tasks.mgr.dashboard.test_role
    - tasks.mgr.dashboard.test_settings
    - tasks.mgr.dashboard.test_summary
    - tasks.mgr.dashboard.test_telemetry
    - tasks.mgr.dashboard.test_user
teuthology_branch: main
verbose: True
pcp_grafana_url:
priority:
user:
queue:
posted: 2023-02-24 00:59:56
started:
updated: 2023-02-22 13:47:03
status_class: success