- log_href:
http://qa-proxy.ceph.com/teuthology/yuriw-2020-05-29_17:25:28-rados:dashboard-wip-yuri5-testing-2020-05-29-1606-nautilus-distro-basic-smithi/5103817/teuthology.log
- archive_path:
/home/teuthworker/archive/yuriw-2020-05-29_17:25:28-rados:dashboard-wip-yuri5-testing-2020-05-29-1606-nautilus-distro-basic-smithi/5103817
- description:
rados:dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-zlib supported-random-distro$/{rhel_latest} tasks/dashboard}
- duration:
0:42:39
- email:
ceph-qa@ceph.io
- failure_reason:
- flavor:
basic
- job_id:
5103817
- kernel:
- last_in_suite:
False
- machine_type:
smithi
- name:
yuriw-2020-05-29_17:25:28-rados:dashboard-wip-yuri5-testing-2020-05-29-1606-nautilus-distro-basic-smithi
- nuke_on_error:
True
- os_type:
rhel
- os_version:
7.8
- overrides:
- ceph-deploy:
- conf:
- client:
- log file:
/var/log/ceph/ceph-$name.$pid.log
- mon:
- workunit:
- sha1:
068417065516411b3d9197bf4be0982125326560
- branch:
wip-yuri5-testing-2020-05-29-1606-nautilus
- ceph:
- log-whitelist:
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- fs:
xfs
- conf:
- mds:
- client:
- debug ms:
1
- debug mgrc:
20
- debug client:
20
- osd:
- mon osd full ratio:
0.9
- bluestore compression algorithm:
zlib
- debug ms:
20
- debug mgrc:
20
- bluestore fsck on mount:
True
- debug osd:
25
- bluestore compression mode:
aggressive
- debug bluestore:
20
- debug bluefs:
20
- osd objectstore:
bluestore
- mon osd backfillfull_ratio:
0.85
- bluestore block size:
96636764160
- debug filestore:
20
- debug rocksdb:
10
- mon osd nearfull ratio:
0.8
- osd failsafe full ratio:
0.95
- debug journal:
20
- mon:
- debug mon:
20
- debug paxos:
20
- debug ms:
1
- mgr:
- debug ms:
1
- debug mgr:
20
- sha1:
068417065516411b3d9197bf4be0982125326560
- install:
- ceph:
- sha1:
068417065516411b3d9197bf4be0982125326560
- admin_socket:
- branch:
wip-yuri5-testing-2020-05-29-1606-nautilus
- thrashosds:
- bdev_inject_crash_probability:
0.5
- bdev_inject_crash:
2
- owner:
scheduled_yuriw@teuthology
- pid:
- roles:
- ['mgr.x', 'mon.a', 'mon.c', 'mds.a', 'mds.c', 'osd.0', 'client.0']
- ['mgr.y', 'mgr.z', 'mon.b', 'mds.b', 'osd.1', 'osd.2', 'osd.3', 'client.1']
- sentry_event:
- status:
pass
- success:
True
- branch:
wip-yuri5-testing-2020-05-29-1606-nautilus
- seed:
- sha1:
068417065516411b3d9197bf4be0982125326560
- subset:
- suite:
- suite_branch:
wip-yuri5-testing-2020-05-29-1606-nautilus
- suite_path:
- suite_relpath:
- suite_repo:
- suite_sha1:
068417065516411b3d9197bf4be0982125326560
- targets:
- smithi137.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCZ8LR9VaJYybS3EdxGJflSmJxXvdvJI+INxO5j9hu4uoThMEcDy/6CnEGpiY1tHxcsb1sINYciXH8+larNv427TP/XOIklIvEwrn4s/v8md2XtghOLt56Kyhdyo55zGhRuT1VnrNkpWV00K2NhdJh7lUSVPrbKZ+sQllpdaj0/fuI7Dsk5U3ULNroV9ERcWzYLcuTdkNRiVvGs39OcbIiHFMpfYq53uGMF548Fej93NhvTH9F1Bw+GdVXIKx2azT0n/Ohz2NX6x9eMvghDIThpYJYheUzhfx1z2dmuqNXHkIHsyoMoFeHBZ/7VIL9ZPRfss1FSnKPk9xUj31Ckej5t
- smithi205.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuM99B1w/Sf6efNWso/BHkW4BBosR6RBilhU/0kzn8XkaUiaiF+3u74V3AO2p2zlLlQbJ77F1lScTOh0HLnoXdmfm2ABX00YD3y27peg64Q+JdOwGsZTHs9OthK6d0cjBxh4xymo1GYk13rl9DC7FGdlftFdNLvh0A5uKwxx/GOrxWG1I4patalx1yaZUpuOjNwCU4XCxebbMDkk2+n+A8CdZ54p4xNb7FLe9b3PmhbVuQKVcRla0EPD0yBTqTAb7hZNK0Uc8ymyv0NlEF4PYYpsyyJey2imXWq0uO5wajp2QvDLuqpur/6MEoRtR8xArszIWbuJUXMghBgspSVAbh
- tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
- fs:
xfs
- wait-for-scrub:
False
- sha1:
068417065516411b3d9197bf4be0982125326560
- cluster:
ceph
- log-whitelist:
- overall HEALTH_
- \(MGR_DOWN\)
- \(PG_
- replacing it with standby
- No standby daemons available
- \(FS_DEGRADED\)
- \(MDS_FAILED\)
- \(MDS_DEGRADED\)
- \(FS_WITH_FAILED_MDS\)
- \(MDS_DAMAGE\)
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- \(OSD_DOWN\)
- \(OSD_HOST_DOWN\)
- \(POOL_APP_NOT_ENABLED\)
- pauserd,pausewr flag\(s\) set
- Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running
- evicting unresponsive client .+
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- conf:
- mds:
- client:
- debug ms:
1
- debug mgrc:
20
- debug client:
20
- osd:
- mon osd full ratio:
0.9
- bluestore compression algorithm:
zlib
- debug ms:
20
- debug mgrc:
20
- bluestore fsck on mount:
True
- debug osd:
25
- bluestore compression mode:
aggressive
- debug bluestore:
20
- debug bluefs:
20
- osd objectstore:
bluestore
- mon osd backfillfull_ratio:
0.85
- bluestore block size:
96636764160
- debug filestore:
20
- debug rocksdb:
10
- mon osd nearfull ratio:
0.8
- osd failsafe full ratio:
0.95
- debug journal:
20
- mon:
- debug mon:
20
- debug paxos:
20
- debug ms:
1
- mgr:
- debug ms:
1
- debug mgr:
20
- rgw:
- cephfs_test_runner:
- modules:
- tasks.mgr.test_dashboard
- tasks.mgr.dashboard.test_auth
- tasks.mgr.dashboard.test_cephfs
- tasks.mgr.dashboard.test_cluster_configuration
- tasks.mgr.dashboard.test_health
- tasks.mgr.dashboard.test_host
- tasks.mgr.dashboard.test_logs
- tasks.mgr.dashboard.test_monitor
- tasks.mgr.dashboard.test_osd
- tasks.mgr.dashboard.test_perf_counters
- tasks.mgr.dashboard.test_summary
- tasks.mgr.dashboard.test_rgw
- tasks.mgr.dashboard.test_rbd
- tasks.mgr.dashboard.test_pool
- tasks.mgr.dashboard.test_requests
- tasks.mgr.dashboard.test_role
- tasks.mgr.dashboard.test_settings
- tasks.mgr.dashboard.test_user
- tasks.mgr.dashboard.test_erasure_code_profile
- tasks.mgr.dashboard.test_mgr_module
- tasks.mgr.dashboard.test_ganesha
- fail_on_skip:
False
- teuthology_branch:
py2
- verbose:
True
- pcp_grafana_url:
- priority:
- user:
- queue:
- posted:
2020-05-29 17:25:36
- started:
2020-05-29 23:31:24
- updated:
2020-05-30 00:27:24
- status_class:
success
- runtime:
0:56:00
- wait_time:
0:13:21
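
For convenience, a minimal Python sketch (not part of the job record) for streaming the teuthology.log named in log_href above and scanning it for failure markers. The URL is copied verbatim from this record; the "Traceback"/"FAILED" markers are assumptions about what is worth flagging, not teuthology-defined constants.

```python
# Stream the teuthology.log referenced by log_href and print any lines
# that look like failures. Assumes qa-proxy.ceph.com still serves this
# archived run; adjust LOG_URL for other jobs.
import urllib.request

LOG_URL = (
    "http://qa-proxy.ceph.com/teuthology/"
    "yuriw-2020-05-29_17:25:28-rados:dashboard-wip-yuri5-testing-"
    "2020-05-29-1606-nautilus-distro-basic-smithi/5103817/teuthology.log"
)

with urllib.request.urlopen(LOG_URL) as resp:
    for raw in resp:  # HTTPResponse iterates line by line (bytes)
        line = raw.decode("utf-8", errors="replace")
        if "Traceback" in line or "FAILED" in line:
            print(line, end="")
```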