log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2022-06-21_16:32:27-rados-wip-yuri-testing-2022-06-21-0703-octopus-distro-default-smithi/6889807/teuthology.log
archive_path: /home/teuthworker/archive/yuriw-2022-06-21_16:32:27-rados-wip-yuri-testing-2022-06-21-0703-octopus-distro-default-smithi/6889807
description: rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/filestore-xfs supported-random-distro$/{rhel_8} tasks/dashboard}
duration: 1:27:23
email: ceph-qa@ceph.io
failure_reason:
flavor: default
job_id: 6889807
kernel:
last_in_suite: False
machine_type: smithi
name: yuriw-2022-06-21_16:32:27-rados-wip-yuri-testing-2022-06-21-0703-octopus-distro-default-smithi
nuke_on_error: True
os_type: rhel
os_version: 8.3
overrides:
  admin_socket:
    branch: wip-yuri-testing-2022-06-21-0703-octopus
  ceph:
    conf:
      client:
        debug client: 20
        debug mgrc: 20
        debug ms: 1
      mds:
      mgr:
        debug client: 20
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug mgrc: 20
        debug ms: 1
        debug osd: 20
        osd objectstore: filestore
        osd sloppy crc: True
    flavor: default
    fs: xfs
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    sha1: 35f0662e634bcf40dc54a65059a429a167ae0774
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        osd objectstore: filestore
        osd sloppy crc: True
    filestore: True
    fs: xfs
  install:
    ceph:
      flavor: default
      sha1: 35f0662e634bcf40dc54a65059a429a167ae0774
  selinux:
    whitelist:
    - scontext=system_u:system_r:logrotate_t:s0
  workunit:
    branch: wip-yuri-testing-2022-06-21-0703-octopus
    sha1: 35f0662e634bcf40dc54a65059a429a167ae0774
owner: scheduled_yuriw@teuthology
pid:
roles:
- ['mgr.x', 'mon.a', 'mon.c', 'mds.a', 'mds.c', 'osd.0', 'client.0']
- ['mgr.y', 'mgr.z', 'mon.b', 'mds.b', 'osd.1', 'osd.2', 'osd.3', 'client.1']
sentry_event:
status: pass
success: True
branch: wip-yuri-testing-2022-06-21-0703-octopus
seed:
sha1: 35f0662e634bcf40dc54a65059a429a167ae0774
subset:
suite:
suite_branch: wip-yuri-testing-2022-06-21-0703-octopus
suite_path:
suite_relpath:
suite_repo:
suite_sha1: 35f0662e634bcf40dc54a65059a429a167ae0774
targets:
  smithi089.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9JNwdallDeUvbo8sgzCNMq0zG0rvmp9oE6vbW58c9yyPBjlEAKxDO1c5mWwYI6UY0/5iZ3WCBgEuN23XBXRhG+z4HtCHiWQ6s5iO6DzBbx1CsKe65Hia29BuDv2ZQwByWPoSIsccjwdLnITA/13nOsSBIMfgQdEJRY3pKKowkr9gF/rFz0KgSHPYmtE4JNbKSi+GRQWNE/n+uO5EFooQjJIggRTS4TkaqbZAJ9sNtDvuiIyNzbvbosQGivZSlYCcmeypPMR/4Rpyzy7wmfL9fleud9cwt3nMp86NNj9GM2mf1QoivrZlBSEDGhSri/sUEYcQt9e1/nASDLTblCJ4JHcnjXX9dHV74ZyX+SS6Aqx/G9P7fR70JibaXOb96Jr1RUxT3CqlW5oGclKqzhw5oVXqhTtE+xVQKME9XvL+uteCkDGr3ORI0mfHYn+0Ymg9luTNwG51wnorodC5atvfNI2wIcSeo5vJV+wMqXdFnVyBUTz1A/JobIRIxP3Yosjk=
  smithi125.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCcthUtykekdQiUTJKPq71byO0sldari8liCuTB5gDNUsSpWViuMTajkpRrYePCtNc4tYyoUCOwLLlf9RJvpojLCjWiHmKhabVs894vYfREYrorcdQIQcAqMrfShhpXmG0sGVLuwsKIM0cU9MrWff/aAmx9ah7WWffiWMLNuoKUZnkxEsOEYIAsYdi94tobjsYKoW9jhZOUX4ta8vUiFmprwMQ59wGtdbKTMLcdU704kFPKNbfAtQ2UZKnbdGcYNK49+TilzmTR73CwgbUY5THDMH5YrT8fGC0VWSZkyrOs+MN6rDwCu0lTM3IRkDJDEKmrHfmzwuTQ5W/6PccFs7CIoY9oBzkpbvAy+C57/MNVywOJDZZ/A13W/9gyITQgaFKsmWdzSXBDMWgvyP2pCPZWYQUvNHVk1k07ukdOfXunh/PPRcGZbWzxUDgg85junjm8WvBIpvH8xDSy3Uo/YBgI09v9b4VPrMiFntR5YjS5h9DwlrujRiPRgMVMR9UlRgM=
tasks:
- internal.buildpackages_prep:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
    log-ignorelist:
    - overall HEALTH_
    - \(MGR_DOWN\)
    - \(PG_
    - replacing it with standby
    - No standby daemons available
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(OSD_DOWN\)
    - \(OSD_HOST_DOWN\)
    - \(POOL_APP_NOT_ENABLED\)
    - \(OSDMAP_FLAGS\)
    - \(OSD_FLAGS\)
    - \(TELEMETRY_CHANGED\)
    - pauserd,pausewr flag\(s\) set
    - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running
    - evicting unresponsive client .+
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    wait-for-scrub: False
    conf:
      client:
        debug client: 20
        debug mgrc: 20
        debug ms: 1
      mds:
      mgr:
        debug client: 20
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug mgrc: 20
        debug ms: 1
        debug osd: 20
        osd objectstore: filestore
        osd sloppy crc: True
    flavor: default
    fs: xfs
    sha1: 35f0662e634bcf40dc54a65059a429a167ae0774
    cluster: ceph
- rgw:
- cephfs_test_runner:
    fail_on_skip: False
    modules:
    - tasks.mgr.test_dashboard
    - tasks.mgr.dashboard.test_auth
    - tasks.mgr.dashboard.test_cephfs
    - tasks.mgr.dashboard.test_cluster_configuration
    - tasks.mgr.dashboard.test_crush_rule
    - tasks.mgr.dashboard.test_erasure_code_profile
    - tasks.mgr.dashboard.test_ganesha
    - tasks.mgr.dashboard.test_health
    - tasks.mgr.dashboard.test_host
    - tasks.mgr.dashboard.test_logs
    - tasks.mgr.dashboard.test_mgr_module
    - tasks.mgr.dashboard.test_monitor
    - tasks.mgr.dashboard.test_orchestrator
    - tasks.mgr.dashboard.test_osd
    - tasks.mgr.dashboard.test_perf_counters
    - tasks.mgr.dashboard.test_pool
    - tasks.mgr.dashboard.test_rbd
    - tasks.mgr.dashboard.test_rbd_mirroring
    - tasks.mgr.dashboard.test_requests
    - tasks.mgr.dashboard.test_rgw
    - tasks.mgr.dashboard.test_role
    - tasks.mgr.dashboard.test_settings
    - tasks.mgr.dashboard.test_summary
    - tasks.mgr.dashboard.test_telemetry
    - tasks.mgr.dashboard.test_user
    - tasks.mgr.dashboard.test_motd
teuthology_branch: main
verbose: True
pcp_grafana_url:
priority:
user:
queue:
posted: 2022-06-21 16:35:15
started: 2022-06-21 19:16:27
updated: 2022-06-21 20:49:48
status_class: success
runtime: 1:33:21
wait_time: 0:05:58
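
For reference, a minimal Python sketch (not a teuthology tool) of how the derived timing fields above follow from the recorded values: runtime is updated minus started, and wait_time is runtime minus duration. The timestamp format is inferred from the values shown.

# Hypothetical helper: recompute this job's derived timing fields from its raw values.
from datetime import datetime, timedelta

FMT = "%Y-%m-%d %H:%M:%S"
started = datetime.strptime("2022-06-21 19:16:27", FMT)  # "started" field above
updated = datetime.strptime("2022-06-21 20:49:48", FMT)  # "updated" field above
duration = timedelta(hours=1, minutes=27, seconds=23)    # "duration" field above

runtime = updated - started      # 1:33:21, matches "runtime"
wait_time = runtime - duration   # 0:05:58, matches "wait_time"
print(runtime, wait_time)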
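
Similarly, a hedged sketch of pulling the job's teuthology.log from the log_href above and printing lines that look like failures. The URL is copied verbatim from this record; the failure markers ("Traceback", "ERROR", "FAIL") are assumptions rather than an official filter, the qa-proxy must still be serving the log, and for a passing job like this one the scan may print nothing.

# Hypothetical log scan: fetch teuthology.log for job 6889807 and grep for common failure markers.
import urllib.request

LOG_URL = ("http://qa-proxy.ceph.com/teuthology/"
           "yuriw-2022-06-21_16:32:27-rados-wip-yuri-testing-2022-06-21-0703-octopus-distro-default-smithi/"
           "6889807/teuthology.log")

with urllib.request.urlopen(LOG_URL) as resp:
    for raw in resp:
        line = raw.decode("utf-8", errors="replace")
        if any(marker in line for marker in ("Traceback", "ERROR", "FAIL")):
            print(line.rstrip())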