kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
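# install: per-distro extra system packages and extra Ceph packages for the
# suite, plus the sha1 of the Ceph build to install.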
- install:
    extra_system_packages:
      deb:
      - bison
      - flex
      - libelf-dev
      - libssl-dev
      rpm:
      - bison
      - flex
      - elfutils-libelf-devel
      - openssl-devel
    extra_packages:
      deb:
      - python3-cephfs
      - cephfs-shell
      rpm:
    sha1: 9d23144cbeab8f881d0cc622e13f85f85ff8f351
- ceph:
- ceph-fuse:
- exec:
    mon.a:
    - ceph config set mgr mgr/crash/warn_recent_interval 0
- cephfs_test_runner:
    modules:
    - tasks.cephfs.test_failover
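# Scheduling and result metadata recorded for this job.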
verbose: True
pid:
duration: 0:57:26
owner: scheduled_teuthology@teuthology
flavor: basic
status_class: success
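# Machines locked for this job, keyed by hostname, with their SSH host keys.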
targets:
  smithi014.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCilQImofyX6cEQMLK0ti2pAXn8lsDRLN1lmnm7P73mEEtP4zF+5S+5qlnEl22M+Cm7C+tZGePrD6N9tH7ryC/j63pF0plLoB/ZaS600Ok/uVLgG+skjE+Jl4F9gz3bJdMVrNnGghTFA9CJW90O7CrjqMVQYZ2B0Aa8BG6lnX86BLFUVRCzJp1fxwJ6hG5BJ/P6uMc193iA/bbsKAi87Ych+DnIXDN/Auy5jWolk8B/Cm/ipXIWSx8lGep+jh881jrJMsUwvfKgWwa6lysa9x4CQFnHyo8Lao1ETPhnY+S6kmg65Ryo2OpiLHaV4UEn4PYS3/4qYsVr5p3dDSXvnWF
  smithi042.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRsMdNoXYyrUyC3bN6gFP9WlCeHJ4fpPcuNuzFi2H2d/Gw2GJX93oy9XIe3RNT93D9QEX0ZGTB0enDKwXLtSbihIoyIU2Lb3H7cRscULYe85dfgLucgIyu39aS3cRL4HjyUbntKUxg/CDL3BjbhRh7U1ynSKO2uBW8kIcP4vxhIg1EjxwkxzN0UDJtmTi6U54GSUs0/3nwpnTxC2Mn21TCSDnZUobkeVo4MsOdT/QzBEYLpLWvjEtnlsqek+l6LRfsJkNRTkfTdM99TRMj5eD77/uUPZ2pBgz8T4Lxbiywdoof+yXIYEd/hNNRA6d+ckpcXS7rzihH7VY5Ee2ntwfj
job_id: 4481696
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-11-08_03:15:03-fs-master-distro-basic-smithi/4481696/teuthology.log
suite_branch: master
wait_time: 0:34:35
os_version: 7.7
branch: master
pcp_grafana_url:
email: ceph-qa@ceph.io
archive_path: /home/teuthworker/archive/teuthology-2019-11-08_03:15:03-fs-master-distro-basic-smithi/4481696
updated: 2019-11-08 14:39:03
description: fs/multifs/{begin.yaml clusters/1a3s-mds-2c-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-bitmap.yaml overrides/{frag_enable.yaml mon-debug.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_7.yaml} tasks/failover.yaml}
started: 2019-11-08 13:07:02
last_in_suite: False
machine_type: smithi
sentry_event:
posted: 2019-11-08 03:15:42
teuthology_branch: master
sha1: 9d23144cbeab8f881d0cc622e13f85f85ff8f351
name: teuthology-2019-11-08_03:15:03-fs-master-distro-basic-smithi
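# Cluster roles assigned to the locked machines, one list per node.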
roles:
- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7, client.1]
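# Per-task configuration overrides merged in from the suite's yaml fragments
# (see the description field above for which fragments were combined).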
overrides:
  ceph-deploy:
    fs: xfs
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        mon osd full ratio: 0.9
        mon osd backfillfull_ratio: 0.85
        bluestore fsck on mount: True
        mon osd nearfull ratio: 0.8
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug rocksdb: 10
        bdev enable discard: True
        osd failsafe full ratio: 0.95
        bdev async discard: True
    bluestore: True
  ceph-fuse:
  workunit:
    sha1: 9d23144cbeab8f881d0cc622e13f85f85ff8f351
    branch: master
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - overall HEALTH_
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(FS_INLINE_DATA_DEPRECATED\)
    - overall HEALTH_
    - \(OSD_DOWN\)
    - \(OSD_
    - but it is still running
    - is not responding
    - not responding, replacing
    - \(MDS_INSUFFICIENT_STANDBY\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    fs: xfs
    conf:
      mds:
        mds bal split bits: 3
        mds bal split size: 100
        osd op complaint time: 180
        debug mds: 20
        mds bal merge size: 5
        debug ms: 1
        mds bal frag: True
        mds verify scatter: True
        mds bal fragment size max: 10000
        mds op complaint time: 180
        mds debug scatterstat: True
        mds debug frag: True
      client:
        debug ms: 1
        debug client: 20
        client mount timeout: 600
      mon:
        debug mon: 20
        debug paxos: 20
        mon op complaint time: 120
        debug ms: 1
      osd:
        mon osd full ratio: 0.9
        debug ms: 1
        bdev enable discard: True
        bluestore fsck on mount: True
        debug osd: 25
        osd op complaint time: 180
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bdev async discard: True
        bluestore allocator: bitmap
        osd failsafe full ratio: 0.95
        bluestore block size: 96636764160
        debug filestore: 20
        debug rocksdb: 10
        mon osd nearfull ratio: 0.8
        mon osd backfillfull_ratio: 0.85
        debug journal: 20
    sha1: 9d23144cbeab8f881d0cc622e13f85f85ff8f351
  install:
    ceph:
      sha1: 9d23144cbeab8f881d0cc622e13f85f85ff8f351
  admin_socket:
  thrashosds:
    bdev_inject_crash_probability: 0.5
    bdev_inject_crash: 2
success: True
failure_reason:
status: pass
nuke_on_error: True
os_type: rhel
runtime: 1:32:01
suite_sha1: 9d23144cbeab8f881d0cc622e13f85f85ff8f351