kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
- ceph-fuse:
- cephfs_test_runner:
    modules:
    - tasks.cephfs.test_recovery_pool
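# The internal.* entries above are teuthology's own setup/teardown plumbing;
# install, ceph, and ceph-fuse bring up the cluster under test, and
# cephfs_test_runner then executes the module listed under `modules`.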
verbose: True
pid:
duration: '0:08:43'
owner: scheduled_yuriw@teuthology
flavor: basic
status_class: success
targets:
  smithi198.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgn3oq8+ZBqQCbeqR4wVjE+1sBcY88SIpwiga5P3mweHDm01Ea7Bc8vT+BuN0w5b8lBzPgTih+drhTzhVgEuSn/Nl0xtvqIzhRdBQSDRib5K7uCBGwxbqKrQGPXncTOX+mSrFTnHq54hgLK4y1nnd37/4wDo2aB0z9Q7WoJr8QIuOZ26oFkRdnChUHbvPBiVaOxUvJEuzt1ESoogtSBIwMzuIeQPFhg6OKCeGOqal2ksseji+pXCeqfhPxzD2lR8BrnZYmfWiiBgVIm+7SgIqCIQlZg+7IpZj0c8JSmvddhOz8lzIyA86N6ygJNRF9hmC3RnH7OeF92sTlQVhxX6x9
  smithi061.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDpkQTUkqmrLxbox05SEhJGhh7JEuERS2j0Xz04rMX/VjuHjj+Aa96la4FgBJw8bnAJH0qVpv3uJmGTLbPtYcnQGQsi29JTo+WclauYC6WNSho3SEIZQNIVN+lL4rk9mKlpUP3hu5PhcJjarSQR8zrlv9Ks4eI1mdwpcd1I3jvUTyxWMgqFr1mpLQHm3AJqW72smQA5sTj/Hkf15+1uH3KIMD8R0M7VoA6oieUdHo8t0W1LQ/0ZOckdkSJAfD6fWP0i/aWmZEMm+339bb2kWsyobfu8k2vbBHTaZ4c0lF+gAJJIw9MKwom10Oq4lpOnz5xCaczx8NGc/Opy5R4mzpDJ
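# targets maps each allocated test node to its SSH host public key.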
job_id: 3236638
log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2018-11-08_16:24:27-fs-luminous-distro-basic-smithi/3236638/teuthology.log
suite_branch: luminous
wait_time: '0:13:16'
os_version: '16.04'
branch: luminous
pcp_grafana_url:
email: ceph-qa@lists.ceph.com
archive_path: /home/teuthworker/archive/yuriw-2018-11-08_16:24:27-fs-luminous-distro-basic-smithi/3236638
updated: '2018-11-08 17:31:18'
description: fs/basic_functional/{begin.yaml clusters/4-remote-clients.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore/bluestore-ec-root.yaml overrides/{frag_enable.yaml no_client_pidfile.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/alternate-pool.yaml}
started: '2018-11-08 17:09:19'
last_in_suite: False
machine_type: smithi
sentry_event:
posted: '2018-11-08 16:24:32'
teuthology_branch: master
sha1: 8157642b94a60dbfc3c88529a543a094d45d2b5e
name: yuriw-2018-11-08_16:24:27-fs-luminous-distro-basic-smithi
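# The job name appears to encode scheduler, schedule time, suite, branch,
# kernel flavor, build flavor, and machine type. sha1 is the ceph commit under
# test; suite_sha1 (at the end of this record) is the qa-suite checkout used.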
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, mds.a, mds.b, client.1, client.2, client.3]
- [client.0, osd.4, osd.5, osd.6, osd.7]
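# Each role list is assigned to one of the two targets: the first groups the
# mon/mgr/MDS daemons with OSDs 0-3, the second client.0 with OSDs 4-7.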
overrides:
  ceph-deploy:
    fs: xfs
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        mon osd full ratio: 0.9
        mon osd backfillfull_ratio: 0.85
        bluestore fsck on mount: True
        mon osd nearfull ratio: 0.8
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug rocksdb: 10
        osd failsafe full ratio: 0.95
    bluestore: True
  workunit:
    sha1: 9386263faf9b4e086ee496067fe4bc16030309b4
    branch: luminous
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - overall HEALTH_
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - overall HEALTH_
    - \(OSD_DOWN\)
    - \(OSD_
    - but it is still running
    - is not responding
    - bad backtrace
    - object missing on disk
    - error reading table object
    - error reading sessionmap
    - unmatched fragstat
    - unmatched rstat
    - was unreadable, recreating it now
    - Scrub error on inode
    - Metadata damage detected
    - MDS_FAILED
    - MDS_DAMAGE
    sha1: 8157642b94a60dbfc3c88529a543a094d45d2b5e
    fs: xfs
    conf:
      mds:
        mds bal split bits: 3
        mds bal split size: 100
        osd op complaint time: 180
        debug mds: 20
        mds bal merge size: 5
        debug ms: 1
        mds bal frag: True
        mds verify scatter: True
        mds bal fragment size max: 10000
        mds op complaint time: 180
        mds debug scatterstat: True
        mds debug frag: True
      client:
        debug ms: 1
        pid file:
        debug client: 20
        client mount timeout: 600
      mon:
        debug mon: 20
        debug paxos: 20
        mon op complaint time: 120
        debug ms: 1
      osd:
        mon osd full ratio: 0.9
        debug ms: 1
        debug journal: 20
        debug osd: 25
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        mon osd backfillfull_ratio: 0.85
        osd op complaint time: 180
        bluestore block size: 96636764160
        debug filestore: 20
        debug rocksdb: 10
        mon osd nearfull ratio: 0.8
        osd failsafe full ratio: 0.95
        bluestore fsck on mount: True
    cephfs_ec_profile:
    - m=2
    - k=2
    - crush-failure-domain=osd
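    # Erasure-code profile for the EC data pool: k=2 data chunks plus m=2
    # coding chunks, with osd as the CRUSH failure domain.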
  install:
    ceph:
      sha1: 8157642b94a60dbfc3c88529a543a094d45d2b5e
  admin_socket:
  thrashosds:
    bdev_inject_crash_probability: 0.5
    bdev_inject_crash: 2
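  # The thrashosds bdev crash-injection knobs are overrides only; no
  # OSD-thrashing task appears in this job's task list, so they go unused.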
success: True
failure_reason:
status: pass
nuke_on_error: True
os_type: ubuntu
runtime: '0:21:59'
suite_sha1: 9386263faf9b4e086ee496067fe4bc16030309b4