kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
- exec:
    client.0:
    - sudo ceph osd pool create cache 4
    - sudo ceph osd tier add rbd cache
    - sudo ceph osd tier cache-mode cache writeback
    - sudo ceph osd tier set-overlay rbd cache
    - sudo ceph osd pool set cache hit_set_type bloom
    - sudo ceph osd pool set cache hit_set_count 8
    - sudo ceph osd pool set cache hit_set_period 60
    - sudo ceph osd pool set cache target_max_objects 250
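# The exec step above wires a writeback cache tier over the 'rbd' pool by
# hand. A hedged way to confirm the wiring once it runs (pool names are the
# ones created above):
#   sudo ceph osd pool get cache hit_set_type
#   sudo ceph osd pool get cache target_max_objects
#   sudo ceph osd pool ls detail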
- thrashosds:
    dump_ops_enable: true
    bdev_inject_crash: 2
    optrack_toggle_delay: 2.0
    random_eio: 0.0
    bdev_inject_crash_probability: 0.5
    noscrub_toggle_delay: 2.0
    timeout: 1200
    sighup_delay: 0.1
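# thrashosds drives the cluster through failure/recovery cycles while the
# workload runs. noscrub_toggle_delay, for example, paces scrub-flag flips
# that amount to the following (a hedged manual equivalent):
#   sudo ceph osd set noscrub
#   sleep 2    # noscrub_toggle_delay: 2.0
#   sudo ceph osd unset noscrub
# dump_ops_enable similarly corresponds to dumping in-flight ops, e.g.:
#   sudo ceph daemon osd.0 dump_ops_in_flight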
- rbd_fsx:
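# rbd_fsx runs an fsx-style read/write consistency exerciser against RBD
# images (typically the ceph_test_librbd_fsx binary); with no options given
# here, the task falls back to its defaults.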
verbose: true
pid:
duration: 0:15:03
owner: scheduled_teuthology@teuthology
flavor: basic
status_class: success
targets:
  smithi088.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCq2TlmOHNBgDlIiusbyjABBUkZboVbwJf44IdQuH6ym02K8d4wIvSy36FUN7yVUAFvrI4vtdsIPn6BPszfwBNzSRa4ZDX+vfkGxKH6zMz8Q9YxrwK9xIvMOSKMiI+ejrE7b+uaCS/u0BODBMcGzHmIbSumXh2q9VLWiotvg3eHnnKiUo6jfsseDg6WF/FHbv/SAgNoTBX+/G7+7aoJKECkL36OxUNA6hpuAXtWIgXGrsfH0+XF/2scjXRQQ/f1aOMO9NvQB1YBiHddL3tcl/ODxM4FkY5mVrLxtPF9QDq3Sv9tO03m4UeLEJVmVWaj/CfFhyp6IHguJIFBRQ21g9BF
  smithi014.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCgWp+ymVUBImv8dCNYxURc7lP0Jifv9c776zwj3+xPy5hXUcYX/hCUvfQve1Yms8rOLMWu4ordeQ/WzlKoj4ocz8ziagDQSvY0owDUsrJFzXuEUfdjkuu6N1LRiGBVBKPpBosJeYx9wc3id5qFbFn/tlLXRYORi52sfgZL975IXWUrcGNSfdjC1zusTolflmGct5n3N9BlHwhHQveY+WMzikUH98GBse80nhLliXsHe8Ff484mnbVIcAvUujf1z3TxEa9oKxGgIQNVtFAeaJBR9mwUcY4Rwk0yr1p5QVfMjduv9Bou+DisjwJpBTkzj2ZCsMSreDJ21m/G84zDXIOL
job_id: 4427052
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-10-20_04:00:02-rbd-luminous-distro-basic-smithi/4427052/teuthology.log
suite_branch: luminous
wait_time: 0:12:57
os_version:
branch: luminous
pcp_grafana_url:
email: ceph-qa@ceph.io
archive_path: /home/teuthworker/archive/teuthology-2019-10-20_04:00:02-rbd-luminous-distro-basic-smithi/4427052
updated: 2019-10-20 21:20:56
description: rbd/thrash/{base/install.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml thrashers/cache.yaml thrashosds-health.yaml workloads/rbd_fsx_cache_writeback.yaml}
started: 2019-10-20 20:52:56
last_in_suite: false
machine_type: smithi
sentry_event:
posted: 2019-10-20 04:01:30
teuthology_branch: master
sha1: dc70b5dc10673b7be89fb1ec7f399bf6861d9a1e
name: teuthology-2019-10-20_04:00:02-rbd-luminous-distro-basic-smithi
roles:
- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
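# Each role list above is the role set for one of the two 'targets' hosts
# (smithi088 and smithi014), matching the fixed-2.yaml cluster layout named
# in the description.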
overrides:
  ceph-deploy:
    fs: xfs
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        mon osd full ratio: 0.9
        mon osd backfillfull_ratio: 0.85
        bluestore fsck on mount: true
        mon osd nearfull ratio: 0.8
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug rocksdb: 10
        osd failsafe full ratio: 0.95
    bluestore: true
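  # Keys under overrides.ceph-deploy.conf land in the generated ceph.conf
  # section named by their parent key; e.g. the osd block above becomes,
  # roughly (a sketch, not the verbatim generated file):
  #   [osd]
  #   osd objectstore = bluestore
  #   debug bluestore = 20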
  workunit:
    sha1: dc70b5dc10673b7be89fb1ec7f399bf6861d9a1e
    branch: luminous
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - but it is still running
    - objects unfound and apparently lost
    - overall HEALTH_
    - \(CACHE_POOL_NEAR_FULL\)
    - \(CACHE_POOL_NO_HIT_SET\)
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(SMALLER_PGP_NUM\)
    - \(OBJECT_
    - \(REQUEST_SLOW\)
    - \(TOO_FEW_PGS\)
    - \(MON_DOWN\)
    - slow request
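    # log-whitelist entries are regular-expression fragments matched against
    # cluster log lines; matching warnings (health states expected while the
    # thrasher runs) do not fail the job. A hedged manual equivalent, with a
    # placeholder log path:
    #   grep -E '\(CACHE_POOL_|\(OSDMAP_FLAGS\)' /path/to/archive/ceph.log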
    fs: xfs
    conf:
      global:
        ms inject socket failures: 5000
      osd:
        mon osd full ratio: 0.9
        debug ms: 1
        bluestore fsck on mount: true
        debug osd: 25
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bluestore allocator: stupid
        mon osd nearfull ratio: 0.8
        osd failsafe full ratio: 0.95
        bluestore block size: 96636764160
        debug filestore: 20
        debug rocksdb: 10
        osd shutdown pgref assert: true
        mon osd backfillfull_ratio: 0.85
        debug journal: 20
      mon:
        debug mon: 20
        debug paxos: 20
        debug ms: 1
      client:
    sha1: dc70b5dc10673b7be89fb1ec7f399bf6861d9a1e
  install:
    ceph:
      sha1: dc70b5dc10673b7be89fb1ec7f399bf6861d9a1e
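  # Debug levels injected through overrides.ceph.conf can be confirmed on a
  # running daemon via its admin socket (a hedged example; the daemon id
  # varies, and conf keys use underscores at this interface):
  #   sudo ceph daemon osd.0 config get debug_bluestore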
  admin_socket:
  thrashosds:
    bdev_inject_crash_probability: 0.5
    bdev_inject_crash: 2
success: true
failure_reason:
status: pass
nuke_on_error: true
os_type:
runtime: 0:28:00
suite_sha1: dc70b5dc10673b7be89fb1ec7f399bf6861d9a1e