kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
- exec:
    mon.a:
    - while ! ceph balancer status ; do sleep 1 ; done
    - ceph osd set-require-min-compat-client luminous
    - ceph balancer mode upmap
    - ceph balancer on
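# d-balancer/upmap.yaml: the mon.a loop waits until the mgr balancer module
# responds, then switches the balancer to upmap mode. Setting
# set-require-min-compat-client to luminous first is required, since pg-upmap
# entries are only understood by luminous and newer clients.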
- thrashosds:
    dump_ops_enable: true
    bdev_inject_crash: 2
    optrack_toggle_delay: 2.0
    random_eio: 0.0
    bdev_inject_crash_probability: 0.5
    noscrub_toggle_delay: 2.0
    chance_test_map_discontinuity: 2
    chance_pgpnum_fix: 0.25
    timeout: 1800
    chance_pgnum_grow: 0.25
    sighup_delay: 0.1
    chance_pgnum_shrink: 0.25
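# thrashosds repeatedly marks OSDs down/out and revives them while the workload
# below runs; bdev_inject_crash additionally injects BlueStore block-device
# failures (probability 0.5 per occasion), and the chance_pgnum_grow/shrink and
# chance_pgpnum_fix knobs resize pools mid-thrash. chance_test_map_discontinuity
# is the thrashers/mapgap.yaml knob, paired with "osd map cache size: 1" in the
# overrides below.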
- exec:
    client.0:
    - sudo ceph osd pool create low_tier 4
- rados:
    op_weights:
      chunk_read: 100
      tier_promote: 10
    ops: 4000
    clients: [client.0]
    objects: 300
    set_chunk: true
    low_tier_pool: low_tier
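# workloads/set-chunks-read.yaml: objects are chunked (set_chunk) into the
# low_tier pool created by the exec step above, with ops weighted 10:1 toward
# chunk_read over tier_promote.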
verbose: true
pid:
duration: '0:14:54'
owner: scheduled_teuthology@teuthology
flavor: basic
status_class: success
targets:
  smithi175.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDk8LFAQSD95iQdTYYKrTBkIufZtUM75z5K7Nro+fgDXim/OdxPOttaH6+4zMPHogKSG4KN+i300FDdwdE9GCEwwaHN00FmRnFzrApxWsrS25mKrxI8jg6qjuE0eBJGhFG3DS0gzWStZx9m1kTKXLO0cLfpwI6esRAVak5Utrctn09VMKOeHXnPXz64DRAqXT/kmUt2X0oSJMuA94I3TfDtEDBZ7qzdMA5oavO4qqs5x1RqCC1xzd0Z00iaEMdTC9H20qv+30LY8esfsnJE/9y2vBxQW58zVm3fgcIUQCdMqLnLn1JtOOYfxQdQHdxA9gQTDG/t1zbnBlladJquQNBL
  smithi013.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIffVMWJ1z+iR7P2wZ6bUVJJF3wsXDZ6CtHaPKz8Iv6HGfwJnYRMNbKQ2Hou4Hk9JauHZRnqredzWp6O3UbQ98nZuqQgXCK8LoPtlom+3tBWkLq5XDefK5jqvbqzFgefOY5FXQqLgr/d5VAsPkNNhn9na6o99K2pNXB4iL5u1AclcsPwJGdRWD8K4ULYWIB2PGAA3FQa+Nou72lXsm1IhxbpIhbwoE9c3Uw9xFsJQlzr0tVRs5ayS8Wim7cnr1zuFJTANYP1hN+wxL20LJLXsYHAwNBIQVTLYmTf7nQzwtB/W80zAlc5RrnpHmbUmW2/uhvdcqma8ZERFiFIOn0vHZ
job_id: '4481871'
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-11-08_03:30:02-rados-nautilus-distro-basic-smithi/4481871/teuthology.log
suite_branch: nautilus
wait_time: '0:07:05'
os_version: '16.04'
branch: nautilus
pcp_grafana_url:
email: ceph-qa@ceph.io
archive_path: /home/teuthworker/archive/teuthology-2019-11-08_03:30:02-rados-nautilus-distro-basic-smithi/4481871
updated: 2019-11-08 15:06:16
description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml}
started: 2019-11-08 14:44:17
last_in_suite: false
machine_type: smithi
sentry_event:
posted: 2019-11-08 03:31:07
teuthology_branch: master
sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
name: teuthology-2019-11-08_03:30:02-rados-nautilus-distro-basic-smithi
roles:
- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
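# clusters/fixed-2.yaml: two smithi hosts, each carrying monitors, a mgr,
# four OSDs, and one client.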
overrides:
  ceph-deploy:
    fs: xfs
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        mon osd full ratio: 0.9
        mon osd backfillfull_ratio: 0.85
        bluestore fsck on mount: true
        mon osd nearfull ratio: 0.8
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug rocksdb: 10
        bdev enable discard: true
        osd failsafe full ratio: 0.95
        bdev async discard: true
    bluestore: true
  workunit:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    branch: nautilus
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(REQUEST_STUCK\)
    - but it is still running
    - objects unfound and apparently lost
    - osd_map_cache_size
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(SMALLER_PGP_NUM\)
    - \(OBJECT_
    - \(SLOW_OPS\)
    - \(REQUEST_SLOW\)
    - \(TOO_FEW_PGS\)
    - slow requests
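    # thrashosds-health.yaml: health warnings that are expected while OSDs are
    # being thrashed (OSDMAP_FLAGS, degraded/undersized PGs, slow ops, unfound
    # objects) are whitelisted so they do not fail the run.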
    fs: xfs
    conf:
      global:
        ms inject delay max: 1
        ms inject delay type: osd
        osd_pool_default_min_size: 2
        ms type: async
        ms inject delay probability: 0.005
        ms inject socket failures: 2500
        osd_recovery_max_active: 10
        osd_pool_default_size: 2
        osd_recovery_max_single_start: 10
        ms inject internal delays: 0.002
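      # msgr-failures/osd-delay.yaml with msgr/async.yaml: the async messenger
      # injects a socket failure roughly once per 2500 messages and, with
      # probability 0.005, delays OSD-bound messages by up to 1 second.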
      mon:
        debug mon: 20
        debug paxos: 20
        mon min osdmap epochs: 50
        debug ms: 1
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
        paxos service trim min: 10
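      # The low "mon min osdmap epochs" and "mon osdmap full prune *" values
      # appear intended to make the monitors trim and prune full osdmaps
      # aggressively while the thrasher churns maps.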
      osd:
        debug journal: 20
        osd debug verify missing on start: true
        debug ms: 1
        osd shutdown pgref assert: true
        bdev async discard: true
        osd max backfills: 6
        mon osd nearfull ratio: 0.8
        osd objectstore: bluestore
        osd map cache size: 1
        osd op queue: debug_random
        osd backoff on peering: true
        osd scrub min interval: 60
        bdev enable discard: true
        osd scrub during recovery: false
        mon osd full ratio: 0.9
        osd op queue cut off: debug_random
        mon osd backfillfull_ratio: 0.85
        bluestore fsck on mount: true
        osd scrub max interval: 120
        debug osd: 25
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        osd backoff on degraded: true
        bluestore allocator: stupid
        osd debug verify cached snaps: true
        osd failsafe full ratio: 0.95
        bluestore block size: 96636764160
        debug filestore: 20
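      # objectstore/bluestore-stupid.yaml: BlueStore with the "stupid"
      # allocator and bdev discard enabled. "osd map cache size: 1" supports
      # the mapgap thrasher above, and "osd op queue: debug_random" picks a
      # random op-queue implementation for the run.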
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
  install:
    ceph:
      sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
  admin_socket:
  thrashosds:
    bdev_inject_crash_probability: 0.5
    bdev_inject_crash: 2
success: true
failure_reason:
status: pass
nuke_on_error: true
os_type: ubuntu
runtime: '0:21:59'
suite_sha1: 591ee56688ccb9c388807d2903673c986fbff3e2