kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
- full_sequential_finally:
  - exec:
      mon.a:
      - ceph osd require-osd-release luminous
      - ceph osd pool application enable base rados || true
      - rados -p rbd bench 5 write -b 4096
  - ceph.healthy:
  - ceph.osd_scrub_pgs:
  - exec:
      mon.a:
      - sleep 15
      - ceph osd dump | grep purged_snapdirs
      - ceph pg dump -f json-pretty
      - ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'
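# (The full_sequential_finally block above runs at the very end of the job:
# it flips require-osd-release to luminous and then verifies that no legacy
# snapsets remain, per d-require-luminous/at-end.yaml in the description.)
# The thrasher below runs concurrently with the client workload. Assuming
# standard teuthology thrashosds semantics rather than anything recorded in
# this log: OSDs are randomly marked down/out and revived, daemons are
# periodically sent SIGHUP (sighup_delay), scrub flags and the op tracker are
# toggled, pg_num may grow (chance_pgnum_grow: 3) with pgp_num re-aligned
# afterwards (chance_pgpnum_fix: 1), while chance_thrash_cluster_full: 0
# disables cluster-full thrashing and random_eio: 0.0 disables EIO injection.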
- thrashosds:
    dump_ops_enable: true
    optrack_toggle_delay: 2.0
    random_eio: 0.0
    noscrub_toggle_delay: 2.0
    chance_thrash_cluster_full: 0
    chance_pgpnum_fix: 1
    timeout: 1200
    chance_pgnum_grow: 3
    sighup_delay: 0.1
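# The exec task below builds the cache-tier topology this job exercises:
# pool "cache" is layered over pool "base" in readproxy mode, meaning reads
# are proxied through the cache tier without promoting objects. The bloom
# hit-set and a target_max_objects of only 250 (against a 500-object
# workload) are presumably there to keep the tiering agent flushing and
# evicting for the whole run.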
- exec:
    client.0:
    - sudo ceph osd pool create base 4
    - sudo ceph osd pool application enable base rados
    - sudo ceph osd pool create cache 4
    - sudo ceph osd tier add base cache
    - sudo ceph osd tier cache-mode cache readproxy
    - sudo ceph osd tier set-overlay base cache
    - sudo ceph osd pool set cache hit_set_type bloom
    - sudo ceph osd pool set cache hit_set_count 8
    - sudo ceph osd pool set cache hit_set_period 3600
    - sudo ceph osd pool set cache target_max_objects 250
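# The rados task below drives the actual I/O: 4000 weighted-random
# operations over 500 objects, mixing reads and writes with snapshot
# create/remove/rollback and explicit cache flush/try-flush/evict calls.
# pool_snaps: True selects pool-level snapshots instead of self-managed
# snapshots.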
- rados:
    op_weights:
      snap_remove: 50
      write: 100
      rollback: 50
      read: 100
      copy_from: 50
      snap_create: 50
      cache_try_flush: 50
      cache_flush: 50
      cache_evict: 50
      delete: 50
    ops: 4000
    pool_snaps: True
    clients:
    objects: 500
    pools: []
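# Everything below is run metadata recorded by teuthology: timings, target
# machines, scheduling details, and the merged override stanzas.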
verbose: False
pid:
duration: 0:20:35
owner: scheduled_xxg@teuthology
flavor: basic
status_class: success
targets:
  smithi008.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpdquAS7gHuDmmuRdrk1TfFipfTZY2/E85zXhy/crIDgxqOd/nv/2gNw7Ajkm3/YZmT84c1X173j6trVsBgnYUIaFrKLNLKzzQu3r4NzSQ/U3rWuBUXDUSAwBmkTeTA5yc8gG/3wETA57A9Pm9KRYms+oXlTHI7ir4PDkGzPtppYiexb/uMnakcbRB5UQgjBWvl8ehI/ykJh6rnvCJCpDKqFBseEggys7xrmT34HyIjK6EJMWRF8Er3QEDZyYjKtP47Mwe2rOgLwgFyOZqcdV6RuFoRGIrzJlGgHFS529XQtS2EfAUm1ia13ppJLF1SLW4Ac60jpDa6vSqfauceH9j
  smithi151.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCSzs79c50EGgdXSUXxOMTwrVesP9T6oktu7ydqRNJ6KA+MHfbmp9YrWA9ZHEdR88eQNEG0bRmdkUbgjj8dFcoHmaAPq7Gxahq7ckMJBATOH9kLzkGVpyThnyD0C5EVBzkCdbGfPF2R5lnSmHxi8Clc3K9rnMLZPf+Oc34se3cALGhVzRmgjEKDpbjGpfd9scv0KxsGsAdGheqUYhCXeTtdkJvuu3XNxAHpj3HgTISi/Pn2d3f7zpOJXP2Gz+gu5fyGhWSRBNETeH0ceypEixTUjVtNWbalmQb011rctM70qabRI2XB2Hlt4WAXNRPBj6Z6H/FTNa/ip2yeYXluTafZ
job_id: 4484052
log_href: http://qa-proxy.ceph.com/teuthology/xxg-2019-11-08_08:14:27-rados:thrash-wip-luminous-final-distro-basic-smithi/4484052/teuthology.log
suite_branch: wip-luminous-final
wait_time: 0:13:29
os_version:
branch: wip-luminous-final
pcp_grafana_url:
email:
archive_path: /home/teuthworker/archive/xxg-2019-11-08_08:14:27-rados:thrash-wip-luminous-final-distro-basic-smithi/4484052
updated: 2019-11-08 13:11:31
description: rados:thrash/{0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-require-luminous/at-end.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml}
started: 2019-11-08 12:37:27
last_in_suite: False
machine_type: smithi
sentry_event:
posted: 2019-11-08 08:16:45
teuthology_branch: master
sha1: b0a712b624b9439c0f4ad96c7fea7272fdde780f
name: xxg-2019-11-08_08:14:27-rados:thrash-wip-luminous-final-distro-basic-smithi
roles:
- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
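# The two role lists above map one-to-one onto the two smithi targets (the
# clusters/fixed-2.yaml layout named in the description). The overrides
# below are, as usual for teuthology, the settings merged in from the
# individual suite fragments.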
overrides:
  ceph-deploy:
    fs: xfs
    filestore: True
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        osd sloppy crc: True
        osd objectstore: filestore
  workunit:
    sha1: b0a712b624b9439c0f4ad96c7fea7272fdde780f
    branch: wip-luminous-final
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - overall HEALTH_
    - \(PG_AVAILABILITY\)
    - \(PG_DEGRADED\)
    - \(OSD_SLOW_PING_TIME
    - but it is still running
    - objects unfound and apparently lost
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(SMALLER_PGP_NUM\)
    - \(OBJECT_
    - \(REQUEST_SLOW\)
    - \(TOO_FEW_PGS\)
    - \(MON_DOWN\)
    - slow request
    - must scrub before tier agent can activate
    fs: xfs
    conf:
      global:
        osd_pool_default_size: 2
        osd_pool_default_min_size: 1
        enable experimental unrecoverable data corrupting features: '*'
        osd_max_pg_log_entries: 2
        mon debug no require luminous: True
        osd_min_pg_log_entries: 1
        ms type: simple
        ms inject socket failures: 5000
        osd_recovery_max_single_start: 10
        osd_recovery_max_active: 10
      mon:
        debug mon: 20
        debug paxos: 20
        mon keyvaluedb: rocksdb
        debug ms: 1
      osd:
        osd op queue cut off: debug_random
        filestore queue throttle max multiple: 10
        debug journal: 20
        osd scrub max interval: 120
        osd op queue: debug_random
        osd max backfills: 9
        debug osd: 25
        osd objectstore: filestore
        osd debug verify cached snaps: True
        osd sloppy crc: True
        osd debug verify missing on start: True
        journal throttle high multiple: 2
        filestore queue throttle high multiple: 2
        osd backoff on peering: True
        debug ms: 1
        osd scrub min interval: 60
        debug filestore: 20
        osd heartbeat use min delay socket: True
        journal throttle max multiple: 10
        osd shutdown pgref assert: True
    sha1: b0a712b624b9439c0f4ad96c7fea7272fdde780f
  install:
    ceph:
      sha1: b0a712b624b9439c0f4ad96c7fea7272fdde780f
  admin_socket:
    branch: wip-luminous-final
  thrashosds:
    chance_thrash_cluster_full: 0
success: True
failure_reason:
status: pass
nuke_on_error: True
os_type:
runtime: 0:34:04
suite_sha1: b0a712b624b9439c0f4ad96c7fea7272fdde780f