kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
    extra_packages:
    exclude_packages:
    - librados3
    - ceph-mgr-dashboard
    - ceph-mgr-diskprediction-local
    - ceph-mgr-diskprediction-cloud
    - ceph-mgr-rook
    - ceph-mgr-ssh
    branch: luminous
- print: "**** done installing luminous"
- ceph:
    mon_bind_addrvec: false
    log-whitelist:
    - overall HEALTH_
    - \(FS_
    - \(MDS_
    - \(OSD_
    - \(MON_DOWN\)
    - \(CACHE_POOL_
    - \(POOL_
    - \(MGR_DOWN\)
    - \(PG_
    - \(SMALLER_PGP_NUM\)
    - Monitor daemon marked osd
    - Behind on trimming
    - Manager daemon
    mon_bind_msgr2: false
    conf:
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
    - ceph osd set-require-min-compat-client luminous
- print: "**** done ceph"
- ceph-fuse:
- print: "**** done luminous client"
- exec:
    mon.a:
    - ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
- workunit:
    cleanup: false
    timeout: 5m
    clients:
- print: "**** done snap hierarchy"
- mds_pre_upgrade:
- print: "**** done mds pre-upgrade sequence"
- install.upgrade:
- print: "**** done install.upgrade both hosts"
- ceph.stop:
- ceph.restart:
    daemons:
    mon-health-to-clog: false
- print: "**** done ceph.restart"
- exec:
    mon.a:
    - ceph status
    - ceph fs dump --format=json-pretty
    - ceph fs set cephfs max_mds 2 && exit 1 || true
- print: "**** confirmed cannot set max_mds=2"
- exec:
    mon.a:
    - ceph fs set cephfs allow_new_snaps true
- install.upgrade:
- print: "**** done install.upgrade on client.0"
- ceph-fuse:
- ceph-fuse:
- print: "**** done remount client"
- workunit:
    timeout: 5m
    cleanup: false
    env:
    clients:
- print: "**** done verify snap hierarchy"
- cephfs_upgrade_snap:
- print: "**** upgraded snapshot metadata"
- exec:
    mon.a:
    - ceph fs set cephfs max_mds 2
- print: "**** increased max_mds=2"
- sleep:
- exec:
    mon.a:
    - ceph fs dump | grep '^max_mds.*2'
- workunit:
    timeout: 5m
    cleanup: false
    env:
    clients:
- print: "**** done verify snap hierarchy"
verbose: true
pid:
duration:
owner: scheduled_teuthology@teuthology
flavor:
status_class: danger
targets:
  smithi188.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo1CALSJmWQzhKoZzCBVCLXmJorZkIfUUS3E3XOzd+cm/4nr/W+1nCrDU0bz1bcgBJVPyfMEt8PYPV57FDKTXb/aNbPG5YbTWJAfJscrZ187CVB30l4tOs5vwFLaY0m4Tlgyb5Sh705px3cnXmY6Fh5YICX/eqV/bGmibaWskyimeLUYWtW6Roz4TbPlhAj/Nw86KguQ93TX+ukmkddDKVIfskvbAKf2dfhI/AmATKck17/X53xL84oksJHp/wyCRtG0HNg61KzHEq75e8zFElyRwMm/vIU2EmA28RQhmjDKB+a605vdVo/ipmktnGDpb4ZLZZHMLw0duotprIDHPT
  smithi141.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwo1weYcWZw1J8bMG9GPcs0nJBHkre0lJqCT4Uns+oBVfEsWozqMVJPUqo0dqdai/x64EMKE6bZOxt7XI6kAn3M7P8uJH7FziyWWv0+9x4CdUakBUcBT3tCKgkiQFd5abcGSPg0Ua0af/qtcco8b1Jul4F3kQsgWIH9/ZlojBqSPrDmywAEhE8Vpj1ccYjTPyPxUa6I4kQmZ3wbMnB073OXiURsCMA6PelaKmQ66Tcwn8jyN7wafgnWbL8czsVF2XVEi3jqbXuOAslxP4qh/Dsn4T0pRA6lKYlQVu70MiHyJWkajdhiYEsD7BGt+/ElHxfxjnPP291kRaHVfZb6oDb
  smithi151.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMchTnGRfamxOpLGycbYnLIcSHYLgUrGLPJC5LR8zkU9sV2S0fQXdmGcHX4zeIIb0R9kMhcFEhE7U4l/UxjrboprRfyip/L//VtmJkr3lrRumC9Ck9S+EClYv4M367tQ9sb8bCXe4coWJmE7cccV/0Vv+wyHddPCaXdCOLnX5dhdgDYTe+2cqXvctR46i5tJ/3HO6hZxukvMnSnkDpQvrXUJk3htSTj2nQL0bsGsNMxn95GmaBw6dHo2IL/6XOZoAANXhLeTQUcSp+sNJEjn85eVOFLyACXE6jStuiA8Veu7Sl3o1VqGqMm0i7ZlB8n3MSQC2sGKWDZMF30m07CJ+b
job_id: 4482230
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-11-08_04:10:02-fs-nautilus-distro-basic-smithi/4482230/teuthology.log
suite_branch: nautilus
os_version:
branch: nautilus
pcp_grafana_url:
email: ceph-qa@ceph.io
archive_path: /home/teuthworker/archive/teuthology-2019-11-08_04:10:02-fs-nautilus-distro-basic-smithi/4482230
updated: 2019-11-11 16:37:04
description: fs/upgrade/snaps/{clusters/3-mds.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} objectstore-ec/bluestore-bitmap.yaml overrides/{frag_enable.yaml multimds/yes.yaml whitelist_health.yaml whitelist_rstat.yaml whitelist_wrongly_marked_down.yaml} tasks/{0-luminous.yaml 1-client.yaml 2-upgrade.yaml 3-sanity.yaml 4-client-upgrade/yes.yaml 5-client-sanity.yaml 6-snap-upgrade.yaml 7-client-sanity.yaml}}
started: 2019-11-11 04:34:36
last_in_suite: false
machine_type: smithi
sentry_event:
posted: 2019-11-08 04:11:29
teuthology_branch: master
sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
name: teuthology-2019-11-08_04:10:02-fs-nautilus-distro-basic-smithi
roles:
- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7]
- [client.0, client.1]
overrides:
  ceph-deploy:
    fs: xfs
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        mon osd full ratio: 0.9
        mon osd backfillfull_ratio: 0.85
        bluestore fsck on mount: true
        mon osd nearfull ratio: 0.8
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug rocksdb: 10
        bdev enable discard: true
        osd failsafe full ratio: 0.95
        bdev async discard: true
    bluestore: true
  workunit:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    branch: nautilus
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - overall HEALTH_
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - inconsistent rstat on inode
    - overall HEALTH_
    - \(OSD_DOWN\)
    - \(OSD_
    - but it is still running
    - is not responding
    - bad backtrace on inode
    fs: xfs
    max_mds: 2
    conf:
      mds:
        mds bal split bits: 3
        mds bal split size: 100
        osd op complaint time: 180
        debug mds: 20
        mds bal merge size: 5
        debug ms: 1
        mds bal frag: true
        mds verify scatter: 0
        mds bal fragment size max: 10000
        mds op complaint time: 180
        mds debug scatterstat: 0
        mds debug frag: true
      global:
        bluestore warn on legacy statfs: false
      osd:
        mon osd full ratio: 0.9
        debug ms: 1
        bdev enable discard: true
        bluestore fsck on mount: true
        debug osd: 25
        osd op complaint time: 180
        debug bluestore: 20
        debug bluefs: 20
        osd objectstore: bluestore
        bdev async discard: true
        bluestore allocator: bitmap
        osd failsafe full ratio: 0.95
        bluestore block size: 96636764160
        debug filestore: 20
        debug rocksdb: 10
        mon osd nearfull ratio: 0.8
        mon osd backfillfull_ratio: 0.85
        debug journal: 20
      mon:
        debug mon: 20
        debug paxos: 20
        mon op complaint time: 120
        debug ms: 1
      client:
        debug ms: 1
        debug client: 20
        client mount timeout: 600
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
  install:
    ceph:
      sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
  admin_socket:
  thrashosds:
    bdev_inject_crash_probability: 0.5
    bdev_inject_crash: 2
success:
failure_reason:
status: dead
nuke_on_error: true
os_type:
runtime: 12:02:28
suite_sha1: 591ee56688ccb9c388807d2903673c986fbff3e2