kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
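# The internal.* entries above are teuthology's standard job scaffolding
# (machine locking, inventory push, log/coredump collection, clock sync);
# the test proper begins with the install task below.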
- install:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    extra_packages:
    exclude_packages:
    - librados3
    - ceph-mgr-dashboard
    - ceph-mgr-diskprediction-local
    - ceph-mgr-diskprediction-cloud
    - ceph-mgr-rook
    - ceph-mgr-ssh
    branch: luminous
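# The excluded packages do not exist in the luminous repositories (librados3
# and the split-out ceph-mgr-* plugin packages arrived in later releases),
# so they must be skipped when installing the older branch.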
- print: '**** done installing luminous'
- ceph:
    mon_bind_addrvec: false
    fs: xfs
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    cluster: ceph
    log-whitelist:
    - overall HEALTH_
    - \(FS_
    - \(MDS_
    - \(OSD_
    - \(MON_DOWN\)
    - \(CACHE_POOL_
    - \(POOL_
    - \(MGR_DOWN\)
    - \(PG_
    - \(SMALLER_PGP_NUM\)
    - Monitor daemon marked osd
    - Behind on trimming
    - Manager daemon
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - overall HEALTH_
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - inconsistent rstat on inode
    - overall HEALTH_
    - \(OSD_DOWN\)
    - \(OSD_
    - but it is still running
    - is not responding
    - bad backtrace on inode
    mon_bind_msgr2: false
    conf:
      mds:
        mds bal split bits: 3
        mds bal split size: 100
        osd op complaint time: 180
        debug mds: 20
        mds bal merge size: 5
        debug ms: 1
        mds bal frag: true
        mds verify scatter: 0
        mds bal fragment size max: 10000
        mds op complaint time: 180
        mds debug scatterstat: 0
        mds debug frag: true
      client:
        debug ms: 1
        debug client: 20
        client mount timeout: 600
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
        bluestore warn on legacy statfs: false
      osd:
        debug ms: 1
        debug journal: 20
        debug osd: 25
        osd objectstore: filestore
        osd sloppy crc: true
        debug filestore: 20
        osd op complaint time: 180
      mon:
        debug mon: 20
        debug paxos: 20
        mon op complaint time: 120
        debug ms: 1
    max_mds: 1
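# mon_bind_addrvec and mon_bind_msgr2 are disabled because luminous
# monitors predate the msgr2 protocol; the log-whitelist entries are
# regexes for health warnings that are expected while the cluster is
# mid-upgrade and should not fail the run.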
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
    - ceph osd set-require-min-compat-client luminous
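# require-osd-release forbids pre-luminous OSDs from joining the cluster;
# set-require-min-compat-client likewise refuses connections from clients
# older than luminous.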
- print: '**** done ceph'
- ceph-fuse:
- print: '**** done luminous client'
- exec:
    mon.a:
    - ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
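# On luminous, CephFS snapshots are still flagged experimental, so enabling
# them requires the --yes-i-really-mean-it guard.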
- workunit:
    timeout: 5m
    cleanup: false
    branch: nautilus
    clients:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
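# This workunit builds the snapshot hierarchy under test; cleanup: false
# leaves it in place so the post-upgrade workunits below can re-verify it.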
- print: '**** done snap hierarchy'
- mds_pre_upgrade:
- print: '**** done mds pre-upgrade sequence'
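# mds_pre_upgrade prepares the MDS cluster for the package upgrade,
# reducing it to a single active rank so the daemons can be restarted safely.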
- install.upgrade:
- print: '**** done install.upgrade both hosts'
- ceph.stop:
- ceph.restart:
    daemons:
    mon-health-to-clog: false
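# ceph.stop halts the running daemons and ceph.restart brings the listed
# daemons back up on the upgraded packages; mon-health-to-clog: false keeps
# the mons from writing health summaries into the cluster log during the
# bounce, which would otherwise trip the whitelist checks.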
- print: '**** done ceph.restart'
- exec:
    mon.a:
    - ceph status
    - ceph fs dump --format=json-pretty
    - ceph fs set cephfs max_mds 2 && exit 1 || true
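# The `cmd && exit 1 || true` idiom asserts that cmd fails: if setting
# max_mds=2 unexpectedly succeeds, exit 1 fails the exec task, while the
# expected failure is swallowed by || true. Multiple active ranks remain
# forbidden until the snapshot metadata has been upgraded below.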
- print: '**** confirmed cannot set max_mds=2'
- exec:
    mon.a:
    - ceph fs set cephfs allow_new_snaps true
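# After the upgrade the experimental guard is gone: nautilus treats CephFS
# snapshots as stable, so no --yes-i-really-mean-it is needed this time.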
- workunit:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    env:
    clients:
    cleanup: false
    timeout: 5m
    branch: nautilus
- print: '**** done verify snap hierarchy'
- cephfs_upgrade_snap:
- print: '**** upgraded snapshot metadata'
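# cephfs_upgrade_snap converts the legacy snapshot metadata to the new
# format, which is what lifts the single-active-MDS restriction tested above.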
- exec:
    mon.a:
    - ceph fs set cephfs max_mds 2
- print: '**** increased max_mds=2'
- sleep:
- exec:
    mon.a:
    - ceph fs dump | grep '^max_mds.*2'
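# exec propagates the shell's exit status, so the grep doubles as an
# assertion that max_mds really is 2 in the FSMap after the sleep.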
- workunit:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    env:
    clients:
    cleanup: false
    timeout: 5m
    branch: nautilus
- print: '**** done verify snap hierarchy'
verbose: true
pid:
duration: '0:28:16'
owner: scheduled_teuthology@teuthology
flavor: basic
status_class: success
targets:
  smithi162.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCh85JndHccZdb43WjrDkvyBmg07BHhdpUTdFIdNzm3EH8yiebIkvM/7bRtzOrn+8xJwM2ixCVLkKmOutWgZukX1P2mOnSfNUVSztsuGPrXp5SPdjlkjLTPo2c0i5FhDfAgmtQxyM/amrvfnYM/rZr7xR1L8C1ShH3oRUNLMTl3jOS88uTOpgJWGsfEThzEttavK7sh4+IFfa45y/cnZoMAr61l1KIpCx5tIzpKzvYepqtiwEkb0jtjDM/EDfzsUly1QY4XKAJ6h4AKTUOnzpYrAt3qiu4Prde8zblD+ReXJ+lCNQxBOaBGu3v4+bBKp/WTuXuJ7TF/kXRZjV593l+d
  smithi107.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfaiMG0RiRFI8kG8o8pbMgkvAgVHd2450oE5JeCzrFXHPCGx4h1BJN+Sa03dcEbSPQ7sJeqh2uyGFzhb+35il13ITv3wobTF+YmGyFcg2zQ0RKvTKD+VGwxYO/+AyJeybCC626UOMbQ5stbTrdGSuKYd53EMW/gFbhINBE5/9xfR4iZWGxJJ7E99vTYWdIBGqp9KgeU6gLlLairanhnmsR2XYcFhWBs8SZSLpwo0VS4rGwh03bvKOp3wIuD3skPqTjfV+9UuwWJ5uiHtdVgfp6UaCBiofxft1naej4ESlbMUxiqNHYPR8Hl39c+d01a7RRoMYTOr1kd++V1PYJFiQp
  smithi039.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDSe88QQXYIDI1FFv7vjoT4p+OAokuK++Njw9G4djWCMdmCsyE0/H4xs7R0gGdwCATsS7sfNkB57lE8sbZqUkIqUC942T8gAmMnI42t6sSt3oZM+2ZuHzWxEw58k9aS/9vKebxL5sZKM3+8jsg/Gonyte3WawF6RfJu/n/gcxfncgXkGBaV5vMpIl5yP15HMANDDXPJa+wgZORTCCorOMiCnFae0ZHj3CEcYyBbxAheACyyDQaLanP3TWaFG6NElTmhSJitJk0PcktB6AkH3dxDHbYvu67IB0pC1BVFVYYhmMwlP0c6n5sA1PCleLzPyNLlaWg76RgUCfgfSaiZlol
job_id: '4482222'
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-11-08_04:10:02-fs-nautilus-distro-basic-smithi/4482222/teuthology.log
suite_branch: nautilus
wait_time: '1:55:45'
os_version:
branch: nautilus
pcp_grafana_url:
email: ceph-qa@ceph.io
archive_path: /home/teuthworker/archive/teuthology-2019-11-08_04:10:02-fs-nautilus-distro-basic-smithi/4482222
updated: 2019-11-11 06:46:26
description: fs/upgrade/snaps/{clusters/3-mds.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml multimds/no.yaml whitelist_health.yaml whitelist_rstat.yaml whitelist_wrongly_marked_down.yaml} tasks/{0-luminous.yaml 1-client.yaml 2-upgrade.yaml 3-sanity.yaml 4-client-upgrade/no.yaml 5-client-sanity.yaml 6-snap-upgrade.yaml 7-client-sanity.yaml}}
started: 2019-11-11 04:22:25
last_in_suite: false
machine_type: smithi
sentry_event:
posted: 2019-11-08 04:11:23
teuthology_branch: master
sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
name: teuthology-2019-11-08_04:10:02-fs-nautilus-distro-basic-smithi
roles:
- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7]
- [client.0, client.1]
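# The role lists map one-to-one onto the three smithi targets above: two
# nodes carrying mon/mgr/mds/osd daemons and a third hosting the two clients
# (the 3-mds layout named in the description).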
overrides:
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - overall HEALTH_
    - \(FS_DEGRADED\)
    - \(MDS_FAILED\)
    - \(MDS_DEGRADED\)
    - \(FS_WITH_FAILED_MDS\)
    - \(MDS_DAMAGE\)
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - inconsistent rstat on inode
    - overall HEALTH_
    - \(OSD_DOWN\)
    - \(OSD_
    - but it is still running
    - is not responding
    - bad backtrace on inode
    fs: xfs
    max_mds: 1
    conf:
      mds:
        mds bal split bits: 3
        mds bal split size: 100
        osd op complaint time: 180
        debug mds: 20
        mds bal merge size: 5
        debug ms: 1
        mds bal frag: true
        mds verify scatter: 0
        mds bal fragment size max: 10000
        mds op complaint time: 180
        mds debug scatterstat: 0
        mds debug frag: true
      global:
        bluestore warn on legacy statfs: false
      osd:
        debug ms: 1
        debug journal: 20
        debug osd: 25
        osd objectstore: filestore
        osd sloppy crc: true
        debug filestore: 20
        osd op complaint time: 180
      mon:
        debug mon: 20
        debug paxos: 20
        mon op complaint time: 120
        debug ms: 1
      client:
        debug ms: 1
        debug client: 20
        client mount timeout: 600
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
  ceph-deploy:
    fs: xfs
    filestore: true
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
      osd:
        osd sloppy crc: true
        osd objectstore: filestore
  workunit:
    sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
    branch: nautilus
  install:
    ceph:
      sha1: 591ee56688ccb9c388807d2903673c986fbff3e2
  admin_socket:
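# Everything under overrides is merged by teuthology into the matching task
# entries above (e.g. the ceph and workunit tasks), which is why the same
# whitelist, conf, and sha1 values appear in both places.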
success: true
failure_reason:
status: pass
nuke_on_error: true
os_type:
runtime: '2:24:01'
suite_sha1: 591ee56688ccb9c388807d2903673c986fbff3e2