kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
    sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
    exclude_packages:
    - ceph-mgr
    - libcephfs2
    - libcephfs-devel
    - libcephfs-dev
    - python36-cephfs
    - python36-rados
    - python36-rbd
    - python36-rgw
    - python36-ceph-argparse
    - python3-cephfs
    - python3-rados
    branch: jewel
- print: "**** done install jewel"
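# Installing jewel first gives the cluster its pre-upgrade starting point;
# ceph-mgr and the libcephfs2/python3 bindings are excluded because those
# packages do not exist in the jewel release.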
- ceph:
    log-whitelist:
    - required past_interval bounds are empty
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - but it is still running
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
    - no active mgr
    - ruleset-
    - slow request
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(SMALLER_PGP_NUM\)
    - \(OBJECT_
    - \(REQUEST_SLOW\)
    - \(TOO_FEW_PGS\)
    - \(MON_DOWN\)
    - slow request
    skip_mgr_daemons: True
    sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
    cluster: ceph
    fs: xfs
    conf:
      mon:
        debug mon: 20
        debug paxos: 20
        mon warn on osd down out interval zero: False
        debug ms: 1
      osd:
        debug osd: 25
        debug filestore: 20
        debug journal: 20
        debug ms: 1
    add_osds_to_crush: True
- print: "**** done ceph"
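# log-whitelist entries are regular expressions matched against the cluster
# log; open-ended prefixes such as \(OSD_ and \(PG_ cover whole families of
# health warnings expected while daemons restart and OSDs are thrashed
# mid-upgrade.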
- full_sequential_finally:
  - exec:
      mon.a:
      - ceph pg dump -f json-pretty
      - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
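# full_sequential_finally runs its body at the end of the job even if earlier
# tasks fail; the final grep asserts num_legacy_snapsets is 0, i.e. the
# final-scrub stage has converted every pre-luminous SnapSet encoding.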
- install.upgrade:
- print: "**** done install.upgrade osd.0"
- ceph.restart:
    daemons:
    - mon.a
    - mon.b
    - mon.c
    - osd.0
    - osd.1
    - osd.2
- print: "**** done ceph.restart 1st half"
- exec:
    osd.0:
    - ceph osd set pglog_hardlimit && exit 1 || true
    - ceph osd dump --format=json-pretty | grep "flags"
- print: "**** try to set pglog_hardlimit, should not succeed"
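# The `&& exit 1 || true` idiom inverts the exit status: setting
# pglog_hardlimit must be rejected while pre-luminous OSDs remain in the
# map, so a successful set would fail the job with exit 1.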
- parallel:
- install.upgrade:
- ceph.restart:
    daemons:
    wait-for-healthy: False
    wait-for-osds-up: True
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
- print: "**** done `ceph osd require-osd-release luminous`"
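# require-osd-release luminous bars any pre-luminous OSD from rejoining the
# cluster and is the gate for the luminous-only features enabled below.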
- exec:
    osd.0:
    - ceph osd dump --format=json-pretty | grep "flags"
    - ceph osd set pglog_hardlimit
    - ceph osd dump --format=json-pretty | grep "flags"
- print: "**** try to set pglog_hardlimit again, should succeed"
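# With all OSDs upgraded and require-osd-release set, the same flag is now
# expected to stick; the surrounding osd dumps record the flags field before
# and after for the teuthology log.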
- exec:
    mgr.x:
    - mkdir -p /var/lib/ceph/mgr/ceph-x
    - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
    - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
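# jewel has no ceph-mgr (hence skip_mgr_daemons: True in the ceph task
# above), so the mgr.x data directory and keyring are created by hand before
# the restart brings the daemon up.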
- ceph.restart:
    daemons:
    wait-for-healthy: False
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
    - ceph osd set-require-min-compat-client luminous
- ceph.healthy:
- exec:
    mon.a:
    - ceph osd set-require-min-compat-client jewel
    - ceph osd crush set-all-straw-buckets-to-straw2
    - ceph osd crush weight-set create-compat
    - ceph osd crush weight-set reweight-compat osd.0 .9
    - ceph osd crush weight-set reweight-compat osd.1 1.2
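# Lowering min-compat-client back to jewel while creating a compat weight-set
# exercises the luminous CRUSH features that remain decodable by jewel
# clients; the reweight-compat calls adjust weights only inside that
# weight-set, not the base CRUSH weights.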
- workunit:
    clients:
      client.0:
      - rbd/test_librbd_python.sh
    branch: luminous
    sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
- print: "**** done rbd/test_librbd_python.sh 9-workload"
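# Workunits are scripts from qa/workunits in ceph.git; this one runs the
# librbd Python binding tests on client.0 against the luminous branch.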
- rgw:
- print: "**** done rgw 9-workload"
- swift:
- print: "**** done swift 9-workload"
- rados:
    op_weights:
      snap_remove: 50
      snap_create: 50
      rollback: 50
      read: 100
      write: 100
      delete: 50
    clients:
    write_append_excl: False
    objects: 500
    ops: 4000
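# op_weights are relative frequencies for the rados stress workload: reads
# and writes at 100 dominate, while snap create/remove/rollback at 50 keep
# snapshots churning across 500 objects over the 4000-op run.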
verbose: True
pid:
duration: '2:41:02'
owner: scheduled_teuthology@teuthology
flavor: basic
status_class: success
targets:
  smithi073.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMoL9GfEu5kFGDe0KZywoduX69A2exJ5Y2//Top6/p/zPZijSSVqsoE/zQNYOUrRxR+brymRWVPP74BpVteAvqNE33SoG0QaHMQVQMF+aqGFQ5IvFrEs7qRrN9Ee0MoDPB6uyH0dhNKpCZdJCtqfZ83PbjKqAs45M/S9tA3D2wBqPsy9WIssrt2C6iAgXqgBGGXPZxskqN9UBF6VMRZ5UboP3SLzpZRS2yWSaTaBjV7qAb98K6RoxbLE0rpMOUSeCizlqOH4nyzE6A5sSpnRSusv1H6ruE1FsshD29hlzXE00/DxH5sLMtp1nihh7BmcA4fVo4B5nuTy4VdrAWwplB
  smithi092.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCZlJEErErV1Q/foUgl84m1eUfrH6Tv8XB64eCGhG/r0sII7RZwWLgRCXZQEn97Xz+BMR2QZoUVSa25byZOb9ZFago9eM2V0ZDISN/ZHi5Tq7DbxX8wy3JrGYZYTFvIWGCJ4z6IDhCHcbk0eeEqQhC4/l/YohzEZON92TsXr64G5+294QtzyAdKjB7A7iyeY/mS6xONd/QadGJyCDgxRjPATzopr3tl0OgMM1A5VHQu66eP38NF4fti1b7FZF7lTegus8J6MBzFbJWEuwRQIOGs31grbOeLfzoapJAuIfDfRZ/+YBhVeZlxpGVzamVDCWmUwrk4wAO4aPiZdIRywJtv
  smithi173.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBzuhBE0rFS4qVrAiUb0ez46Q0fLlCdj/9bShq65VzinaMyy5ELaNCNdh6Fu2IbXBeVVxzszBxsIchb232Ish3LWb4YqeQtZFPtAMfKG/Bg7b2Vydw0N93/9Bfldj14sGQKTYYT0bEAleEWP95xOwRGDuKh9WwQFqRHS5G8AvmPqIWD3duYT5mRIvRAxRiNvHGmIEnBHS1yNYz8zGQpsHHICxEnqbaD8244OEJAnTBmxqJpcexgC5z+ZLT/cn/cwE/OoIxlopMbfyXUD2cly0W86V0Sl3l8UpwDAfuzbJyG/VOAlA2x8vOPYqsKBYDgznJ1eK8raWaZMt3PUNVe3OZ
job_id: '4482354'
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-11-08_04:23:02-upgrade:jewel-x-luminous-distro-basic-smithi/4482354/teuthology.log
suite_branch: luminous
wait_time: '0:27:00'
os_version: '16.04'
branch: luminous
pcp_grafana_url:
email: ceph-qa@ceph.io
archive_path: /home/teuthworker/archive/teuthology-2019-11-08_04:23:02-upgrade:jewel-x-luminous-distro-basic-smithi/4482354
updated: '2019-11-11 10:58:50'
description: upgrade:jewel-x/stress-split/{0-cluster/{openstack.yaml start.yaml} 1-jewel-install/jewel.yaml 1.1-pg-log-overrides/normal_pg_log.yaml 1.5-final-scrub.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{radosbench.yaml rbd-cls.yaml rbd-import-export.yaml rbd_api.yaml readwrite.yaml snaps-few-objects.yaml} 5-finish-upgrade.yaml 6-luminous.yaml 6.5-crush-compat.yaml 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} distros/ubuntu_latest.yaml slow_requests.yaml thrashosds-health.yaml}
started: '2019-11-11 07:50:48'
last_in_suite: False
machine_type: smithi
sentry_event:
posted: '2019-11-08 04:24:00'
teuthology_branch: master
sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
name: teuthology-2019-11-08_04:23:02-upgrade:jewel-x-luminous-distro-basic-smithi
roles:
- [mon.a, mon.b, mon.c, mgr.x, osd.0, osd.1, osd.2]
- [osd.3, osd.4, osd.5]
- [client.0]
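# Each roles entry maps one-to-one onto a machine in targets, in order:
# mon.a-c, mgr.x and osd.0-2 share the first smithi host, osd.3-5 the second,
# and client.0 the third.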
overrides:
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - but it is still running
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
    - no active mgr
    - ruleset-
    - slow request
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(SMALLER_PGP_NUM\)
    - \(OBJECT_
    - \(REQUEST_SLOW\)
    - \(TOO_FEW_PGS\)
    - \(MON_DOWN\)
    - slow request
    fs: xfs
    conf:
      mon:
        debug mon: 20
        debug paxos: 20
        mon warn on osd down out interval zero: False
        debug ms: 1
      osd:
        debug osd: 25
        debug filestore: 20
        debug journal: 20
        debug ms: 1
    sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
  workunit:
    sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
    branch: luminous
  install:
    ceph:
      sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4
  admin_socket:
success: True
failure_reason:
status: pass
nuke_on_error: True
os_type: ubuntu
runtime: '3:08:02'
suite_sha1: 226a40b547a74486c252cd9b1f9d10f39009c4d4