kernel:
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
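# Note: the internal.* tasks above are teuthology's standard per-job
# plumbing (machine locking, config capture, log/coredump archival, syslog,
# timing). The test proper begins with the luminous v12.2.10 install below.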
- install:
    tag: v12.2.10
    extra_packages:
    exclude_packages:
    sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250
- print: '**** done install luminous v12.2.10'
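# Bring up the cluster, then pin its compatibility level: 'require-osd-release
# luminous' locks in the luminous OSD feature set, and
# 'set-require-min-compat-client luminous' sets the client feature floor,
# both prerequisites before the point-to-point upgrade starts.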
- ceph:
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
    - ceph osd set-require-min-compat-client luminous
- print: '**** done ceph'
- install.upgrade:
- print: '**** done install.upgrade osd.0'
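# First half of the partial upgrade (2-partial-upgrade/firsthalf.yaml in the
# suite description): restart all three mons plus osd.0-2 (the first roles
# group) on the new binaries, leaving osd.3-5 on v12.2.10.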
- ceph.restart:
    daemons:
    - mon.a
    - mon.b
    - mon.c
    - osd.0
    - osd.1
    - osd.2
- print: '**** done ceph.restart 1st half'
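# Negative test: pglog_hardlimit must be rejected while any OSD still runs
# the old release. The '&& exit 1 || true' idiom inverts the exit status, so
# this exec step passes only if 'ceph osd set pglog_hardlimit' fails.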
- exec:
    osd.0:
    - ceph osd set pglog_hardlimit && exit 1 || true
    - ceph osd dump --format=json-pretty | grep "flags"
- print: '**** try to set pglog_hardlimit, should not succeed'
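# Finish the upgrade on the remaining OSDs. 'wait-for-healthy: false' skips
# the wait for HEALTH_OK (the suite thrashes OSDs per 3-thrash/default.yaml,
# so WARN states are expected); 'wait-for-osds-up: true' only waits until
# every OSD reports up. With the whole cluster upgraded, setting
# pglog_hardlimit must now succeed.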
- parallel:
- install.upgrade:
- ceph.restart:
    daemons:
    wait-for-healthy: false
    wait-for-osds-up: true
- exec:
    osd.0:
    - ceph osd set pglog_hardlimit
    - ceph osd dump --format=json-pretty | grep "flags"
- print: '**** try to set pglog_hardlimit again, should succeed'
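# Final workloads against the fully upgraded cluster, per
# 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml}:
# the librbd Python bindings workunit (pinned to the luminous branch), an
# RGW/Swift run, and a snaps-many-objects rados stress run.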
- workunit:
    clients:
      client.0:
      - rbd/test_librbd_python.sh
    branch: luminous
    sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250
- print: '**** done rbd/test_librbd_python.sh 9-workload'
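# The rgw task starts a RADOS Gateway; the swift task then runs the Swift
# API functional tests against it.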
- rgw:
- print: '**** done rgw 9-workload'
- swift:
- print: '**** done swift 9-workload'
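# op_weights are relative probabilities for the rados stress tool's
# operation mix: reads and writes (100) are twice as likely as each
# snap_create/snap_remove/rollback/delete operation (50).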
- rados:
    op_weights:
      snap_remove: 50
      snap_create: 50
      rollback: 50
      read: 100
      write: 100
      delete: 50
    clients:
    write_append_excl: false
    objects: 500
    ops: 4000
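# Everything below is run metadata recorded by teuthology: timing, the
# target machines (with their SSH host keys), scheduling info, role-to-host
# assignments, and the effective overrides.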
verbose: true
pid:
duration: '1:43:37'
owner: scheduled_teuthology@teuthology
flavor: basic
status_class: success
targets:
  smithi008.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxWOoEKK9EtyZcnBp8dWke3DSKjZxWIkXmPjE7U8GsUoW7cu88/UBRIXbZCWx5LMGKDTqrdUsBjTK4WyeOcF0Dv3HADgMzv65O8tdLumGrlKhMo8g4Q4joP0321Nq2wybdXHJj1O7W3HU0m6FIP8v3VnUMer/uUc3Lpspcd/pL+iqPYUHBkSlUVYKqKsLM8BcIMB28PZlfq7nJEwT+YaI6nl6Rv1uJqBAFJbYcmN60kXbENMdNHpYeI5IE5wh7MWVSdNHmAmNMAAVtwNiM+Z+FEZka1MtPKPyPgfH5RFqCu9q+6z0uu0Yz6LDJmWy/FjMCCij5aJG5EIwhuTDRGmPz
  smithi058.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzVA23HqOLh1MI1kvsfpWeIjPzh19AsIXeZNTrXgOVhwSemhQhkSNMmk8B42HWbYQEaRkEmyaQYkApitdRk2J8SDIAsPJi0N1PsC/LWxp3+VAk8VS6hYbBssFJboInERHqRfrvHSfGCLw3wJSD63rSqNPwiJRbsf+eG7svToU6MhgbI9ErCbzDQjw+kdej6tuRzg1iop6Y94Qh99NvWSF/8jP+RXBTqWR3sgJ6XD8tx/RwvUp2dhZp/QoSH+xPIlJHUbNobd+2Ccu2/RAwjVcpTuhN0GZPqCcLJXuyeOzhMPVugnyw4jy0TADTDRj/Xp8kGr868nXyYXv+WB1FqT1d
  smithi065.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEReNMgCXprOG9F7Id1Ym2SHGIBMu/7MuGMWqUfcRYDw1pJ+mqhIwboIFPVVbUMwxjz5TpNWi3nK2eBjsLIb/iTaOS8XngfTIFeRvsD4SyrFhEAsOZlqU4EangJSNsXhlIQk7YEkJbPWU4EezS4Q6vdrMU0MK+N/ETne8RBSJxIWTNeKNa2T+mlvMT8aF+IoUbb1p1NDz99sY/0N/F9gOgG0iHvNou1CvKbRdkA/NXl5R8sBO2Por0AyTLFY1NxzzqTi5KGuQYc2jviXjXpys2oTng3y2004xTbuB6QWrBbJqBLphmZXGZqnYy53SdOuT34vIEpiACB2K4cmUrtIXj
job_id: '3571439'
log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-02-10_05:30:02-upgrade:luminous-p2p-luminous-distro-basic-smithi/3571439/teuthology.log
suite_branch: luminous
wait_time: '0:12:24'
os_version: '16.04'
branch: luminous
pcp_grafana_url:
email: ceph-qa@lists.ceph.com
archive_path: /home/teuthworker/archive/teuthology-2019-02-10_05:30:02-upgrade:luminous-p2p-luminous-distro-basic-smithi/3571439
updated: '2019-02-11 14:41:26'
description: 'upgrade:luminous-p2p/luminous-p2p-stress-split/{0-cluster/{openstack.yaml
  start.yaml} 1-ceph-install/luminous.yaml 1.1-pg-log-overrides/short_pg_log.yaml
  2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{radosbench.yaml
  rbd-cls.yaml rbd-import-export.yaml rbd_api.yaml readwrite.yaml snaps-few-objects.yaml}
  5-finish-upgrade.yaml 7-final-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml}
  supported/ubuntu_latest.yaml thrashosds-health.yaml}'
started: '2019-02-11 12:45:25'
last_in_suite: false
machine_type: smithi
sentry_event:
posted: '2019-02-10 05:30:21'
teuthology_branch: master
sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250
name: teuthology-2019-02-10_05:30:02-upgrade:luminous-p2p-luminous-distro-basic-smithi
roles:
- [mon.a, mon.b, mon.c, mgr.x, osd.0, osd.1, osd.2]
- [osd.3, osd.4, osd.5]
- [client.0]
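# overrides are merged into the matching tasks above. log-whitelist entries
# are regexes for cluster-log health warnings that are expected while OSDs
# are being thrashed and must not fail the run. The
# osd_max/min_pg_log_entries = 2/1 settings come from the
# short_pg_log.yaml fragment named in the description, forcing a tiny pg
# log so the pglog_hardlimit path is actually exercised.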
overrides:
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - but it is still running
    - wrongly marked me down
    - objects unfound and apparently lost
    - log bound mismatch
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(SMALLER_PGP_NUM\)
    - \(OBJECT_
    - \(REQUEST_SLOW\)
    - \(TOO_FEW_PGS\)
    - \(MON_DOWN\)
    - slow request
    fs: xfs
    conf:
      global:
        osd_max_pg_log_entries: 2
        osd_min_pg_log_entries: 1
      mon:
        debug mon: 20
        debug paxos: 20
        mon warn on osd down out interval zero: false
        debug ms: 1
      osd:
        debug osd: 25
        debug filestore: 20
        debug journal: 20
        debug ms: 1
    sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
  workunit:
    sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250
    branch: luminous
  install:
    ceph:
      sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250
  admin_socket:
success: true
failure_reason:
status: pass
nuke_on_error: true
os_type: ubuntu
runtime: '1:56:01'
suite_sha1: 1eab56e9f55cfd99f07ab11f9f6da9c1cd3ff250