- log_href:
http://qa-proxy.ceph.com/teuthology/teuthology-2017-12-31_04:23:01-upgrade:jewel-x-luminous-distro-basic-vps/2010818/teuthology.log
- archive_path:
/home/teuthworker/archive/teuthology-2017-12-31_04:23:01-upgrade:jewel-x-luminous-distro-basic-vps/2010818
- description:
upgrade:jewel-x/point-to-point-x/{distros/centos_7.3.yaml point-to-point-upgrade.yaml}
- duration:
1:28:40
- email:
ceph-qa@ceph.com
- failure_reason:
- flavor:
basic
- job_id:
2010818
- kernel:
- last_in_suite:
False
- machine_type:
vps
- name:
teuthology-2017-12-31_04:23:01-upgrade:jewel-x-luminous-distro-basic-vps
- nuke_on_error:
True
- os_type:
centos
- os_version:
7.3
- overrides:
- s3tests:
- ceph-deploy:
- conf:
- client:
- log file:
/var/log/ceph/ceph-$name.$pid.log
- mon:
- ceph-fuse:
- client.0:
- mount_wait:
60
- mount_timeout:
120
- workunit:
- sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
- branch:
luminous
- ceph:
- log-whitelist:
- slow request
- reached quota
- scrub
- osd_map_max_advance
- wrongly marked
- overall HEALTH_
- \(MGR_DOWN\)
- \(OSD_
- \(PG_
- \(CACHE_
- fs:
xfs
- conf:
- global:
- mon warn on pool no app:
False
- osd heartbeat grace:
100
- mon lease ack timeout:
25
- mon lease:
15
- mon:
- mon debug unsafe allow tier with nonempty snaps:
True
- debug mon:
20
- debug paxos:
20
- debug ms:
1
- osd:
- debug ms:
1
- debug journal:
20
- debug osd:
25
- osd map cache size:
1100
- osd map max advance:
1000
- debug filestore:
20
- sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
- install:
- ceph:
- sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
- admin_socket:
- owner:
scheduled_teuthology@teuthology
- pid:
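The `log-whitelist` entries in the `ceph` override above are regular expressions searched for in the cluster log; a line matching a whitelisted pattern is not counted as a failure, which is why the health codes escape their parentheses. A minimal sketch of that matching (the sample log line is hypothetical, not taken from this run):

```python
import re

# Whitelist patterns copied from the override above; they are regexes, so
# "\(MGR_DOWN\)" matches the literal text "(MGR_DOWN)".
whitelist = [
    r"slow request",
    r"reached quota",
    r"scrub",
    r"osd_map_max_advance",
    r"wrongly marked",
    r"overall HEALTH_",
    r"\(MGR_DOWN\)",
    r"\(OSD_",
    r"\(PG_",
    r"\(CACHE_",
]

# Hypothetical cluster-log line of the kind these patterns are meant to cover.
line = "cluster [WRN] overall HEALTH_WARN Degraded data redundancy (PG_DEGRADED)"

if any(re.search(p, line) for p in whitelist):
    print("whitelisted: not treated as a test failure")
```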
- roles:
- ['mon.a', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'mgr.x']
- ['mon.b', 'mon.c', 'osd.3', 'osd.4', 'osd.5', 'client.0']
- ['client.1']
- sentry_event:
- status:
pass
- success:
True
- branch:
luminous
- seed:
- sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
- subset:
- suite:
- suite_branch:
luminous
- suite_path:
- suite_relpath:
- suite_repo:
- suite_sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
- targets:
- vpm139.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC02k7hNt5TkEN9dJvvhpaa8T8xvgoycIQHg6oBamBHN+he4ztvFnTyjeYuwkXNMyi2GHHPA6w8T57H/wfFrenBjy1iv6O/9s56r9EMb+GrFQlbpl/OwpiuDCGWSwz+ut8qxAcfBgTJEpYXfQXmsCEQLT6I6bv66dLSdqebPNsU6X9XETzR23tEoMszZYfmxjoWAo6nQJJ6audJSXsZTJHlygibVvr35DDYDQjg2neh/OEwcSUJdp7MC9BH+t1KKWboMpjumnHF9OQ36JBVXvH/DfBmS0kL/17aVxoC72z18Zm2Whqp+PmmU98xb+4q4Tr6ZDy41U3YZ4eLVpOR+ml7
- vpm075.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCiwAii/ZT4rALhkj1qqDyih7gEMVus4lGr4NYE24W74+r/gb70xSmc3mlZJZS/w4A4P08uAYDjqPBiGrRo0SPYo/rnm9Av8BH0/YbuY0SVE6MMXv9V8mVN65d4uo1jaZnANOw1DK5LrDbFAqoWHH3DA3AKZq+q+qETbKlEzIDJ7iBzBplqMq6ztETdnqGsZ5Tm42LE2gNxOA5IDhlJq+0ltQccFO/lVmfhE/PU7jMvbwToXk4AKvP/CNlkRvQzwkCcmvFv4hHrutpOq1BJ8Y7iLjG9qQM00ha4nhGZQW0DzY77ODuoYwj7CACn+UUB5Osa9UVDlKd1X8dOgIzzNNJL
- vpm171.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjFVyrA1A4KEM4OUINPSVRQTgfyqRQWKJLZ+/mlO4FknQK3ShyjSLIvYXUoqZOn23W/s1z76batilTnoJwfQ68o/X/SB3yj+1gmh4qnRY1CYute0HtmYWvzXnHQFj6QbjzbXNE7cSWlhEfhgPYN748hjVnjMm8jzh355qf1A2sWks4Offp2t6oqPydsV25kBHLsPdIPoQTcuFZkUbcBTyA3BH1Xp53NF+uiON+PG6slpcuSt71m4yeq69/NR/M1fGy4gL7/QW9YO+4MEnNz51+cCgnq/YVCLeDZHAayydruDOhjW9MvcSGy+rDi7LqsRjtFMz/CRcwOtUqbBh6trs7
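The `targets` map pairs each locked VPS node with what teuthology records as its SSH public host key. A minimal sketch, assuming that reading, of turning the entries into `known_hosts`-style lines (the key strings below are truncated placeholders, not the full keys above):

```python
# Hostnames are taken from the targets map above; the key values here are
# truncated placeholders standing in for the full ssh-rsa keys.
targets = {
    "vpm139.front.sepia.ceph.com": "ssh-rsa AAAAB3NzaC1yc2E...",
    "vpm075.front.sepia.ceph.com": "ssh-rsa AAAAB3NzaC1yc2E...",
    "vpm171.front.sepia.ceph.com": "ssh-rsa AAAAB3NzaC1yc2E...",
}

# known_hosts format is "<hostname> <key-type> <base64-key>" per line.
for host, key in targets.items():
    print(f"{host} {key}")
```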
- tasks:
-
- internal.buildpackages_prep:
-
- internal.lock_machines:
-
- internal.save_config:
-
- internal.check_lock:
-
- internal.add_remotes:
-
- console_log:
-
- internal.connect:
-
- internal.push_inventory:
-
- internal.serialize_remote_roles:
-
- internal.check_conflict:
-
- internal.check_ceph_data:
-
- internal.vm_setup:
-
- kernel:
-
- internal.base:
-
- internal.archive_upload:
-
- internal.archive:
-
- internal.coredump:
-
- internal.sudo:
-
- internal.syslog:
-
- internal.timer:
-
- pcp:
-
- selinux:
-
- ansible.cephlab:
-
- clock:
-
- print:
**** v10.2.0 about to install
-
- install:
- tag:
v10.2.0
- exclude_packages:
- ceph-mgr
- libcephfs2
- libcephfs-devel
- libcephfs-dev
- librgw2
- sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
-
- print:
**** done v10.2.0 install
-
- ceph:
- fs:
xfs
- skip_mgr_daemons:
True
- sha1:
c4905daa94d8b2fbad32a4e39c3e3076380363c5
- cluster:
ceph
- log-whitelist:
- slow request
- reached quota
- scrub
- osd_map_max_advance
- wrongly marked
- overall HEALTH_
- \(MGR_DOWN\)
- \(OSD_
- \(PG_
- \(CACHE_
- conf:
- global:
- mon warn on pool no app:
False
- osd heartbeat grace:
100
- mon lease ack timeout:
25
- mon lease:
15
- mon:
- mon debug unsafe allow tier with nonempty snaps:
True
- debug mon:
20
- debug paxos:
20
- debug ms:
1
- osd:
- debug ms:
1
- debug journal:
20
- debug osd:
25
- osd map cache size:
1100
- osd map max advance:
1000
- debug filestore:
20
- add_osds_to_crush:
True
-
- print:
**** done ceph xfs
-
- sequential:
-
- print:
**** done workload v10.2.0
-
- install.upgrade:
- mon.a:
- project:
ceph
- branch:
jewel
- mon.b:
- project:
ceph
- branch:
jewel
- exclude_packages:
- ceph-mgr
- libcephfs2
- libcephfs-devel
- libcephfs-dev
-
- parallel:
- workload_jewel
- upgrade-sequence_jewel
-
- print:
**** done parallel jewel branch
-
- install.upgrade:
- client.1:
- project:
ceph
- branch:
jewel
- exclude_packages:
- ceph-mgr
- libcephfs2
- libcephfs-devel
- libcephfs-dev
-
- print:
**** done branch: jewel install.upgrade on client.1
-
- install.upgrade:
-
- print:
**** done branch: -x install.upgrade on mon.a and mon.b
-
- parallel:
- workload_x
- upgrade-sequence_x
-
- print:
**** done parallel -x branch
-
- exec:
- osd.0:
- ceph osd set-require-min-compat-client luminous
-
- install.upgrade:
-
- workunit:
- clients:
- client.1:
- rados/test-upgrade-v11.0.0.sh
- cls
- branch:
jewel
-
- print:
**** done final test on -x cluster
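The `tasks` list above is stored in the archived job YAML as a sequence of single-key mappings, one per task, in execution order. A minimal sketch (the file name is illustrative; the directory under `archive_path` holds the job config as YAML) that prints the task names so the upgrade sequence can be scanned at a glance:

```python
import yaml  # PyYAML

# Illustrative path: the job archive directory holds the config as YAML.
with open("orig.config.yaml") as f:
    job = yaml.safe_load(f)

# Each tasks entry is a single-key mapping, e.g. {"install.upgrade": {...}}.
for task in job.get("tasks", []):
    print(next(iter(task)))
```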
- teuthology_branch:
master
- verbose:
True
- pcp_grafana_url:
- priority:
- user:
- queue:
- posted:
2017-12-31 04:23:26
- started:
2017-12-31 04:51:53
- updated:
2017-12-31 07:45:55
- status_class:
success
- runtime:
2:54:02
- wait_time:
1:25:22
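The timing fields at the end of the record are mutually consistent: `runtime` spans `started` to `updated`, and `duration` plus `wait_time` adds up to `runtime`. A minimal check of that arithmetic (the field semantics are read off this record, not taken from a teuthology specification):

```python
from datetime import datetime, timedelta

def hms(value: str) -> timedelta:
    h, m, s = (int(part) for part in value.split(":"))
    return timedelta(hours=h, minutes=m, seconds=s)

started = datetime.strptime("2017-12-31 04:51:53", "%Y-%m-%d %H:%M:%S")
updated = datetime.strptime("2017-12-31 07:45:55", "%Y-%m-%d %H:%M:%S")

runtime = hms("2:54:02")
duration = hms("1:28:40")
wait_time = hms("1:25:22")

assert updated - started == runtime       # 2:54:02 of wall-clock time
assert duration + wait_time == runtime    # 1:28:40 + 1:25:22 == 2:54:02
print("timing fields are mutually consistent")
```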