- log_href:
http://qa-proxy.ceph.com/teuthology/mchangir-2021-10-18_13:38:47-upgrade:nautilus-x:onetime:upgrade-nautilus-to-pacific.yaml-wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific-distro-basic-smithi/6448536/teuthology.log
- archive_path:
/home/teuthworker/archive/mchangir-2021-10-18_13:38:47-upgrade:nautilus-x:onetime:upgrade-nautilus-to-pacific.yaml-wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific-distro-basic-smithi/6448536
- description:
upgrade:nautilus-x:onetime:upgrade-nautilus-to-pacific.yaml
- duration:
2:28:45
- email:
- failure_reason:
timeout expired in wait_until_healthy
- flavor:
default
- job_id:
6448536
- kernel:
- last_in_suite:
False
- machine_type:
smithi
- name:
mchangir-2021-10-18_13:38:47-upgrade:nautilus-x:onetime:upgrade-nautilus-to-pacific.yaml-wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific-distro-basic-smithi
- nuke_on_error:
True
- os_type:
centos
- os_version:
8.4
- overrides:
- admin_socket:
- branch:
wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific
- ceph:
- conf:
- client:
- admin socket:
/var/run/ceph/$cluster-$name.$pid.asok
- client mount timeout:
600
- pid file:
/var/run/ceph/$cluster-$name.pid
- rados mon op timeout:
15m
- rados osd op timeout:
15m
- global:
- bluestore warn on legacy statfs:
False
- bluestore warn on no per pool omap:
False
- mon pg warn min per osd:
0
- mon warn on pool no app:
False
- ms bind msgr2:
False
- mds:
- mds bal frag:
True
- mds bal fragment size max:
10000
- mds bal merge size:
5
- mds bal split bits:
3
- mds bal split size:
100
- mds op complaint time:
180
- mds verify scatter:
True
- mds_max_snaps_per_dir:
4096
- osd op complaint time:
180
- rados mon op timeout:
900
- rados osd op timeout:
900
- mgr:
- debug mgr:
20
- debug ms:
1
- mon:
- debug mon:
20
- debug ms:
1
- debug paxos:
20
- mon op complaint time:
180
- mon warn on legacy crush tunables:
False
- mon warn on osd down out interval zero:
False
- osd:
- bdev async discard:
True
- bdev enable discard:
True
- bluestore allocator:
bitmap
- bluestore block size:
96636764160
- bluestore fsck on mount:
True
- debug ms:
1
- debug osd:
20
- mon osd backfillfull_ratio:
0.85
- mon osd full ratio:
0.9
- mon osd nearfull ratio:
0.8
- osd failsafe full ratio:
0.95
- osd objectstore:
bluestore
- osd op complaint time:
180
- fs:
xfs
- log-ignorelist:
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- failed to encode map
- overall HEALTH_
- \(FS_
- \(MDS_
- \(OSD_
- \(MON_DOWN\)
- \(CACHE_POOL_
- \(POOL_
- \(MGR_DOWN\)
- \(PG_
- \(SMALLER_PGP_NUM\)
- Monitor daemon marked osd
- Behind on trimming
- Manager daemon
- Not found or unloadable
- evicting unresponsive client
- reporting legacy \(not per-pool\) BlueStore omap usage stats
- log-whitelist:
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- sha1:
906f2dc018bed4f5ccc5a460242cf65261b2bb82
- ceph-deploy:
- bluestore:
True
- conf:
- client:
- log file:
/var/log/ceph/ceph-$name.$pid.log
- mon:
- osd:
- bdev async discard:
True
- bdev enable discard:
True
- bluestore block size:
96636764160
- bluestore fsck on mount:
True
- mon osd backfillfull_ratio:
0.85
- mon osd full ratio:
0.9
- mon osd nearfull ratio:
0.8
- osd failsafe full ratio:
0.95
- osd objectstore:
bluestore
- fs:
xfs
- install:
- ceph:
- sha1:
906f2dc018bed4f5ccc5a460242cf65261b2bb82
- selinux:
- whitelist:
- scontext=system_u:system_r:logrotate_t:s0
- workunit:
- branch:
wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific
- sha1:
f58155f6bfa980bd514bd544bb618888a58e4635
- owner:
scheduled_mchangir@teuthology
- pid:
- roles:
- ['mon.a', 'mon.b', 'mon.c', 'mgr.x', 'mgr.y', 'mds.a', 'mds.b', 'mds.c', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
- ['client.0']
- ['client.1']
- sentry_event:
https://sentry.ceph.com/organizations/ceph/?query=a5c387d3f8514bc4b7b4604764b86182
- status:
fail
- success:
False
- branch:
wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific
- seed:
- sha1:
906f2dc018bed4f5ccc5a460242cf65261b2bb82
- subset:
- suite:
- suite_branch:
wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific
- suite_path:
- suite_relpath:
- suite_repo:
- suite_sha1:
f58155f6bfa980bd514bd544bb618888a58e4635
- targets:
- smithi045.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCahfh1NhClZ5y7VJSZdC7tYqJGovEEfbUSHm+Xyze6KVo5Z9XeZ6jhGiuOhHOBZ1FdWN7/GLMDKZrXnYSZgJaEbqti4XkUhTEyOz1TbjraYPIEowVKvBpDo0+tVUjzicebNYHGoCbOQwgqzL7R+ZLudF11BQ0p8ijoWN7sueb+AXK01Tu3bwml/azJJ8fKMFMy1k+LeF+1ts0O7v0gYBeUifpbptXwRJMucZntZQkNJa08Zr266T1xQDKtqvZjEj0H0uwhgStPyTE7o7gYJhYSqeUBh4TpyY2Uca8z+PDbFs07XfkBF8K2bJivvf3Z7WGZCeCtfvjoohOLU1hJFnvZ7p8DG6QHq4cPsBtXWMX2hddXSAt03cFR4qFbFfmXSEPEwr0lINL5aMiyVgPjAiWwYIdhcehqHWDIFJfw65ievo5ZiwyPXvto0qitoCe/3gjBZlHOhcYoLl9Y7KHxoTFOLZIoUk+v0GrOtS5jArT8o6Bqz2ocUoC9oig4Qv0Sr7s=
- smithi073.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8WaOczKCT6puJsqcF0jnuofUL7ddxpmGQsBlJoUJbw240VDgyC1O6rN5PYun+/nGwQXsOOCAXPHXM2uTpJCrLveYolpWH4n1Eebi7sr37Jz8rpuia+0/4aO0Bh4POWHYqZhXn1TV2rZDjs4MiofhYFMpl4jhPMHGh0lLoN6jFXtj/eET720L0z1/WAJABv44J9kxpvUZYPDc5fX0NQPIYCRb2LFez7RsFML+NAYSp6Djw2esogi63l7tso1Cmsz6O5PAMrDA9WODN1KZsXylKQxc2ObWGM85N0S3Ez6suGEy3jyLIlRkRWsjGrM5TsCNi0zgGX3YkAwEBU6k4hju9vJgq6rNYZs2jLDoBG3G9s00ouDS30NglV5PL7x9mcvKGyx7D4JtIPRmtGSS+1SgQMYGvORUMl19BImXkhIG/Ahd6m81GClM8RksYG43wq/akfq+vBvsfsfEExiujoXZslgcIPZi+QsbI0copFJUru1NhzQh4e6a15ahQwgLU8AE=
- smithi136.front.sepia.ceph.com:
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCu1QBdfCJ6DNmTDyXQZY3wncLslswJVTf43XAT5EW3YnjskGK8COwUo4XtvFduPtjwE9JgRsb5XfmaSuNvuNzT4xYvpqfPffNSYGqDyhPNvA/VeDAHHBxAYeCzfhAl92yMH3cyYipip3QnI5JCMUuDYFniDaI6ZN2iUilCTnYqkueWDhK4dxWU2E51mMYH3yUQGsqQ5X+LiYM4XkEhRVcRizXSoC3gdF9hY74B0effEDuyqNI00mp1GkhtNV9u+CBYebpgfAJPPeao6HPAM0D7kN1cy7dgRK/aeZgkehA+jXCqdnD9CJVoxdN09Xluc7C33RJMls2r426Rd0vqnWrY8e07k3JNHSa2BQBOkaoub8wXQOF72Bhapk6NHvIpMtlPJlP1Z2G8N/3XC2L9Rp2HBPIAmmoPxgsVbSgdLxfIXqnAxJ3T6W8xb+BWPsn0b64efLbO91QVHLtONnCAr6ReLg8dSI7lAAUVlhNf7/pbVW5ppvEGc0Gtk/9kHCClX+8=
- tasks:
- internal.buildpackages_prep:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- exec:
- install:
- branch:
nautilus
- exclude_packages:
- libcephfs-dev
- librados3
- ceph-mgr-dashboard
- ceph-mgr-diskprediction-local
- ceph-mgr-rook
- ceph-mgr-cephadm
- cephadm
- ceph-immutable-object-cache
- python3-rados
- python3-rgw
- python3-rbd
- python3-cephfs
- extra_packages:
- extra_system_packages:
- deb:
- bison
- flex
- libelf-dev
- libssl-dev
- network-manager
- iproute2
- util-linux
- dump
- indent
- libaio-dev
- libtool-bin
- uuid-dev
- xfslibs-dev
- rpm:
- bison
- flex
- elfutils-libelf-devel
- openssl-devel
- NetworkManager
- iproute
- util-linux
- libacl-devel
- libaio-devel
- libattr-devel
- libtool
- libuuid-devel
- xfsdump
- xfsprogs
- xfsprogs-devel
- libaio-devel
- libtool
- libuuid-devel
- xfsprogs-devel
- sha1:
906f2dc018bed4f5ccc5a460242cf65261b2bb82
- print:
**** done installing nautilus
- print:
**** starting ceph daemons
- ceph:
- create_rbd_pool:
False
- mon.a:
- conf:
- client:
- admin socket:
/var/run/ceph/$cluster-$name.$pid.asok
- client mount timeout:
600
- pid file:
/var/run/ceph/$cluster-$name.pid
- rados mon op timeout:
15m
- rados osd op timeout:
15m
- global:
- bluestore warn on legacy statfs:
False
- bluestore warn on no per pool omap:
False
- mon pg warn min per osd:
0
- mon warn on pool no app:
False
- ms bind msgr2:
False
- mds:
- mds bal frag:
True
- mds bal fragment size max:
10000
- mds bal merge size:
5
- mds bal split bits:
3
- mds bal split size:
100
- mds op complaint time:
180
- mds verify scatter:
True
- mds_max_snaps_per_dir:
4096
- osd op complaint time:
180
- rados mon op timeout:
900
- rados osd op timeout:
900
- mgr:
- debug mgr:
20
- debug ms:
1
- mon:
- debug mon:
20
- debug ms:
1
- debug paxos:
20
- mon op complaint time:
180
- mon warn on legacy crush tunables:
False
- mon warn on osd down out interval zero:
False
- osd:
- bdev async discard:
True
- bdev enable discard:
True
- bluestore allocator:
bitmap
- bluestore block size:
96636764160
- bluestore fsck on mount:
True
- debug ms:
1
- debug osd:
20
- mon osd backfillfull_ratio:
0.85
- mon osd full ratio:
0.9
- mon osd nearfull ratio:
0.8
- osd failsafe full ratio:
0.95
- osd objectstore:
bluestore
- osd op complaint time:
180
- fs:
xfs
- log-ignorelist:
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- failed to encode map
- overall HEALTH_
- \(FS_
- \(MDS_
- \(OSD_
- \(MON_DOWN\)
- \(CACHE_POOL_
- \(POOL_
- \(MGR_DOWN\)
- \(PG_
- \(SMALLER_PGP_NUM\)
- Monitor daemon marked osd
- Behind on trimming
- Manager daemon
- Not found or unloadable
- evicting unresponsive client
- reporting legacy \(not per-pool\) BlueStore omap usage stats
- log-whitelist:
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- sha1:
906f2dc018bed4f5ccc5a460242cf65261b2bb82
- cluster:
ceph
- exec:
- print:
**** done starting ceph daemons
- print:
**** starting ceph-fuse
- ceph-fuse:
- exec:
- print:
**** done starting ceph-fuse
- parallel:
- workunit:
- clients:
- branch:
wip-mchangir-mds-fix-nautilus-to-pacific-upgrade-issue-pacific
- sha1:
f58155f6bfa980bd514bd544bb618888a58e4635
- exec:
- mon.a:
- for ((i = 0; i < 15; i++)); do ceph --cluster ceph -s --format=json; sleep 60; done
- exec:
- client.0:
- for ((i = 1; i <= 1260; i++)); do mkdir /home/ubuntu/cephtest/mnt.0/.snap/snap${i}; sleep 5; done
- exec:
- client.0:
- for ((i = 1; i <= 1260; i++)); do rmdir /home/ubuntu/cephtest/mnt.0/.snap/snap${i}; done
- sleep 120
- mon.a:
- exec:
- parallel:
- sleep:
- ceph.stop:
- daemons:
- mds.a
- mds.b
- mds.c
- osd.0
- osd.1
- osd.2
- osd.3
- mgr.x
- mgr.y
- mon.a
- mon.b
- mon.c
- ceph-fuse:
- sleep:
- print:
**** about to restart ceph
- ceph.restart:
- daemons:
- wait-for-healthy:
False
- ceph.restart:
- daemons:
- wait-for-healthy:
False
- wait-for-osds-up:
True
- ceph.restart:
- daemons:
- wait-for-healthy:
True
- wait-for-osds-up:
True
- sleep:
- ceph.healthy:
- print:
**** we have a healthy system!
- exec:
- mon.a:
- ceph osd require-osd-release pacific --yes_i_really_mean_it
- ceph-fuse:
- parallel:
- exec:
- mon.a:
- for ((i = 0; i < 15; i++)); do ceph --cluster ceph -s --format=json; sleep 60; done
- exec:
- client.0:
- for ((i = 1; i <= 1260; i++)); do mkdir /home/ubuntu/cephtest/mnt.0/.snap/snap${i}; sleep 5; done
- exec:
- client.0:
- for ((i = 1; i <= 1260; i++)); do rmdir /home/ubuntu/cephtest/mnt.0/.snap/snap${i}; done
- mon.a:
- sleep:
- teuthology_branch:
master
- verbose:
False
- pcp_grafana_url:
- priority:
- user:
- queue:
- posted:
2021-10-18 13:38:51
- started:
2021-10-18 13:38:52
- updated:
2021-10-18 16:17:56
- status_class:
danger
- runtime:
2:39:04
- wait_time:
0:10:19