---
log_href: http://qa-proxy.ceph.com/teuthology/haomai-2016-07-30_09:57:47-rados-wip-haomai-testing-distro-basic-vps/342397/teuthology.log
archive_path: /var/lib/teuthworker/archive/haomai-2016-07-30_09:57:47-rados-wip-haomai-testing-distro-basic-vps/342397
description: rados/thrash-erasure-code-big/{leveldb.yaml rados.yaml cluster/{12-osds.yaml openstack.yaml} fs/xfs.yaml msgr-failures/few.yaml thrashers/fastread.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
duration: '2:30:19'
email: haomaiwang@gmail.com
failure_reason:
flavor: basic
job_id: '342397'
kernel:
last_in_suite: false
machine_type: vps
name: haomai-2016-07-30_09:57:47-rados-wip-haomai-testing-distro-basic-vps
nuke_on_error: true
os_type:
os_version:
overrides:
  ceph:
    log-whitelist:
    fs: xfs
    conf:
      global:
        ms inject socket failures: 5000
      mon:
        debug ms: 1
        debug mon: 20
        debug paxos: 20
        mon keyvaluedb: leveldb
      osd:
        osd op queue cut off: debug_random
        debug ms: 1
        debug journal: 20
        debug osd: 20
        osd debug verify missing on start: true
        osd op queue: debug_random
        osd sloppy crc: true
        debug filestore: 20
        osd heartbeat use min delay socket: true
    sha1: c3fa3f08fcf570c0a193090b8e62598d3b2af116
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
        osd default pool size: 2
        debug mon: 1
        debug paxos: 20
        debug ms: 20
    branch:
  workunit:
    sha1: c3fa3f08fcf570c0a193090b8e62598d3b2af116
  install:
    ceph:
      sha1: c3fa3f08fcf570c0a193090b8e62598d3b2af116
  admin_socket:
    branch: wip-haomai-testing
owner: haomai
pid:
roles:
  - ['osd.0', 'osd.1', 'osd.2', 'client.0', 'mon.a']
  - ['osd.3', 'osd.4', 'osd.5', 'mon.b']
  - ['osd.6', 'osd.7', 'osd.8', 'mon.c']
  - ['osd.9', 'osd.10', 'osd.11']
sentry_event:
status: pass
success: true
branch: wip-haomai-testing
seed:
sha1: c3fa3f08fcf570c0a193090b8e62598d3b2af116
subset:
suite:
suite_branch: master
suite_path:
suite_relpath:
suite_repo:
suite_sha1:
targets:
  ubuntu@vpm163.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+0dI0GPn+aA3PcbW+sIM1rIorJa5i4yYYQZ1fJ70rKNj/Iszr93hkKjZqKl9NjZtrZPMrwAxyRFsfpfKPBlvLRe3AM7Ety6gGI1zIojeReay0hbm+BVLVlYhjREKE5pGIRHsif56jyIDkbOQ8m52Kh0bHxI01aSEkVgsx3Vzvwp4YYbTrUHY0kozrK16/gKep0SiviCRjorSRTVMhTVko2xvtRlmIJVgTVzZEWygmECn86tVBYNk+M3JTkkgdzxEUg7rtrRzF/0LzyyX6K8iGdeWePhP0pm/xXO55llnskdAY1EtIl4C72aHa7qBSWwzYUwFheAF8rL+RLvUEYYx1
  ubuntu@vpm059.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCGZLDalRgzsvdaE76EtheykQglLYnD/LYAbwu8x86q6oL8n/6bWG7kUOtyd1HzTQS7aFPgSv3bAryQ9HXsYNQL6/oAkljE0dxZk1J59ur0yBx/dSHpY5Wuk22EaOFrtbV3VbPqNau18o686J3YGfdG9vzFJ2w+pAb0xsPmxbYkZBEH9Rtoe/zdHLOos4CsIYNPeCMHbdTheMTggQjn07cd7yGsLdYeJiSdU0iY14AfVV1ABYizOijZwluhkSzbXJ7bRlNWT6XfN9xoFADUf0wObk+J+EK2i6GpAmZPfrALhSY0uXnwjuKQe6RUNEApkBHUMwR9zeMStf8PlkcWtpX
  ubuntu@vpm031.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDITFizPpOSMCR7XjYYoPhKIXjn8r56Q/GoHiYs7H7tQ/XS16IFo3Z9TCFpsUfO4uwAg9J3yENnUGryHQq7uhOSHvD7QTMdC1WibDyyDASaO31aK5FjgYiS+YzxTa5aVM/shzSgY0xPA0I6Z4xXArFSYa8XLWNkbX0fqszjcQCfh4s1UZGWlZ601ThHeED2T/z/oN0+707kRwFYtqfjZ1p2TlG5sBWKBENfdd/CNv0Dgx3tWWWMxYTEmLWNAMzKKtK15iCfSLyIlyXS9aMkafnHVGszeYZRpViuMT3IwUxhcQKLSeMhOGODr/c4DXj041zPMP+11uu0fWc5cWghfzij
  ubuntu@vpm119.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdylQmb/s4eItZ24/T4enBjtdB9H/5P6TF3yOUmR8fLtRURCBRpJ2SWVuYkrJNfnvn2H4YUVpj6+amhY/As1X7qEkxpt327w/IrxdgGxhz1V77wXgR/3vRxYjjwo+n0qWlIXv+2ghtg5QWiGRc3HoVZ1tzZICKbcyFcacYEJ3bSrsj89WLItOHawEHksc+yYHdvY5l0sXQQd+NQc5cceu2kaa9Q5n+ikwOA2UZnVNIVHn5SkNfk7p4C3ySYgKrnuvctg6ECtc5titnnR4ANcvHCy+FA1d08AeiDe4eNq10mOL00qVxenoz0f0xRKS86RoKhDVDgJHnqdiW+Jc8cFMN
tasks:
  - internal.buildpackages_prep:
  - internal.lock_machines:
  - internal.save_config:
  - internal.check_lock:
  - internal.add_remotes:
  - internal.connect:
  - internal.push_inventory:
  - internal.serialize_remote_roles:
  - internal.check_conflict:
  - internal.check_ceph_data:
  - internal.vm_setup:
  - kernel:
  - internal.base:
  - internal.archive_upload:
  - internal.archive:
  - internal.coredump:
  - internal.sudo:
  - internal.syslog:
  - internal.timer:
  - pcp:
  - selinux:
  - ansible.cephlab:
  - clock.check:
  - ansible.cephlab:
  - clock.check:
  - install:
  - ceph:
      cluster: ceph
      log-whitelist:
        - wrongly marked me down
        - objects unfound and apparently lost
        - slow request
      conf:
        global:
          ms inject socket failures: 5000
        mon:
          debug paxos: 20
          debug mon: 20
          debug ms: 1
          mon osd pool ec fast read: 1
          mon keyvaluedb: leveldb
        osd:
          osd op queue cut off: debug_random
          debug ms: 1
          debug journal: 20
          osd scrub max interval: 120
          osd max backfills: 1
          debug osd: 20
          osd debug reject backfill probability: 0.3
          osd debug verify missing on start: true
          osd op queue: debug_random
          osd sloppy crc: true
          osd scrub min interval: 60
          debug filestore: 20
          osd heartbeat use min delay socket: true
      fs: xfs
      sha1: c3fa3f08fcf570c0a193090b8e62598d3b2af116
  - thrashosds:
      dump_ops_enable: true
      optrack_toggle_delay: 2.0
      noscrub_toggle_delay: 2.0
      chance_pgpnum_fix: 1
      timeout: 1200
      min_in: 4
      chance_pgnum_grow: 1
      sighup_delay: 0.1
  - rados:
      op_weights:
        snap_remove: 50
        write: 0
        rollback: 50
        setattr: 25
        read: 100
        copy_from: 50
        snap_create: 50
        rmattr: 25
        append: 100
        delete: 50
      ops: 400
      ec_pool: true
      clients:
      objects: 50
      erasure_code_profile:
        name: lrcprofile
        plugin: lrc
        k: 4
        m: 2
        l: 3
        ruleset-failure-domain: osd
      write_append_excl: false
teuthology_branch: master
verbose: false
pcp_grafana_url: http://pcp.front.sepia.ceph.com:44323/grafana/index.html#/dashboard/script/index.js?time_to=2016-07-31T16%3A13%3A09&time_from=2016-07-31T13%3A43%3A47&hosts=vpm059%2Cvpm119%2Cvpm163%2Cvpm031
priority:
user:
queue:
posted: '2016-07-30 14:12:34'
started: '2016-07-31 06:57:48'
updated: '2016-07-31 16:19:19'
status_class: success
runtime: '9:21:31'
wait_time: '6:51:12'