log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2016-12-31_11:30:02-rados-kraken-distro-basic-smithi/678652/teuthology.log
archive_path: /home/teuthworker/archive/teuthology-2016-12-31_11:30:02-rados-kraken-distro-basic-smithi/678652
description: rados/thrash-erasure-code-big/{cluster/{12-osds.yaml openstack.yaml} fs/btrfs.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore.yaml rados.yaml thrashers/fastread.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
duration: 0:11:28
email: ceph-qa@ceph.com
failure_reason:
flavor: basic
job_id: 678652
kernel:
last_in_suite: False
machine_type: smithi
name: teuthology-2016-12-31_11:30:02-rados-kraken-distro-basic-smithi
nuke_on_error: True
os_type:
os_version:
overrides:
  ceph:
    log-whitelist:
    fs: btrfs
    conf:
      global:
        ms inject socket failures: 5000
      mon:
        debug ms: 1
        debug mon: 20
        debug paxos: 20
        mon keyvaluedb: leveldb
      osd:
        osd op queue cut off: debug_random
        debug ms: 1
        debug journal: 20
        debug osd: 25
        osd objectstore: filestore
        osd debug verify cached snaps: True
        osd debug verify missing on start: True
        osd op queue: debug_random
        osd sloppy crc: True
        osd op thread timeout: 60
        debug filestore: 20
        osd heartbeat use min delay socket: True
    sha1: cc0e1cf0681e673cc4ec0fa2e4f3b5b98cd65c62
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
        osd default pool size: 2
        debug mon: 1
        debug paxos: 20
        debug ms: 20
  workunit:
    sha1: cc0e1cf0681e673cc4ec0fa2e4f3b5b98cd65c62
  install:
    ceph:
      sha1: cc0e1cf0681e673cc4ec0fa2e4f3b5b98cd65c62
  admin_socket:
owner: scheduled_teuthology@teuthology
pid:
roles:
- ['osd.0', 'osd.1', 'osd.2', 'client.0', 'mon.a']
- ['osd.3', 'osd.4', 'osd.5', 'mon.b']
- ['osd.6', 'osd.7', 'osd.8', 'mon.c']
- ['osd.9', 'osd.10', 'osd.11']
sentry_event:
status: pass
success: True
branch: kraken
seed:
sha1: cc0e1cf0681e673cc4ec0fa2e4f3b5b98cd65c62
subset:
suite:
suite_branch: kraken
suite_path:
suite_relpath:
suite_repo:
suite_sha1: cc0e1cf0681e673cc4ec0fa2e4f3b5b98cd65c62
targets:
  ubuntu@smithi046.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjCgmaH6vPnOjp6xGDE+lZVHhKUBPSRO8YfFlSlkm6QRibY5zaZxnd+1zXbCJvrOaS5mo7mYP+/XUP/H3RRJBDMaKS0vRCfnGlOz+r+peew7Zy8CRqZPbtTJo/6LDLb8nPpH8TGQLYXIMSEJbwyiM8+mZgMOff5LaADx/Tn0369UC9BN6DmHaonTaYA85wcQAAR2WOqoOeAvN2UnBLCZCG+4QO5V/JdraDQNHZOO21WEzUwO9uHHO49NSZ1ueYVZptdF7u8XCVMMZIPqU3fqhw9XsKab1+JWWLu7Y+qwAbrN2/klU61/ziYpw2TtI1EPOynWmn0nEk5Id3xe3HGOzD
  ubuntu@smithi015.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZVTBOQeUHA3ivNGDBwF5804SWiFYAXBz5c23Rgvik4lpjMMw9bctpXQpVNKa3Mzcau2hvRA2mAC+r5UEPH8xoEkr+TykPg3LSZloMEx1yMs6evxxFJHfd/IqtW+G/tnEXYcwtypwoNKeDXC55a0wlPxAfGc6KzHcNDinQ+7RvkXOEoGe7LGvrjtayGpdJdwPPxhKp1tMV8jf59ivoWrAeaCf9uakm7xqj6DBzB/X4ZrSSXd6On1/3qcA3+sd4qG9fn62z/C+F1Mn2dp8gzbzPcmBzNI3SobEMB5mBvmRsrP9gbe+Y9VAjNF0MNLibKuWN/MilAWm/hC7ouEyLGQX/
  ubuntu@smithi062.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwBR9LVk3oNnZ8QOf3nEciy03C+sBZ1MNjyEHRnZBJUC+DQWvWU4kIyGAiVQS1TuQ9yYZh2TY2URO6Me0TyZoyqx3TbSzlMlyVAJN9Eq2vuTPFkqDtzmbwdg7cUhVgqgZ6Jc8PltqpdjW8PjYmLUAd09OxP6H6Zv7FaYKk+L6p6JcBRPQdWi8Xu39YOUTpcCQDxImNHXseT5Lp+CWpV8gaZ0HhC1cXoOGYjJoG2jzfa4u745025nT05TSe+ukLmbWY/9DMCWxMXLop/9BsSCvZWpVeTvtO3ZEvmHHZ3cV3AHlYtbdtNGTV6M1C/BQwXXa9sChQMF7AvWoikZBGfpE/
  ubuntu@smithi110.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDrVxvP9Y2PwxYaKf9sdaPIdsMgAx5CI16qoTh1pfkHtRB5b/e6iIltN4OW7Jzaytq53sBZ+WC7bTNnsvVjO2q2N0tP2fooOTOFg/qpqvLpCb9vafPvpBg/wsk/nqUCQSrjpI01i9f7FImfuE6ka9PQuAjz2TvmRAoFTwq5a1if7/gC6cz/ahhG2CaHQyn5QDj5457z0EIfcg6ef69b9IjIPez87wb7URdbQQgw6Dy4XQJL7qOfipFlW8X5PRX3lmbOKo8145To8cJooZHHFP+DQMSZ2poIe7SHlcqxSUNydf47HfMhkmN3zeaiqBvBEK1oiFRVJlVUCTcOxWe9WWST
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock.check:
- install:
- ceph:
    cluster: ceph
    log-whitelist:
    - wrongly marked me down
    - objects unfound and apparently lost
    - slow request
    conf:
      global:
        ms inject socket failures: 5000
      mon:
        debug paxos: 20
        debug mon: 20
        debug ms: 1
        mon osd pool ec fast read: 1
        mon keyvaluedb: leveldb
      osd:
        osd op queue cut off: debug_random
        debug ms: 1
        debug journal: 20
        osd scrub max interval: 120
        osd max backfills: 2
        debug osd: 25
        osd objectstore: filestore
        osd debug reject backfill probability: 0.3
        osd debug verify missing on start: True
        osd op queue: debug_random
        osd sloppy crc: True
        osd debug verify cached snaps: True
        osd scrub min interval: 60
        osd op thread timeout: 60
        debug filestore: 20
        osd heartbeat use min delay socket: True
    fs: btrfs
    sha1: cc0e1cf0681e673cc4ec0fa2e4f3b5b98cd65c62
- thrashosds:
    dump_ops_enable: true
    optrack_toggle_delay: 2.0
    noscrub_toggle_delay: 2.0
    chance_pgpnum_fix: 1
    timeout: 1200
    min_in: 4
    chance_pgnum_grow: 1
    sighup_delay: 0.1
- rados:
    op_weights:
      snap_remove: 50
      write: 0
      rollback: 50
      setattr: 25
      read: 100
      copy_from: 50
      snap_create: 50
      rmattr: 25
      append: 100
      delete: 50
    ops: 400
    ec_pool: True
    clients:
    objects: 50
    erasure_code_profile:
      name: lrcprofile
      plugin: lrc
      k: 4
      m: 2
      l: 3
      ruleset-failure-domain: osd
    write_append_excl: False
teuthology_branch: master
verbose: True
pcp_grafana_url: http://pcp.front.sepia.ceph.com:44323/grafana/index.html#/dashboard/script/index.js?time_to=2016-12-31T22%3A49%3A30&time_from=2016-12-31T22%3A39%3A35&hosts=smithi062%2Csmithi015%2Csmithi046%2Csmithi110
priority:
user:
queue:
posted: 2016-12-31 11:31:31
started: 2016-12-31 22:39:06
updated: 2016-12-31 22:53:06
status_class: success
runtime: 0:14:00
wait_time: 0:02:32