Description: rados/thrash/{hobj-sort.yaml rados.yaml rocksdb.yaml 0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml clusters/{fixed-2.yaml openstack.yaml} fs/ext4.yaml msgr/async.yaml msgr-failures/osd-delay.yaml thrashers/pggrow.yaml workloads/cache-pool-snaps.yaml}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2016-07-20_14:22:00-rados-wip-yuri-testing_2016_7_20-distro-basic-smithi/325474/teuthology.log

Sentry event: http://sentry.ceph.com/sepia/teuthology/?q=d6dff52a541e4d6cb164c4cebe265c55

Performance graphs: http://pcp.front.sepia.ceph.com:44323/grafana/index.html#/dashboard/script/index.js?time_to=2016-07-23T22%3A18%3A05&time_from=2016-07-23T22%3A01%3A45&hosts=smithi013%2Csmithi014

Failure Reason:

smithi013.front.sepia.ceph.com:
  cmd: /usr/bin/git reset --hard origin/master
  rc: 128
  changed: False
  failed: True
  msg: Failed to checkout master
  stdout: ''
  stdout_lines: []
  stderr: |
    fatal: Unable to create '/home/teuthworker/.cache/src/keys/.git/index.lock': File exists.

    If no other git process is currently running, this probably means a
    git process crashed in this repository earlier. Make sure no other git
    process is running and remove the file manually to continue.
  _ansible_no_log: False
  _ansible_delegated_vars:
    ansible_host: localhost
  invocation:
    module_name: git
    module_args:
      repo: https://github.com/ceph/keys
      dest: /var/lib/teuthworker/.cache/src/keys
      remote: origin
      version: master
      force: True
      clone: True
      update: True
      recursive: True
      bare: False
      track_submodules: False
      verify_commit: False
      accept_hostkey: False
      refspec: None
      reference: None
      ssh_opts: None
      executable: None
      depth: None
      key_file: None

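Judging from the stderr, this is the usual stale index.lock case: an earlier git process on smithi013 died and left the lock file behind, so the Ansible git task could not check out master. A minimal remediation sketch, assuming no other git process is actually running on the host; both paths are taken from the error output above:

    # Verify no git process is still running against the cached repo (should print nothing).
    pgrep -af git

    # If nothing shows up, the lock is stale; remove it manually, as the
    # git error message itself advises.
    rm /home/teuthworker/.cache/src/keys/.git/index.lock

    # Retry the checkout that originally failed.
    cd /var/lib/teuthworker/.cache/src/keys
    git reset --hard origin/master
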
  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2016-07-20_14:22:00-rados-wip-yuri-testing_2016_7_20-distro-basic-smithi/325474/teuthology.log
  • archive_path: /var/lib/teuthworker/archive/yuriw-2016-07-20_14:22:00-rados-wip-yuri-testing_2016_7_20-distro-basic-smithi/325474
  • description: rados/thrash/{hobj-sort.yaml rados.yaml rocksdb.yaml 0-size-min-size-overrides/2-size-1-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml clusters/{fixed-2.yaml openstack.yaml} fs/ext4.yaml msgr/async.yaml msgr-failures/osd-delay.yaml thrashers/pggrow.yaml workloads/cache-pool-snaps.yaml}
  • duration: 0:16:41
  • email: ceph-qa@ceph.com
  • failure_reason: {'smithi013.front.sepia.ceph.com': {'cmd': '/usr/bin/git reset --hard origin/master', '_ansible_no_log': False, 'stdout': '', 'changed': False, 'invocation': {'module_name': 'git', 'module_args': {'force': True, 'track_submodules': False, 'reference': None, 'dest': '/var/lib/teuthworker/.cache/src/keys', 'verify_commit': False, 'clone': True, 'update': True, 'ssh_opts': None, 'repo': 'https://github.com/ceph/keys', 'bare': False, 'refspec': None, 'executable': None, 'remote': 'origin', 'recursive': True, 'accept_hostkey': False, 'depth': None, 'version': 'master', 'key_file': None}}, 'failed': True, 'stderr': "fatal: Unable to create '/home/teuthworker/.cache/src/keys/.git/index.lock': File exists.\n\nIf no other git process is currently running, this probably means a\ngit process crashed in this repository earlier. Make sure no other git\nprocess is running and remove the file manually to continue.\n", 'rc': 128, '_ansible_delegated_vars': {'ansible_host': 'localhost'}, 'stdout_lines': [], 'msg': 'Failed to checkout master'}}
  • flavor:
  • job_id: 325474
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2016-07-20_14:22:00-rados-wip-yuri-testing_2016_7_20-distro-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • must scrub before tier agent can activate
      • fs: ext4
      • conf:
        • global:
          • ms inject delay max: 1
          • ms inject delay type: osd
          • osd_pool_default_min_size: 1
          • osd max object namespace len: 64
          • enable experimental unrecoverable data corrupting features: *
          • ms type: async
          • ms inject delay probability: 0.005
          • osd max object name len: 460
          • ms inject socket failures: 2500
          • osd_pool_default_size: 2
          • ms inject internal delays: 0.002
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon keyvaluedb: rocksdb
        • osd:
          • osd op queue cut off: debug_random
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd debug randomize hobject sort order: True
          • osd op queue: debug_random
          • debug filestore: 20
      • sha1: 7fec4023f6301ce7b1b4dabb7327f70dfab87ee5
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
          • debug mon: 1
          • debug paxos: 20
          • debug ms: 20
    • workunit:
      • sha1: 7fec4023f6301ce7b1b4dabb7327f70dfab87ee5
    • install:
      • ceph:
        • sha1: 7fec4023f6301ce7b1b4dabb7327f70dfab87ee5
    • admin_socket:
      • branch: wip-yuri-testing_2016_7_20
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'osd.0', 'osd.1', 'osd.2', 'client.0']
    • ['mon.b', 'osd.3', 'osd.4', 'osd.5', 'client.1']
  • sentry_event: http://sentry.ceph.com/sepia/teuthology/?q=d6dff52a541e4d6cb164c4cebe265c55
  • status: fail
  • success: False
  • branch: wip-yuri-testing_2016_7_20
  • seed:
  • sha1: 7fec4023f6301ce7b1b4dabb7327f70dfab87ee5
  • subset:
  • suite:
  • suite_branch: wip-yuri-testing__2016_7_18_qa_suites
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1:
  • targets:
    • ubuntu@smithi013.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCoSH0HcJFeNYslWfI4hUCvigyrLj5wEB+L4f6tl7YpJ5gNJfOOEpwVk4XHbKrrmFssGyR/PybtLUsZwk9wDnymjvbYuMf9EuazKYH54MLVqCdRFr+2C5vaNt3nOWzRAZCybO0OLGebDiv50gfs4b1A8NkwTiwip7kAfaBoc5LU+dpIXqQI5YI3UixeIj2uKUAg9EBIw9D2UQw66WvUk1hJwHNDbZI7ivE3WF+wDLBV43RD7NDnxGY/XHPVswJESrcIX2NsmvUWuxJ6L0zmgCzXZQQsBr7e4i+xzdRE1VJkh4N3F8ML3rK8s79FwMW24WLqJYT0TuaYos2OMFLU9BeT
    • ubuntu@smithi014.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaPVbcOFu8T+2qbbhGiwIdHo53hF0VA3obmp4m2ZxR96q0nTPQhS1J5YeHS4gIghiAHxQ52IYzaAmiWyiYNTnsnJatjbFFMGGFCYwdjP6vhS5C+r1hm89xLIQPhxth/1upEQClOmAu3kFFM0K4J5ox0S2G4ZhgP5LJ+W+102my/OaYhJbSQEBTcitcPfCkED1+F7LAX6yhxkdsMPKDoCfhA9Xg4d6HLVpD0MRW4fMG/LWEI1WdyDjgojdKuuJgbRJSQWtxT5n+x6MTrmSHwh1XEJDCUDwQA+Lyrqn8C81dl5PT7KbhMNKMa26O6aOddjVbsSmfjIW43RY1gHprTxbf
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 2
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock.check:
    • install:
    • ceph:
      • log-whitelist:
        • wrongly marked me down
        • objects unfound and apparently lost
      • conf:
        • osd:
          • filestore odsync write: True
          • osd scrub min interval: 60
          • osd scrub max interval: 120
    • thrashosds:
      • chance_pgnum_grow: 2
      • chance_pgpnum_fix: 1
      • timeout: 1200
    • exec:
      • client.0:
        • sudo ceph osd pool create base 4
        • sudo ceph osd pool create cache 4
        • sudo ceph osd tier add base cache
        • sudo ceph osd tier cache-mode cache writeback
        • sudo ceph osd tier set-overlay base cache
        • sudo ceph osd pool set cache hit_set_type bloom
        • sudo ceph osd pool set cache hit_set_count 8
        • sudo ceph osd pool set cache hit_set_period 3600
        • sudo ceph osd pool set cache target_max_objects 250
        • sudo ceph osd pool set cache min_read_recency_for_promote 0
        • sudo ceph osd pool set cache min_write_recency_for_promote 0
    • rados:
      • op_weights:
        • snap_remove: 50
        • write: 100
        • rollback: 50
        • read: 100
        • copy_from: 50
        • snap_create: 50
        • try_flush: 50
        • flush: 50
        • evict: 50
        • delete: 50
      • ops: 4000
      • pool_snaps: True
      • clients:
        • client.0
      • objects: 500
      • pools:
        • base
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url: http://pcp.front.sepia.ceph.com:44323/grafana/index.html#/dashboard/script/index.js?time_to=2016-07-23T22%3A18%3A05&time_from=2016-07-23T22%3A01%3A45&hosts=smithi013%2Csmithi014
  • priority:
  • user:
  • queue:
  • posted: 2016-07-20 21:22:17
  • started: 2016-07-23 21:57:03
  • updated: 2016-07-23 22:22:59
  • status_class: danger
  • runtime: 0:25:56
  • wait_time: 0:09:15