  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2017-09-03_05:20:02-kcephfs-kraken-testing-basic-ovh/1594091/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2017-09-03_05:20:02-kcephfs-kraken-testing-basic-ovh/1594091
  • description: kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml tasks/data-scan.yaml xfs.yaml}
  • duration: 0:34:38
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 1594091
  • kernel:
    • flavor: basic
    • sha1: 9bb9aa1af82be2cd30f5b7d47f81b436c6981ef2
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2017-09-03_05:20:02-kcephfs-kraken-testing-basic-ovh
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • bad backtrace
        • object missing on disk
        • error reading table object
        • error reading sessionmap
        • unmatched fragstat
        • was unreadable, recreating it now
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug osd: 25
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 1
          • osd sloppy crc: True
      • sha1: e0354f9d3b1eea1d75a7dd487ba8098311be38a7
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • workunit:
      • sha1: e0354f9d3b1eea1d75a7dd487ba8098311be38a7
      • branch: kraken
    • install:
      • ceph:
        • sha1: e0354f9d3b1eea1d75a7dd487ba8098311be38a7
    • admin_socket:
      • branch: kraken
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'osd.0', 'mds.a', 'mds.c', 'client.2']
    • ['osd.1', 'osd.2', 'mds.b', 'mds.d', 'client.3']
    • ['client.0']
    • ['client.1']
  • sentry_event:
  • status: pass
  • success: True
  • branch: kraken
  • seed:
  • sha1: e0354f9d3b1eea1d75a7dd487ba8098311be38a7
  • subset:
  • suite:
  • suite_branch: kraken
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: e0354f9d3b1eea1d75a7dd487ba8098311be38a7
  • targets:
    • ovh097.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIxcK5/NWylJfvkcaUd8fx6BVM6VRhsYJpXoWFEAEl9LvVnkbipSMctP+rP3mDYhdZPs8bZth12YjClr8fSu4Vz7Oy7dAaZwD0ALq8Hmag2A1i+F2Qqgi0C0pywVFmKOhlcNZmhtWFVj4oTuOTezoh/LXb4Qm+EROcelOxfVeBb99BfC7QpMfQYAxIDNJD0lBiXG2Qdwrfk7JnDV4L1pEmSAfNgTFMHd3JNM3lG6YL1EAJ7KalrXZVH+EVEdso/W8LkGSczKJYeTQMD2hqkEYoomXo0jJS/Y60mnzsw5Ax1BlzElEC5qs9H4pM7Vv75dDFCiYO/v6Ws+E3rUyBbjb9
    • ovh094.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6fhlR4WjRFWp5CE6RFxEmi99SFUcGFmmE3o0AClV0TgSDLGKi5sPmBw/divyjL/eBV2DIWhfjNXK1yc3RQdSWMC0rypYUj+94z1Cti7bpsNta8QJP3Dx3Is6MFGdzSxBqJ9sbgxvW9Cw09vqYKEglNjABtgm+OiIeC/zN4b/hNUAF5tSzCRapP6izH/prN2cklsSPi18g0nnF6DKTU+7g+K8KBEHOvSPOrndA9kS89MkUmQzdudssAk3zCnbrYP/5wbBLbouLi3vfxvBHic1MFv2hP9xFerBPZDpzHJ2EvwZ3dUr7f4rPPFHMyzGxD4DPDVZaFh66TJO/UgNubaBP
    • ovh006.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0VFtBgbzor5QAjhYCSIZ5e+AVosT1EFGYsBFdZAhLjR18EPtS8/47+t2XY+YoY93u56A14k9YbYfqW0SflQviZzHNx5eaHAO/oldYgUstyK+el0VxmMQQsc0mGng5a34O04YKuFtCMy3kLWJ6fUogEf8ya8srIbBZe8WK886FzR65ksbFvtHZGonehtW9v4sS13cTyz3oNie32OMgynzbWpIPj+fytipGvLpLWslF0/tUXjE0iJ9xUkw6EUaJ7KbOwoRpvrRlMDEO0AJrNKCfwVIBIQR1eElGrbTb/a7Qg3wDiaQ6iqTYTXycnf5gkBLtkMOgyLFI1QznPaac4BAj
    • ovh038.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTAvL3hmcyHf7SGpXURLDYGyD2zTivGwBaD89Vj+tmZ6z/o2gknD0DklwwWyAeilkQEBCREyAhmdrXtS6/J25p8VCvpFavpXJKVx3xgS1FWhdzcFLyhN/t6//YOFL3HTbd3uih+dPQCWeZLxibZKl1ESbefouCCmUz8EDpHWU9K6lX8saVtrckCoCnUBLZRdjzQ1YH8y4T/sYkLVdy6a+OhGWRvcsnc6Pzg/ZdPyYhl5Tj1Kp1CL+xICwTOiiX2MPuZSPa5Er+v7uqnO1eWAFn28sXQtwGigGSz6G3rfEtp+ymauO4FpefRYKdw17o+C6DJYvYHWMC4bz63E+V1YIn
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 4
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 9bb9aa1af82be2cd30f5b7d47f81b436c6981ef2
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_data_scan
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2017-09-03 05:20:34
  • started: 2017-09-03 20:29:13
  • updated: 2017-09-03 23:09:14
  • status_class: success
  • runtime: 2:40:01
  • wait_time: 2:05:23
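
The ceph conf overrides recorded above map back to the override fragments named in the description (debug/mds_client.yaml and dirfrag/frag_enable.yaml, among others). The sketch below is a hypothetical reconstruction, not the actual fragment contents from the kraken branch: every value is copied from the record, but how the values are split across the individual fragments is an assumption.

    # Reconstructed override fragment (hypothetical split; values from the record above)
    overrides:
      ceph:
        log-whitelist:
        - slow request
        - bad backtrace
        - object missing on disk
        - error reading table object
        - error reading sessionmap
        - unmatched fragstat
        - was unreadable, recreating it now
        fs: xfs
        conf:
          mds:
            mds bal frag: true               # enable dirfrag splitting/merging
            mds bal split bits: 3
            mds bal split size: 100
            mds bal merge size: 5
            mds bal fragment size max: 10000
            debug mds: 20
            debug ms: 1
          client.0:
            debug ms: 1
            debug client: 20
          client.1:
            debug ms: 1
            debug client: 20
          mon:
            debug mon: 20
            debug paxos: 20
            debug ms: 1
          osd:
            debug osd: 25
            debug filestore: 20
            debug journal: 20
            debug ms: 1
            osd sloppy crc: true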
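The roles field above is what a fragment like clusters/4-remote-clients.yaml would express: two nodes carrying the mon/osd/mds daemons plus one client each, and two dedicated client-only nodes. A minimal sketch, assuming the standard teuthology roles syntax:

    # Reconstructed cluster layout (roles copied from the record above)
    roles:
    - [mon.a, osd.0, mds.a, mds.c, client.2]
    - [osd.1, osd.2, mds.b, mds.d, client.3]
    - [client.0]
    - [client.1]

Each inner list maps to one target machine, which is why internal.lock_machines locked four ovh nodes for this run (see targets above).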
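In the tasks list above, only the trailing entries (install, ceph, kclient, cephfs_test_runner) come from the suite fragments; everything prefixed internal., along with kernel, console_log, pcp, selinux, ansible.cephlab, and clock, is injected by teuthology itself for every job. A minimal sketch of what mounts/kmounts.yaml and tasks/data-scan.yaml together would contribute, assuming the usual fragment split:

    # Reconstructed task fragment (hypothetical; task names from the record above)
    tasks:
    - install:
    - ceph:
    - kclient:                            # kernel CephFS mount, from mounts/kmounts.yaml
    - cephfs_test_runner:
        modules:
        - tasks.cephfs.test_data_scan     # data-scan recovery tests, from tasks/data-scan.yaml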