Description: kcephfs/recovery/{clusters/4-remote-clients.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml tasks/auto-repair.yaml xfs.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2017-07-18_05:20:02-kcephfs-kraken-testing-basic-ovh/1411684/teuthology.log

  • archive_path: /home/teuthworker/archive/teuthology-2017-07-18_05:20:02-kcephfs-kraken-testing-basic-ovh/1411684
  • duration: 0:16:04
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 1411684
  • kernel:
    • flavor: basic
    • sha1: bb89546eb29c76f521b57ddb5df3f9666cfea991
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2017-07-18_05:20:02-kcephfs-kraken-testing-basic-ovh
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • force file system read-only
        • bad backtrace
      • fs: xfs
      • conf:
        • mds:
          • debug ms: 1
          • debug mds: 20
          • mds bal frag: True
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds bal merge size: 5
          • mds bal fragment size max: 10000
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client.1:
          • debug ms: 1
          • debug client: 20
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug osd: 25
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 1
          • osd sloppy crc: True
      • sha1: 9860e658189e3ac775102b3f66676b0a77a80ba7
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • workunit:
      • sha1: 9860e658189e3ac775102b3f66676b0a77a80ba7
      • branch: kraken
    • install:
      • ceph:
        • sha1: 9860e658189e3ac775102b3f66676b0a77a80ba7
    • admin_socket:
      • branch: kraken
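
Reassembled as a teuthology job fragment, the ceph overrides above read roughly as follows. This is a sketch rebuilt from the values listed here, not the literal facet files from the suite:

    overrides:
      ceph:
        fs: xfs
        log-whitelist:
          - slow request
          - force file system read-only
          - bad backtrace
        conf:
          mds:
            debug ms: 1
            debug mds: 20
            mds bal frag: true
            mds bal split bits: 3
            mds bal split size: 100
            mds bal merge size: 5
            mds bal fragment size max: 10000
          client.0:
            debug ms: 1
            debug client: 20
          client.1:
            debug ms: 1
            debug client: 20
          mon:
            debug mon: 20
            debug paxos: 20
            debug ms: 1
          osd:
            debug osd: 25
            debug filestore: 20
            debug journal: 20
            debug ms: 1
            osd sloppy crc: true
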
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'osd.0', 'mds.a', 'mds.c', 'client.2']
    • ['osd.1', 'osd.2', 'mds.b', 'mds.d', 'client.3']
    • ['client.0']
    • ['client.1']
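
The four role lists define the cluster layout: two OVH nodes carry the mon/osd/mds daemons plus the stub clients client.2 and client.3, and two dedicated nodes carry the test clients client.0 and client.1. As a roles stanza this would read roughly as below (a sketch matching the listing, presumably coming from the clusters/4-remote-clients.yaml facet):

    roles:
    - [mon.a, osd.0, mds.a, mds.c, client.2]
    - [osd.1, osd.2, mds.b, mds.d, client.3]
    - [client.0]
    - [client.1]
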
  • sentry_event:
  • status: pass
  • success: True
  • branch: kraken
  • seed:
  • sha1: 9860e658189e3ac775102b3f66676b0a77a80ba7
  • subset:
  • suite:
  • suite_branch: kraken
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 9860e658189e3ac775102b3f66676b0a77a80ba7
  • targets:
    • ovh040.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZBUPZm5JX3aCj1NA5q0D1/EYlqW2djNbgY1L149PmeDe6AHGbEGVt5sIxrsbHm099WhwhUq6Y6LBqWbbXm63PAYwHSnY7K0Cuj/gs8XACRATZkM/pY3E92B9gm3vq+z0LfXMCmW2hAn3nSWI09zsa8k0NkRV6kWvJwLRVCTfv/nYaMQo8xHCDoWBuzQwU7N3vCkvGnaxIwshana3/1OuALsC4aK6uxKuFzaKKZARDogcvuDXthMynX4nxNL14bHsOIDly/obxNwW+piG8dfkAjPPwnK5ksY1q4kCYF2hzLVemDTihDNRikJBoSsLSFpQyFftVxUvdRl3OP5B4bRJl
    • ovh054.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBvsJ67MjM4rwyRmyqkuEit+hyTMq6XXI9g5hQbwH6LJRdyl/Uz9vi2Df4d3UvMIJAAaUnegXsxKzYmhBIaOww6K90s9GZ9sZvkRzs2FMwkN7c+gds5XpTyPFL/mYngZslmz6yXmlHhuhqqc1VlD0KSqPr85YbN0TzXpsIXmRQZY+LbyAzsq0EiUAil6ppiuHaE7E9ik+s2PCxKC64oGxGtN0zGuC/VGICwBxV1gI42N8OygNub5KcD4wsP3h+xrN8R06+zWYc8l4jJATl/+/XWq13yBwQHxmDCSH+IcROq4U1RB9+NdkYkpfItXMloovIi94zta30RlhNWlTY8s2Z
    • ovh044.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtZ9zOMAIHbV0GH1DwoUIPZPcz/sYHmKbiMbZQgwaI0v1/KaGj4r+IvzklNwaaUuxouHNS4Kz6x2KzF4cbcs43FIBsYvkP5DHNbtsMvytSyDo7Ofu4+oeDdDqQ53RSXOwCRopoz8QrO4b+xzQD0Nz5RcoVn2T+xA7eSfABnIkiJ53Z+eBQRReQCU6rWhMJqLyv5skkAvllw2yjqErfFftneywWtrbHdjsA4yAmQ2WBX8O5uZCaoiSkz8IXWJ8BxSBoQnEEXS1zYSkQRV8HPY6tppzRZTMhCFRmEkr0k3WZjd5BjJgCSkemaK3b7+QDB/lRDB1zlKwgGhFy3pM0Jqgt
    • ovh069.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHf8C0GeoBpZCUQOsUqytvJSmfLy8f+W02M9InbhmC+4xjTbBG6ijh/t9l5D6C8uLXLb5xbgUw/khx8KpnTbbLTLJmQtgrgLsjO9mzMfAoKbf7CY2nRQTMdJLUS+JrCOJrgpt0veoMTjgCxlylIyrRmqCOuTKJ1yMkS9Rzbm3w605YaaVgUx6N9YWqm4yqXHtszwm7TVTnTxULUHL6r16TDGDg56yC8nqswcjpvHUT812m0XJZ98lNuzt7C7c0HYsML6IGtmhvy+JE2cT5/hNrhznswjdpRnK+MgjHz9fflWNLjZeUuqgVhoU9XQkPmj80AWcpTVZMTDnZzJuuGk0f
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 4
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: bb89546eb29c76f521b57ddb5df3f9666cfea991
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_auto_repair
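
Everything from internal.check_packages through clock is teuthology's setup, logging, and provisioning machinery; the tail of the list is the test proper: install Ceph at the given sha1, bring up the cluster, mount the kernel client, and hand control to the test runner. As a tasks fragment that tail would read roughly as follows (a sketch of how the suite facets compose, assuming the usual kcephfs layout):

    tasks:
    - install:
    - ceph:
    - kclient:
    - cephfs_test_runner:
        modules:
          - tasks.cephfs.test_auto_repair

cephfs_test_runner loads tasks.cephfs.test_auto_repair from the suite repository and runs its unittest-style test cases against the kernel-client mounts set up by the kclient task.
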
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2017-07-18 05:20:18
  • started: 2017-07-18 18:36:30
  • updated: 2017-07-18 21:36:33
  • status_class: success
  • runtime: 3:00:03
  • wait_time: 2:43:59