Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/strays.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2018-08-13_05:20:02-kcephfs-mimic-testing-basic-ovh/2900253/teuthology.log

  • archive_path: /home/teuthworker/archive/teuthology-2018-08-13_05:20:02-kcephfs-mimic-testing-basic-ovh/2900253
  • duration: 0:42:00
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 2900253
  • kernel:
    • flavor: basic
    • sha1: 8cb27127e6dd5dbbaaeb7c1f8c731d3cbec73360
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2018-08-13_05:20:02-kcephfs-mimic-testing-basic-ovh
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.5
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
      • sha1: 35dbe36f19323b6b885fad71f47ad0ed4e9d09e9
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: 35dbe36f19323b6b885fad71f47ad0ed4e9d09e9
      • branch: mimic
    • install:
      • ceph:
        • sha1: 35dbe36f19323b6b885fad71f47ad0ed4e9d09e9
    • admin_socket:
      • branch: mimic
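
The log-whitelist entries under overrides.ceph above are treated by teuthology as regular expressions: a cluster-log line that matches any of them is ignored when deciding whether the run failed. Entries such as \(MDS_ALL_DOWN\) appear more than once because several override fragments contribute to the same list (whitelist_health.yaml is even listed twice in the job description). A minimal illustrative sketch of that matching, not the actual teuthology code:

```python
# Illustrative sketch only -- not teuthology's implementation.
# Whitelist entries (taken from the overrides above) are searched as
# regexes against each cluster log line.
import re

whitelist = [r"\(MDS_ALL_DOWN\)", r"overall HEALTH_", r"\(OSD_"]

def is_whitelisted(log_line: str) -> bool:
    """Return True if any whitelist pattern matches the log line."""
    return any(re.search(pattern, log_line) for pattern in whitelist)

assert is_whitelisted("cluster [WRN] overall HEALTH_WARN 1 osds down")
assert not is_whitelisted("cluster [ERR] some unexpected failure")
```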
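The mds settings above come from the frag_enable.yaml fragments and turn on directory fragmentation: with mds bal frag enabled, a fragment that grows past mds bal split size (100 entries) is split into 2^(mds bal split bits) = 8 children, fragments that shrink below mds bal merge size (5) are merged back, and mds bal fragment size max (10000) is the hard per-fragment cap. A toy model of those thresholds (an assumption-labeled sketch, not Ceph's balancer code):

```python
# Toy model of the dirfrag thresholds configured above (not Ceph code).
SPLIT_SIZE = 100       # mds bal split size: split above this many dentries
MERGE_SIZE = 5         # mds bal merge size: merge below this many dentries
SPLIT_BITS = 3         # mds bal split bits: a split yields 2**3 = 8 children
FRAG_SIZE_MAX = 10000  # mds bal fragment size max: hard per-fragment cap

def should_split(dentries: int) -> bool:
    return dentries > SPLIT_SIZE

def should_merge(dentries: int) -> bool:
    return dentries < MERGE_SIZE

print(should_split(150), 2 ** SPLIT_BITS)  # True 8
```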
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: mimic
  • seed:
  • sha1: 35dbe36f19323b6b885fad71f47ad0ed4e9d09e9
  • subset:
  • suite:
  • suite_branch: mimic
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 35dbe36f19323b6b885fad71f47ad0ed4e9d09e9
  • targets:
    • ovh066.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtWh5slWkghYopwdfFE2bsYS1JAri675Gi83nRN1fNs/LcyC+cBMZQ37Of3Y3B0Bmxm9NDvsM6KSqBq8AiCVWP4XeedJ5uX1W3pg7dPxC4jTN+e9gkMgBwgyPs0SF7H3w+vksfyOdFtTH2yausBNNB3sB/+mdBKfvv0nKvj4Gp+/E1ih7hC1UlgnjH4Ix+xDRDMY4n44DJbfPhf4N/SiBvdXR8efH/ltwG/nSf955Lwz5xvOe3DG1gWv13LVh82HWzyd5wGzQreeghViFKSwV+2m6xZg9xMyAQnfenkMcWD+Iccd0o4IxAdKNZ4uTbi9FffZCvOOx09/QoyJu1diC9
    • ovh096.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC87ke8ZGz0e1nn0NbaNfPVNYVxOA/1okEfnmjzjQ9MuKr6MVgIE3sbaQxbyEGpPguDLSXoNPagyO7iFMwT9QINLTsSp1/GsEYzK9AAc/8lKJuDTTRyzZdZdn+5K2zeaEis4C7ddfR74OhH4fogMDNQBlJV2FbYS05mzul624fw8+2dtuSplwzxatBx80nqvqkUSt8BODgbr9WohWbj2wZq71DlpuFGsrIi2wCXMs+ful35TySh/pzGCgJFQvobMe7uS6W2P8NOWQm+f0xaIDCsL22khX3zQFRkJZS8wY1iMowNoQwv2+h4LbgZ9Zq6+ij8GuYCNx0VZQanahlBSmD9
    • ovh009.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCync7YWDJqZcMruq3dmZk/gaXjMO7yz7yR8DtvDgTz8siVfgayBBtIYnllUaY7NzLUiCa+B8bMuMUvrGTzLAYDbadNhVvYfmLSKdHZSbSiOcLNPq9krKP7AUsC6ah5rBe8GsjCrp7riaw01gYgLx/SXDRvPvWLBRfSJo33bLKGkvO8Fo9bWgfS6LD4ILnh3QC82v0VDXJI9s7wPXs83xVMYwIL8FFhcqWcz2Zxp8DyC3flBuulsUyMT/4ThZkfRfaPOGzd/YO5EW3RLnadLKmOLY/rnWb19//uiYsSs4wra5aQ4YzvrG1i8ZInc9scmEsRQEvel0GVZeLM0771HbHH
    • ovh087.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSJHnblndiJwPe/9DQW48zg+KKlXZGW6IDkreWwnEAKGNcNR/E+juESOYGaAtL93Xa5cod8SZXhZLzOe/D/G8+4c0xazYo5XgU/mI8P6/IMtD7WI+uM3iFnmeURedGY+Fm/2E0IAPd8Z2GiOoGsQHPVt+WESj5jAa2Kx1c9FNPopLi9vfG1blKZaLlq22rcwTT/5NKJAa9YKscrrFTSQNSzO1G0I6Grv1uFfDbuQr3IRm9hjQ3jE1S8SlhMjywhCbWU+EtrTm2+jm8RspigNO7ykHxPMpgVcF4zUgjDA64VvwwVOORORqWNKSVIKxfaaoiyn8VBd733RYY6H7lW4wP
    • ovh049.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3yOCriyjfXq3p1Dv1Qi9x+KIU4PckzqTr9/srMGYNBm/bRtUtH1UF4G0eXAn/cf3A98ZN8Tntd//I8ZHcf8eQjJw+8nk5/Zna/VfECGt2PCaYpdvgZQMejc6/tL/rpeLMyBe3NiXN68HQ3P7ThPp1DNDh1kjJ8BC6Ij1Z0pGByN/GwBwA4GWjaF3lac2SVoDo4aQO/jLLG7NI4heTUJDzL75vX4n9fmC3eg0+rG/M06zvxYrAe1qXJljaQx34ttNu0H7tfSDqybaGlFyI3XFZAZOhdcMUydERie4M178kYF4DO5dTRdfB6lKcd+lcKl2IhrslP+B0gXfPricv2NFZ
    • ovh099.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDW7j+nK5McQsdGpgb2BX0x7K7KzMGUjDqZtaynNSoCFSsDUe0ggrF5jmggC+g04e3zXIxEQf4K1vBcMYBN/Fszcr7nOd/9EoNNEctDhuMlUW1diUitl0z5/uhmUFLSiFMCWiPeZDm+khifZVuVOi0fPSB/1/xM4ehw4VVFfuM9dhODzpAj/mJjv7ATi0Iqx+91Gtg6nB6wIuBloxXYAesrhmUv9dG2oiaAOFF7zc+8jLT38V7SrPAaocdau3svzFwNb4DsnTGHCKXTZyNcYxIzZFw2I6eJ3YPMZxGe6+i7BcobxP2rIAJuv4wNeTyDU+gJglMG3Xx7Sse9mDC47j/t
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 8cb27127e6dd5dbbaaeb7c1f8c731d3cbec73360
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_strays
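
The cephfs_test_runner task discovers the listed modules with Python's unittest loader and runs them against the cluster brought up by the preceding tasks; here that is the stray-inode tests selected by tasks/strays.yaml. A hedged sketch of loading the same module locally (assumes a ceph checkout whose qa/ directory is on sys.path; the path below is a hypothetical placeholder):

```python
# Illustrative only: discover the strays tests the way a unittest-based
# runner might. Requires a ceph checkout; the path is a placeholder.
import sys
import unittest

sys.path.insert(0, "/path/to/ceph/qa")  # hypothetical checkout location

suite = unittest.TestLoader().loadTestsFromName("tasks.cephfs.test_strays")
print("discovered %d test cases" % suite.countTestCases())
```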
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-08-13 05:20:49
  • started: 2018-08-15 20:02:55
  • updated: 2018-08-16 02:09:01
  • status_class: success
  • runtime: 6:06:06
  • wait_time: 5:24:06
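
The timing fields are internally consistent: runtime is updated minus started, and wait_time is runtime minus duration (time spent waiting on locked machines around the 0:42:00 test itself). A quick arithmetic check:

```python
from datetime import datetime, timedelta

started = datetime(2018, 8, 15, 20, 2, 55)   # started: 2018-08-15 20:02:55
updated = datetime(2018, 8, 16, 2, 9, 1)     # updated: 2018-08-16 02:09:01

runtime = updated - started                  # 6:06:06
wait_time = runtime - timedelta(minutes=42)  # minus duration 0:42:00

assert runtime == timedelta(hours=6, minutes=6, seconds=6)
assert wait_time == timedelta(hours=5, minutes=24, seconds=6)
```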