Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/client-recovery.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2018-08-27_17:35:23-kcephfs-luminous-testing-basic-smithi/2951440/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2018-08-27_17:35:23-kcephfs-luminous-testing-basic-smithi/2951440/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2018-08-27_17:35:23-kcephfs-luminous-testing-basic-smithi/2951440
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/client-recovery.yaml whitelist_health.yaml}
  • duration: 0:34:52
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 2951440
  • kernel:
    • flavor: basic
    • sha1: 48ae238032dd57e6c55cf92e804d87e059d3fa4c
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2018-08-27_17:35:23-kcephfs-luminous-testing-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 16.04
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • but it is still running
        • slow request
        • evicting unresponsive client
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
      • sha1: 1c0f777add3caa8cfd8e8dcc106d87257e3d6c6d
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: 84a2a9c5e749f186ce80d64200a568b595dded7a
      • branch: luminous
    • install:
      • ceph:
        • sha1: 1c0f777add3caa8cfd8e8dcc106d87257e3d6c6d
    • admin_socket:
      • branch: luminous
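
The overrides block above is the result of merging the per-fragment YAML overrides named in the description (debug.yaml, frag_enable.yaml, log-config.yaml, whitelist_health.yaml, whitelist_wrongly_marked_down.yaml, ...). As a rough, hedged illustration of why list-valued keys such as log-whitelist can end up with repeated entries, here is a minimal deep-merge sketch in Python; it is an assumption about the general mechanism, not teuthology's actual merge code:

```python
# Minimal sketch (assumption, not teuthology's own implementation):
# dicts merge recursively, while lists such as log-whitelist are
# concatenated, which is why repeated entries can appear above.

def merge_overrides(base, extra):
    """Recursively merge `extra` into a copy of `base`."""
    merged = dict(base)
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_overrides(merged[key], value)
        elif isinstance(value, list) and isinstance(merged.get(key), list):
            merged[key] = merged[key] + value  # concatenate, keep duplicates
        else:
            merged[key] = value
    return merged

whitelist_health = {'ceph': {'log-whitelist': ['overall HEALTH_', r'\(FS_DEGRADED\)']}}
wrongly_marked_down = {'ceph': {'log-whitelist': ['overall HEALTH_', r'\(OSD_DOWN\)']}}

merged = merge_overrides(whitelist_health, wrongly_marked_down)
print(merged['ceph']['log-whitelist'])
# ['overall HEALTH_', '\\(FS_DEGRADED\\)', 'overall HEALTH_', '\\(OSD_DOWN\\)']
```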
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
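
Each role group above runs on one of the six locked smithi machines (internal.lock_machines requests 6 nodes in the tasks list, matching the six targets). The snippet below is only a hypothetical pairing for illustration; the real role-to-host assignment is done by teuthology internals, and the order shown is not authoritative:

```python
# Hypothetical illustration: one role group per locked target host.
roles = [
    ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3'],
    ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7'],
    ['client.0'],
    ['client.1'],
    ['client.2'],
    ['client.3'],
]

targets = [
    'smithi135.front.sepia.ceph.com',
    'smithi196.front.sepia.ceph.com',
    'smithi163.front.sepia.ceph.com',
    'smithi008.front.sepia.ceph.com',
    'smithi064.front.sepia.ceph.com',
    'smithi029.front.sepia.ceph.com',
]

for host, group in zip(targets, roles):
    print(f"{host}: {', '.join(group)}")
```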
  • sentry_event:
  • status: pass
  • success: True
  • branch: luminous
  • seed:
  • sha1: 1c0f777add3caa8cfd8e8dcc106d87257e3d6c6d
  • subset:
  • suite:
  • suite_branch: luminous
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 84a2a9c5e749f186ce80d64200a568b595dded7a
  • targets:
    • smithi135.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKHphzOIpW90UycCajFi2OR7EQhA++eXA31RqFcXca6Vj/pKFCgp2f4yXyAA4Yrark9RqUq5GBisIsaIAx46myfwBz48rM1IsXIsLsUU0Cvx0tK1quom4adG0TtHqkKcqXln01CU7t0oT4blkWWU+9UcaBzg/uTIL1YmwCwgaUMoWMy/e1F1O1owJaFkqpBLdHN1EsW3zCm8H69bIqLNA2gslO6mtoXy1aZE98p3ebAZOl7zohk1OBpIU8YMNNwF8tmosd2N0F6LZ2saDNOk/2Q8Qoi5FDvBL4DDUrZcNC8mog6RNgOOavBx1ThKiZ28FMbTJFTiZ5Je7QJnhb22lt
    • smithi196.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5IRGMUL2sOMUDBA69PFUvw+O8NbhK1p8UcPfoIcUR4q2c3TZU298GmsFpHzhc+wKGnKvSZ+K7X65Z/wKyCp1tCgXHoSXrdFKM1DuvmkVWDkYTBQwOwFDUu4ET/AWANM0Lz8hMHwIUT9C56pPtkDdXp9tN5o589eVv9ckvDI8IjvPijATrYa/lPk9lkxjrHY7NRxtr7Xihh+GXMu0n1L1u6aHXrYP8d6e6OII+y0EiLaNNX4h9NsKtHwpZ8boWvUzt5PjvFxy8wvCEbnnQoTcRb4sESmPODeyf3ZJHTONZngj4cqRF5OO/bka3W3XNywjtUVOm6LaDRXZomLdNj0Yd
    • smithi163.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYgI2vKNoPwUh5ZuRsN8Sr9bmmuIgQaZIs0Hj6E85y5M7rX36+foM1i7IOjRKwyCSSeUXvbFt9CBSjDD8yaSnZmt0oVhPFkYkp9zda6oVSuOlkxHYURL3cy1BDoDVP68QknoPA53fMsx3vCKpKOHp9Koffota7MfPBKSJb+6QFvlsNQzv9mAlBm98+nzLJGkSgQQmTehrmr+j1gDt/Bw6phNRrBJcETqB8I48mRGfTH5jwN+aYluNeGwzqhA4eohHDNGNabcipJZ/tHvB12ZzgHVUx2hZVp6TWDsTrUyUXD3KULW43OguinRs2Zb0cCphvEDJmjNPxcuux58H76slV
    • smithi008.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHD2r+nrvBIQrz8Bp6qb+G2+4LgkyOhVGIstjTjnuB80ZXB4JplUziLU6rbokmTtqgw0sjR/8EvXlBwCMdQuxi+cRZIDPq0U4rjThm4pBR6X050pFRFV5dicsa/6jNR69sbaXg6w3gAOXORmnjYspHaWqg/oTRS6j56x9KAwC+radtQAKVFZVoHo3TBTXeO7tkXc5DVqUA/nRA2HszlJkJTWs9IUxAz4uU7MosdPOeP3c6cRB70ncSXIOlKTfpjcqYCWID9AuPbqUvczZyudQuQ+2UfxKJ41JQjOjIPoZxFRnY+e6K2jHCqSOr3ya0qT/v47l12A/d+IDIkqbl8WUV
    • smithi064.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8/9LiI5QqWYVGeUMjH54qLyCWVD4FNV1Sst6XLRjxrA0Fg+87YQTM0Xtiy6OXN6VEzIPIhamNOwNVvskxX/ODOgPDWtOOJPOPST0GX9izVrlasPoc1LXvUpmqaAwMtRUqv+5oR2Saw4GR3g98jV0oIrC8jhKXZc8NEJNIgGBQZyuZiJTrR9ofHjTjM/Z6OaL121olFaKxsGcE6Q0fQTdXMU57GnT4uSq4zm8Q40XtOOQByxp7XJ8xF7Au92pr8Zpp/AZdI3ahe1wvrl951edywUByXSmajSCKI9KEzRD+xBvCAlDAQ4NoGrdcvSzTNmSC8utaJ+nEdPFu6aIq+f3p
    • smithi029.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCoxIsAukqLBPdDZqDiDT6CsKclXO6s/0go/ObK2Qn7iTob/Pxa9hVL09uAxYW8pbl+/4qv9bcPpQCBEgulwcSopnmf6UoOM0SFyhMygPUhOWDCcWJ81i0qKWeSsIHOSBqVAaS0JdRfyx4tQNCoNtg1jdf0NcmWGj6MmNSAmXg0u4hSZPYN8hEUCrvicCFpk+yw00yXRPaB3dRHgeqsWab/vpJhXu7Ksq88+7MpUxMNngKWfj2rRpXzXXdCHufWMHB5fRQkg4aVxicC0MguQdVTcq/f5N/ALNt1uxanXae6waTnZzfxksnU7yS6dFUBIG+8gWm//TK9C2FttYQwZa1d
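
Each targets entry above is a hostname plus its SSH host public key, the same "hostname key-type base64-key" layout used by an OpenSSH known_hosts file. The following sketch is illustrative only (not part of the teuthology run) and the keys are truncated here for brevity:

```python
# Illustrative only: write the targets mapping out in known_hosts format,
# i.e. "<hostname> <key-type> <base64-key>" per line, as shown above.
from pathlib import Path

targets = {
    "smithi135.front.sepia.ceph.com": "ssh-rsa AAAAB3NzaC1yc2E...",  # key truncated here
    "smithi196.front.sepia.ceph.com": "ssh-rsa AAAAB3NzaC1yc2E...",  # key truncated here
}

Path("known_hosts.sample").write_text(
    "".join(f"{host} {key}\n" for host, key in targets.items())
)
```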
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 48ae238032dd57e6c55cf92e804d87e059d3fa4c
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_client_recovery
      • fail_on_skip: False
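
The final tasks install Ceph, bring up the cluster, mount the kernel client (kclient), and then run cephfs_test_runner against the tasks.cephfs.test_client_recovery module with fail_on_skip: False, meaning skipped tests do not fail the job. The sketch below shows only the general mechanism (unittest-style loading and a skip-tolerant pass/fail decision); it is an assumption for illustration, not the actual cephfs_test_runner task code:

```python
# Hedged sketch: run unittest-style test cases from a named module,
# treating skips as non-fatal (the fail_on_skip: False behaviour above).
import unittest

def run_module(module_name, fail_on_skip=False):
    suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    failed = bool(result.failures or result.errors)
    if fail_on_skip and result.skipped:
        failed = True
    return not failed

if __name__ == "__main__":
    ok = run_module("tasks.cephfs.test_client_recovery", fail_on_skip=False)
    raise SystemExit(0 if ok else 1)
```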
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-08-27 17:36:09
  • started: 2018-08-27 18:45:02
  • updated: 2018-08-27 19:31:03
  • status_class: success
  • runtime: 0:46:01
  • wait_time: 0:11:09
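
The timing fields above are mutually consistent: runtime is the span from started to updated, and duration is runtime minus wait_time. A small check using the reported values:

```python
# Check that the timestamps above add up:
#   runtime  = updated - started
#   duration = runtime - wait_time
from datetime import datetime, timedelta

started = datetime(2018, 8, 27, 18, 45, 2)
updated = datetime(2018, 8, 27, 19, 31, 3)

runtime = updated - started
print(runtime)                                      # 0:46:01
print(runtime - timedelta(minutes=11, seconds=9))   # 0:34:52, the reported duration
```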