Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/strays.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2018-07-07_05:20:02-kcephfs-luminous-testing-basic-ovh/2752471/teuthology.log

Failure Reason:

"2018-07-09 15:01:26.630005 mon.b mon.0 158.69.83.9:6789/0 982 : cluster [WRN] Health check failed: 1 slow requests are blocked > 32 sec. Implicated osds 2 (REQUEST_SLOW)" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2018-07-07_05:20:02-kcephfs-luminous-testing-basic-ovh/2752471/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2018-07-07_05:20:02-kcephfs-luminous-testing-basic-ovh/2752471
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/strays.yaml whitelist_health.yaml}
  • duration: 0:57:34
  • email: ceph-qa@ceph.com
  • failure_reason: "2018-07-09 15:01:26.630005 mon.b mon.0 158.69.83.9:6789/0 982 : cluster [WRN] Health check failed: 1 slow requests are blocked > 32 sec. Implicated osds 2 (REQUEST_SLOW)" in cluster log
  • flavor: basic
  • job_id: 2752471
  • kernel:
    • flavor: basic
    • sha1: 0f260f891580b0c39eb0b0d4e5558856e3713f85
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2018-07-07_05:20:02-kcephfs-luminous-testing-basic-ovh
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
      • sha1: f7c8cfbd73f2fc284a13da998bfbc4532464b532
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: f7c8cfbd73f2fc284a13da998bfbc4532464b532
      • branch: luminous
    • install:
      • ceph:
        • sha1: f7c8cfbd73f2fc284a13da998bfbc4532464b532
    • admin_socket:
      • branch: luminous
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: fail
  • success: False
  • branch: luminous
  • seed:
  • sha1: f7c8cfbd73f2fc284a13da998bfbc4532464b532
  • subset:
  • suite:
  • suite_branch: luminous
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: f7c8cfbd73f2fc284a13da998bfbc4532464b532
  • targets:
    • ovh090.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCetElwQGWcZil+9KTHdec1oMb/kv5e9QaAfU3W+hlu7kSvSK6bzMhBZPjnW+U1fPIPo/RTGvWuVLjaj7PkVRjmsdm/Ml4wgHda/Bi64Qv4wnO8nUCEPZd7LRnt8KSfk0TXQE39c/Ij97CWzgrajgeasStyy75afjJ4Ca71iguqJkY5qAxF3CAORWv2XlVh4YFE33pLOAiSDngPV7SFarhjtdV+9hcc0/uJtTwRtm9nXWOXdU7kPBdOC9hNc16DvYQP3U9YsY/5eOE5IKFjP9+fmDkdh7oELhCSdJNGWWKjusEadcKvBqvBOcGd8/pfY4WcaedUKcsB2/jK5wCl9PkL
    • ovh060.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnOa+2T5xFtfVsMf1dwJg77ve35QFGNCVfhNptcZCn2rk8DDZVg8h3KRdr2ZsuKgQXWDzcJC/ZJyWtyo4imouKUcX1VEqHQpk/rTxHH1l8jdIv6fGTBZwbD7IUR5j8TMoDtxSdzbkculV9Ggxseq+J4/iIhKYox5HTOWaX74YoWNZ4pJwv4AolmOZ1JAqE0r8PbV2dCJdBXMLZz+IjuxnoQngYgavrTp9gDkN66Q+iieTZW7d86v2oCGUb/E2Nlb55rRcRbZq4Ai1iTaPPsl/yU3aDycw9uEwveSUqMlN9D44Cd3sUoMV+KUABBH0Ld9mrtFVg5yQRiZI6YEe1EGSB
    • ovh096.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvSu/TNE50XFC1pZ4G1KaVN7omqjzy/GpP8RzEpIeTaCSysm2cbs8Pryd/1LcolHigZilwFXmhuMQv8ei36UuELc9/mHOXXbByCusASCC4jy+F+nfPAwfko3TgWM8yPn5GlEHaciUWk2K6Uh7jGjXkAaaupMY7+V0tJ2ibO6kCK8JEdluKoGRRbZk2isFSngV2NpWMHS1MT4NtTXouwktyrasM9DabwGTdvuHRmQjBHoI6hYPAza2DXLIQwoiCCZpje4UbJYkYoBNze1w/NfVdv5cuSxKZkgZ60LZfQ28/SfW9j8Wm3encFzWaFZC5+7RzM9u4Z5PYIyrJfGc0s70/
    • ovh097.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHr7bzvr+qYonN8Ohgy+n0EAQ/11sP5j0kLXk9MxQaAdD9yzPghxJkboL9MdNcwvzhWgJWnj9AnQ4k3aFQWaIM/js+MD901jw9jEWlLdzuzeXTAVh9+1qGbkr2fvIkeL78N7QNGY57p98zL2Ph+tQvHfo1iowrrsr3LiDPGp2yurH2oJsQGwTVojo/FAOcHLuJNBp+z6wZ+rMBQfKXinUTbYqXgrxYBBgay3Ocp6mhqa2g4Gbpa1v1+03rUF3ewRt5PnXb9rdLQmP2ySeQaRQcTlkPWvDja+GYrb1eVulAw2Hr4mQVCVg7EHCP2LdWuU/QWO96qKW8pymKmOyPde/z
    • ovh054.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5SqZ//SSWw/gDf13MpHkGVvANiv8pqVVSJL0uOFjvOsFR0qD3+VSbWsIjprylPasvGRTvlob5bh6FhGWPPARydbSKkbG0wg6V9vvmmP485HcKUnXSgznt8sNEk1nx5IOcQbkQbqYvpzqYfOC34s9TSMc9wZgA8rGTVqrDqY4AnFERZxbrSfoW7oTaYQT2ouQlmAFMuVVbVtOBfpSpk4SW2Mfmxt0zyftnUBfSg6LdePnzQhWvUECj/wQkqH9WzfNnVeATzTX/AQEswubiRQ2qqdKnM7+F/kyjLxcfqGGH+JY/uHzJTUS1oGVBXETFRyMX+XpczwKWxKeBEbaCkyUT
    • ovh019.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7BlhSSvgFxQvxFVFltATvjrnz9byW1OjowTcI8Y7+5fRJciXvxNWE5LNfoi9i2oVUY7dxrHwxAuXof3ABHY6TL/XVZIx0z89zz9Q72PfGEydiftGtcepFjDwqV5H3iGJQpPT8KjfQROolfWok4vbfE15zVQwfEI3w2xqSq5rnORJOwGdgJL7J5Cy2Z5H8Afj0kSc28EHzAeu3u1rc6GjzIqAYr/a81hlfKgYb1v7An+M56PaygpaNBtfx4/r4MRWiKp6LKDs2cgY1XSn8pon8AwzP/bmhpr1gQJ6H8XLsNcqKprEX3QpRLFZ6i2NPT8ANwN6G8HYtJKp3DCQt1TfB
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 0f260f891580b0c39eb0b0d4e5558856e3713f85
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_strays
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-07-07 05:21:36
  • started: 2018-07-09 04:16:50
  • updated: 2018-07-09 15:27:03
  • status_class: danger
  • runtime: 11:10:13
  • wait_time: 10:12:39
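For orientation on the timing fields (assuming wait_time counts time spent waiting on ovh machine locks before the job body ran): wait_time 10:12:39 + duration 0:57:34 = runtime 11:10:13, which matches started 2018-07-09 04:16:50 → updated 2018-07-09 15:27:03.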