Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/strays.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2019-06-15_05:20:02-kcephfs-luminous-testing-basic-smithi/4038606/teuthology.log

Failure Reason:

"2019-06-18 16:11:45.191421 mon.b mon.0 172.21.15.39:6789/0 943 : cluster [WRN] Health check failed: 138 slow requests are blocked > 32 sec. Implicated osds 2,3 (REQUEST_SLOW)" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2019-06-15_05:20:02-kcephfs-luminous-testing-basic-smithi/4038606/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2019-06-15_05:20:02-kcephfs-luminous-testing-basic-smithi/4038606
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/strays.yaml whitelist_health.yaml}
  • duration: 0:27:56
  • email: ceph-qa@lists.ceph.com
  • failure_reason: "2019-06-18 16:11:45.191421 mon.b mon.0 172.21.15.39:6789/0 943 : cluster [WRN] Health check failed: 138 slow requests are blocked > 32 sec. Implicated osds 2,3 (REQUEST_SLOW)" in cluster log
  • flavor: basic
  • job_id: 4038606
  • kernel:
    • flavor: basic
    • sha1: 598b9f316e43329b95037c57179d429e1bd5754f
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2019-06-15_05:20:02-kcephfs-luminous-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
          • osd op complaint time: 180
      • sha1: da246edbc62460c349adb76877b9ca1f6611b9b6
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: da246edbc62460c349adb76877b9ca1f6611b9b6
      • branch: luminous
    • install:
      • ceph:
        • sha1: da246edbc62460c349adb76877b9ca1f6611b9b6
    • admin_socket:
      • branch: luminous
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: fail
  • success: False
  • branch: luminous
  • seed:
  • sha1: da246edbc62460c349adb76877b9ca1f6611b9b6
  • subset:
  • suite:
  • suite_branch: luminous
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: da246edbc62460c349adb76877b9ca1f6611b9b6
  • targets:
    • smithi093.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDceDLyD3j6r2vbZHCpmtC2eb7kyv5LYwMERlW/k3B/j376Fo5APd7gy7PXN8BRgfwEACRsDsMeO1seI5T6n1dz+F96nD8UENplUC6svz7aTUNbIllQgI1mmm0HBYrA7VAP7CHsq7tV097KWRCWrDsFu/VL/x+iQyUBs52lvTUQcuIJHtRhWZchrPyZB5W1/sld89+OBmLahx1s870yIbjcbMC6Ww1AquisncBqPGYFuf+ExKh8HKziZOHl4ndeKtTmA+hparV95NSuaXyzj/B6EIc3jzEMXiP2HyRVeYiADooZcVXKXmcsJxELUxGYuXuf8kRELA0yqTsclJ9XFYNB
    • smithi149.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnzTKqy3LaxA7MFtMPLLLuHTpRC/SBI3DpV1/PlekFYOH1FB2m6hSvlGikmDOD4E5PT3nRkTzjxC+xk3PfvIpJm94TfpVi5NWz24sqWxugroluIpxqcnFsv9val6P8SEbRrC3YV0MO1IVv1+zxz44uDoxxrg6ik+ZMg/vN1hvCJJaQNoje1XngHH4uJqvXvpL8/0H6K4xGynR8DwjiAVvZnKqgXmNxHz/cG8OUUbD91f8V3Bbl+OG4j96aK0Etw5WrYatlFTA4bGZW8d58LcsCE9J33lZblkHi8qcx4w/ol2+XmHjPq4SFhyQJwirHwI/on8i9AkkaREFl2vMztVKr
    • smithi156.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDODu+aimxLIH5r1lu6ndgoHMMGAn7txb+V/84Piv4NS3vTlZSBKqvlS3aIGYTyR9j7vHEEosp48agenpnVrX+Do5Lr4a6v/G0ftVFRQ3mwgMQB29HizOT5mkNgI4hlWd6OZ+d8qW6hAdUiuz/u3I3jBte0t6C6aynEwoUCYa/xYcY24GvzjwYqRNCbmj5uqa6Ue7X+W5QJtN3BJdbW9kIgxisdpVQ946P0ie+OL8MRAp/+uK3tMrFzeNDUOmp/rf9FJBRY6gX+RPOF9jih2gbYJDEAZt7ibtZrUJLJmYhiIsmKICe66QmxajyngT9rWzcgFDDECMeulGcaonYLMW1x
    • smithi105.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOMT4zROqu9zcbM3CXVI5e/4Ol15i2Un8CdMov/eZ45Sv+0WAyruAKusAUEspy3/KTiXpYlBr1arnipRak1lW5O3B/Y2B1DQCX7i+cZuRSs8KNtRi0K5lZXSoLu5ugxSsfvvvMwpl6zTz7x29uPNBviKl6pfl6y/hUZgXKjpKZCvYuQSnFufU/vwRcPpbN/wB3gXAtpP2GOuRDJJeFxDSFppBUSUln0bheFx7QL06pjJ3iPnS8b+55DGLJeE+GrxnG+lLiko2kWJQZZWnIk4gWvMfx7ZYZXGNu3pt8pRTRlgITLM5FLtyNZTrxErxLD2ThWtkD0YOciCvNEfzGNvG3
    • smithi094.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3hoFSqJZJ0pP+x2z8oDFs4+9FiJH4IgKYSC/OmEqM8mqDZrHgLdEF7PBJXEst2bLz98bhkWEwC9/wE6VA+4tZyf4wL+vvYHE7cd6B/TGD/0GmeHMJwwqSsjkv0snC5GVfRaIcvV/KINGrvzkEkjO9nzDXvRZKkOuV9HHt/j7kJ5QNij//O08Dsao5We0MT4g30rPSG91RcX1BOM8GnfY1rIO/gCYQLrH0xYJQljhNa7REt6DtF/41n5tiJoF2mQaN1NEhiQY3uljGwN1rfXeckCOnYaWQKJJKO9tIAvB963eKEyr79EO26Bv4iOLUSReDacdV6vMnLx9U4RG9y7Pz
    • smithi039.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDdPLSJd1FryIXmdGGZKFZltVAyQJ6+UJ9eHhUVHl3lKCsfiH6cnZ2K+PYlf5pKg3x2I7A4zauNrHB/fgZXorrXJzpYLEXDfzjfVpMtzHPMOq9kcpiwN55cbxnT4SUW9TV38cgUdNoOs8OIZlqIo4lYIrlIzb2JdBBH4L4VkbMWH5sHESYFtrR3/xLCUXGmvYvzAHcmip2VYqmevkGRj7AIH4Dwwjanb9iTMhy3DEWi/LrvCA3JkMPAlZ2s+jEuILeZ9CgI32ObEGXhBKc7cAF1aNvECraq+ePlzkfZ/NBp2RrQysZbBy2JCeRBNjOdLnf2jMEr4YfbYs4ZsnKO2J8t
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 598b9f316e43329b95037c57179d429e1bd5754f
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_strays
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2019-06-15 05:20:59
  • started: 2019-06-18 13:57:11
  • updated: 2019-06-18 16:23:12
  • status_class: danger
  • runtime: 2:26:01
  • wait_time: 1:58:05
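
For reference, the roles and tasks recorded above correspond, once the internal.* bookkeeping tasks and overrides are stripped out, to a job description roughly like the following YAML. This is a sketch reconstructed from this record for readability, not the generated job file itself:

    roles:
    - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
    - [mon.b, mon.c, mgr.x, mds.a-s, osd.4, osd.5, osd.6, osd.7]
    - [client.0]
    - [client.1]
    - [client.2]
    - [client.3]
    tasks:
    - install:
    - ceph:
    - kclient:
    - cephfs_test_runner:
        modules:
        - tasks.cephfs.test_strays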