Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_16.04.yaml} tasks/client-limits.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2018-09-12_05:20:02-kcephfs-mimic-testing-basic-ovh/3009796/teuthology.log

  • archive_path: /home/teuthworker/archive/teuthology-2018-09-12_05:20:02-kcephfs-mimic-testing-basic-ovh/3009796
  • duration: 0:29:20
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 3009796
  • kernel:
    • flavor: basic
    • sha1: 56f94a157df74137ea5626a561794780d16e608c
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2018-09-12_05:20:02-kcephfs-mimic-testing-basic-ovh
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 16.04
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • responding to mclientcaps\(revoke\)
        • not advance its oldest_client_tid
        • failing to advance its oldest client/flush tid
        • Too many inodes in cache
        • failing to respond to cache pressure
        • slow requests are blocked
        • failing to respond to capability release
        • MDS cache is too large
        • \(MDS_CLIENT_OLDEST_TID\)
        • \(MDS_CACHE_OVERSIZED\)
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
          • osd op complaint time: 180
      • sha1: b8c077da26bf40921f8fc7b46cfc38eba6597a43
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: b8c077da26bf40921f8fc7b46cfc38eba6597a43
      • branch: mimic
    • install:
      • ceph:
        • sha1: b8c077da26bf40921f8fc7b46cfc38eba6597a43
    • admin_socket:
      • branch: mimic
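
The log-whitelist entries above are unanchored regular expressions: after the run, teuthology scans the cluster log and only flags warning/error lines that match none of them. A minimal sketch of that matching logic, for illustration only (not teuthology's actual code; the pattern list is a subset of the entries above):

```python
import re

# A few of the whitelist patterns from the overrides above.
WHITELIST = [
    r"\(MDS_ALL_DOWN\)",
    r"\(MDS_UP_LESS_THAN_MAX\)",
    r"overall HEALTH_",
    r"\(OSD_",
    r"failing to respond to cache pressure",
    r"\(MDS_CLIENT_OLDEST_TID\)",
    r"\(MDS_CACHE_OVERSIZED\)",
]

def is_whitelisted(log_line: str) -> bool:
    # Unanchored regex search, which is why a prefix pattern such as
    # r"\(OSD_" matches any OSD_* health warning.
    return any(re.search(p, log_line) for p in WHITELIST)

# Warnings the client-limits test deliberately provokes are ignored:
assert is_whitelisted("overall HEALTH_WARN 1 MDSs report oversized cache")
assert is_whitelisted("health warning: (MDS_CACHE_OVERSIZED)")
# Anything unmatched would count as an unexpected error in the log:
assert not is_whitelisted("corruption detected in object data")
```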
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
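
Each roles entry is the bundle of daemons placed on one machine, which is why internal.lock_machines below locks exactly six ovh nodes for the six bundles. A toy pairing in Python (the real host-to-bundle assignment happens inside teuthology at lock time, so the ordering shown here is purely illustrative):

```python
roles = [
    ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3'],
    ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7'],
    ['client.0'], ['client.1'], ['client.2'], ['client.3'],
]
targets = ['ovh096', 'ovh028', 'ovh085', 'ovh016', 'ovh047', 'ovh033']

# Positional pairing of role bundles to hosts; actual assignment is
# done by teuthology's lock/roles machinery, not by list order.
for host, bundle in zip(targets, roles):
    print(host, '->', ', '.join(bundle))
```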
  • sentry_event:
  • status: pass
  • success: True
  • branch: mimic
  • seed:
  • sha1: b8c077da26bf40921f8fc7b46cfc38eba6597a43
  • subset:
  • suite:
  • suite_branch: mimic
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: b8c077da26bf40921f8fc7b46cfc38eba6597a43
  • targets:
    • ovh096.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9D82ArNYOzoKe2vzTggmmyVIXKWRlFGPeNeJ8u0JrfRyCu38McNBqejAP0FQNkmbuk88mI8IGs91NAl3MBUmWPqJTBtS+aLEU0mkCX2v0dXKlMI3oqNSOExudaVdhiigpkpswziXaW2jwE+/DgYxw0b3sO8njAbEfqIo7tHEsLTCZh2KkdKDO59wHQzufDymEesPLXDoyXAQTdarw+evNTOzK0m2iy9Y315mkxPe2mKqYFUMgrlShe9GNEJvQTCVT7++nwrt7kVVt0ewTTeC5OIxO0xEJeQo4JcevATKbDCwHg9JaeRVLYuSe9NkrQQBPLRoPDzfs1/BIh5KODwB1
    • ovh028.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+2XR2T6Pd0YvfjBB43Z5LZBmS14MGnzyOrVKTUDwMU6Th6NS1f9FldwSYkUfUwQWaD4ZFVPjT/9A8zBi/Yz2NhE6sW2nae5aTk8pGZXe7O/LikSCDquUs6BiZ2mbjiYoAd+g+/1qWeN4j1C6jkwaOQX+c1jobtfQ33yn7Dut4VWr8oVtE+cg+ytjxsM2tOKA/e6ocn5xeYPI7gIEoakt9hJicN/804N5NwDKKo1Xt1b2EHDSWGrxtUHcAr4vmRpEUzZMElET3WRbmQSz/5Aht9nffyQI2Sv5IOQ8zz+FbBBhLZr8S4kn03y1ijPxp/z0wBbXdLnA+T/mby5uSTbvH
    • ovh085.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCv+7Ao7+xrYn7wj4ALKGjmZhqGFfMAezciY9C14xAgmbw3bNKQ4g8vy+I5KhG2MhWe5tif6z28EFfd8sjlgOIFomVB6fRbhMJWeDbCXEi380tSkVdXDpOB23HWcSR63s/MYsHK7KH8+9eG+8uW4Fc4A0as3E1o+slmDvBhQ8BpDOApdJYe3oJ5IrYjA0o/PHLOZsb2g100x/OmSVK6CcGObu21+tb6hner6EakkQbA/Vzl6WlzKGodF1guz4kKT+0d0hsAPv8TsihWZtGBMGacnfLA0MtLVltfJXuDpU7xeqjFyHE+rGTPV6P/GPxRgnENFPFdDqQRuHTkNLPmAlZp
    • ovh016.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDwXZgVHGB2MZ2QVGxuUnVQC+eMkbFA3D6OvklyH/5XJcAxOx02HmcOLc+rXsyfSZcWH3D26XbA2v0QUIcUFw276NMVcZ7rXy8h4TOntKpXJJI+H+6DAJmq6CEOr+g0LITbCyxQAFi06jaB15iNd3ylb4U+dzCCjRB4dREDgI8ZHwedrxi6qJp2q3gvwnZMvczr/48VSmOqma6uUBtigramn7xvhRIrSuXxDB+HHOiBsLxiY0eiIYHfhEIB1XPV8TC7jWkmlF8NTrKktI85OQpnHy+XbvOP5t+51EwMrNOtpSQ3zWiRfGv+3g6HDAEIk6m+aNzw9gwzFz2l+A8/Jrd5
    • ovh047.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXLXbbpDCzRvUrnnhzM7cq+0FTS6q4hzisT5qSPkCUqFFdQAp/rzsFJYWkYvbHf8N18EmDgDiYOoNhWvkBeHp+pbjZS3uSDzq9E+1KwkUEx03bzPwD1VnfRPokWIn87k1B9qgJRTwbFy7VtUE86kj3W4YSxQ1tBxi7iCvca5Q7WHP9I212nqQI2F2st28guGOGiEd0/dnPp7Yj+vdt4WNUlOviXUAYueGCFpz+sTA+j/PHMQfe5ZcfWaHGEMAuy6Q48au3TLw3CrUTUh9YYUM+1gsvgQfAgkktzqNuWLvIAiNYAZriODWPqCc4lz1AtOX9ezjMUDqlBHA2cJdb2qdx
    • ovh033.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFD/8ClVSHsiEWk/y1KXxUmFOdkymzhDWHrEYB4TmNsGbofh9m7WM8XrwwmUVedyOl336xchO7S9TXyJ4xoCCaFO4Laow+invjhsPFQAn5DWR6gSCpf9P4k0ZNaqssKkxiYRwJKRFfMxGf3oztCH2f85XJ6oqdeYdX4xQTiuh7mBx5Ax5sV8Eqg5rPpSH/ZnMcry6wyBc9nuaaBgi+FNcNwT3njEO1IAWdcepg1ejKoOWYwTD7PDjo6w5VHxfy5IhvZp4P5vzU+h067xIHK/4n5AkP91OlF0WBE/mG8l4qc9Wh2YveHsI4z0DnhYkgOFKgzLTxBRGZAtNMPNF1AZCz
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 56f94a157df74137ea5626a561794780d16e608c
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_client_limits
      • fail_on_skip: False
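
The final task, cephfs_test_runner, runs the named Python unittest modules from the ceph qa tree against the kernel-client mounts set up by kclient; with fail_on_skip: False, tests that skip themselves do not fail the job. Conceptually it reduces to something like this simplified sketch (not the task's real implementation):

```python
import unittest

def run_modules(modules, fail_on_skip=False):
    # Load each named module's TestCases and run them as one suite.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        loader.loadTestsFromName(name) for name in modules
    )
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    ok = result.wasSuccessful()
    if fail_on_skip:
        # Treat skipped tests as failures when requested.
        ok = ok and not result.skipped
    return ok

# For this job the only module is the client-limits suite; it is only
# importable inside the ceph qa tree, hence left commented here:
# run_modules(["tasks.cephfs.test_client_limits"], fail_on_skip=False)
```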
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-09-12 05:21:29
  • started: 2018-09-17 15:04:21
  • updated: 2018-09-17 17:50:23
  • status_class: success
  • runtime: 2:46:02
  • wait_time: 2:16:42
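
The timing fields at the end are internally consistent: runtime spans started to updated, and wait_time is the portion of the runtime not covered by the test's duration. A quick arithmetic check:

```python
from datetime import datetime, timedelta

started = datetime(2018, 9, 17, 15, 4, 21)
updated = datetime(2018, 9, 17, 17, 50, 23)
duration = timedelta(minutes=29, seconds=20)   # 0:29:20

runtime = updated - started     # -> 2:46:02, matching the report
wait_time = runtime - duration  # -> 2:16:42, matching the report
print(runtime, wait_time)
```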