Description: kcephfs/recovery/{begin.yaml clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_16.04.yaml} tasks/client-limits.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2020-04-06_05:20:02-kcephfs-mimic-testing-basic-smithi/4928396/teuthology.log
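
Teuthology assembles this job by deep-merging the YAML fragments listed in the description, in order; the overrides and tasks recorded below are the result of that composition. A minimal sketch of the merge behaviour, assuming plain nested dicts with the usual rules (mappings merge recursively, lists accumulate, scalars are replaced); the fragment contents are illustrative, not the real files:

    import copy

    def deep_merge(base, override):
        # Mappings merge key by key, lists accumulate, scalars are replaced.
        result = copy.deepcopy(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = deep_merge(result[key], value)
            elif isinstance(value, list) and isinstance(result.get(key), list):
                result[key] = result[key] + value
            else:
                result[key] = copy.deepcopy(value)
        return result

    base = {"overrides": {"ceph": {"log-whitelist": ["overall HEALTH_"]}}}
    frag = {"overrides": {"ceph": {"log-whitelist": [r"\(MDS_ALL_DOWN\)"]}}}
    print(deep_merge(base, frag)["overrides"]["ceph"]["log-whitelist"])
    # ['overall HEALTH_', '\\(MDS_ALL_DOWN\\)']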

  • archive_path: /home/teuthworker/archive/teuthology-2020-04-06_05:20:02-kcephfs-mimic-testing-basic-smithi/4928396
  • duration: 0:16:14
  • email: ceph-qa@ceph.io
  • failure_reason:
  • flavor: basic
  • job_id: 4928396
  • kernel:
    • flavor: basic
    • sha1: a54036f8d80e448218937cc4faeec8b0e17a2351
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2020-04-06_05:20:02-kcephfs-mimic-testing-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 16.04
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: 8f57def2bffdcc86fc08466d080bd791fc1bc57a
      • branch: mimic
    • ceph:
      • sha1: 0aee45267878148ba653063540ae523c02daa018
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • responding to mclientcaps\(revoke\)
        • not advance its oldest_client_tid
        • failing to advance its oldest client/flush tid
        • Too many inodes in cache
        • failing to respond to cache pressure
        • slow requests are blocked
        • failing to respond to capability release
        • MDS cache is too large
        • \(MDS_CLIENT_OLDEST_TID\)
        • \(MDS_CACHE_OVERSIZED\)
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
        • mgr:
          • debug ms: 1
          • debug mgr: 20
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
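
For reference, the cephfs_ec_profile above (k=2 data chunks, m=2 coding chunks, crush-failure-domain=osd) implies a four-chunk stripe spread across four distinct OSDs that tolerates the loss of any two of them at 2x raw overhead. This is standard erasure-coding accounting, not something measured in this run:

    k, m = 2, 2
    stripe_width = k + m          # each object is split across 4 OSDs
    failures_tolerated = m        # any 2 of those OSDs can fail without data loss
    raw_overhead = (k + m) / k    # 2.0x raw bytes stored per logical byte
    print(stripe_width, failures_tolerated, raw_overhead)
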
    • install:
      • ceph:
        • sha1: 0aee45267878148ba653063540ae523c02daa018
    • admin_socket:
      • branch: mimic
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
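
The log-whitelist entries in the ceph overrides above are regular expressions: a warning or error line in the cluster log that matches any of them is not counted against the run. A minimal sketch of that matching, assuming the entries are applied with an ordinary regex search (the pattern list is a short excerpt):

    import re

    whitelist = [
        r"\(MDS_ALL_DOWN\)",
        r"overall HEALTH_",
        r"Too many inodes in cache",
        r"failing to respond to cache pressure",
    ]

    def is_whitelisted(log_line):
        # True if any whitelist pattern matches anywhere in the line.
        return any(re.search(pattern, log_line) for pattern in whitelist)

    print(is_whitelisted("cluster [WRN] overall HEALTH_WARN ..."))  # True
    print(is_whitelisted("cluster [ERR] unexpected scrub error"))   # False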
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.x-s', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.y-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
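
Each roles entry lands on one of the six locked smithi machines, so the daemon groups share two nodes while every client gets a node of its own. A sketch of that pairing, assuming a one-to-one, in-order assignment (host order illustrative):

    roles = [
        ['mon.a', 'mgr.y', 'mds.a', 'mds.x-s', 'osd.0', 'osd.1', 'osd.2', 'osd.3'],
        ['mon.b', 'mon.c', 'mgr.x', 'mds.y-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7'],
        ['client.0'], ['client.1'], ['client.2'], ['client.3'],
    ]
    hosts = ['smithi201', 'smithi093', 'smithi087', 'smithi092', 'smithi061', 'smithi043']
    for host, assigned in zip(hosts, roles):
        print(f"{host}: {', '.join(assigned)}")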
  • sentry_event:
  • status: pass
  • success: True
  • branch: mimic
  • seed:
  • sha1: 0aee45267878148ba653063540ae523c02daa018
  • subset:
  • suite:
  • suite_branch: mimic
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 8f57def2bffdcc86fc08466d080bd791fc1bc57a
  • targets:
    • smithi201.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVBx1bM8IlHg5ErlzlEf9D12cgurlAO4hl8FS5M0X7Why8/gO+3F8FfUs2z4xleomPRKNEMlAh1hj8G+N2q+epqcdR1EqEHgondSqVqXLOakS4O3ckI4pD8glZDbX8+QAV40V4vEYLBQPDGU1FXv9I5aHKsSgUaU0c1WKVjhgU/CTKbDm/KZdCeg3w5JL2HCBFjsEb5dHqzcc4XuV+MwbICoUFQxxPxsfmRWLHAPIIkEmdwTh12SQ3DBR0yxKu1ueqidznr1H3QGDafjmmTxyhV7F693ZX1PXugdhGrGKooSdsjM1Jj8gpbd96ide/30tg3NY5mS/5JJIqGRukVHAx
    • smithi093.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHTSFJO2fX8P9okOtcfZw2Ir3I+xfoNoLj5hlHavBYC3LYCHESIp1rCfOHpFioLTwwEzodJes5hA/F2lmG3DQuAhPR/NkujxbyBSzlF9BI53B6BOZDkFch3zFkcZYNijiOWNzEwW5tJKi0hJNHtEjFtcVtGh0Oklf7T8dheotRvKowYLc20Z/vbxmO5Tm4/b63XJzj6X/3H7RdHHDfHuk2pXpX6DoRdCtHPaUvhM4HtCPXFV3nS5eoGxJ6KJDGsDhWkCivjnWqX6NrmaQx4leLspuarGcGl1GbtP3WK+d77K48jbR0ZE+0jMj87j+qyCSUq6aElUkjWhuax0frKbSp
    • smithi087.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDbMoYlu+qQoA2e6W8k0YPDsi/6Fzx4ovZlXW+f5u9TIOUIwGeD2sS0ioseB4RAlL6RCI8yS+sAWEHbOe2bibnswKnFvE7uR8CBD1udA5GrBBx2uG8WBbHZWRgHnFRwxitdEXW7+Gv3gbiahOcUguFtEF1w1fsMN5/ObKL34SRKpsCTmNKuvXoI5/IE9McXxCx8j/mmuGaJy5mbCgB/c6J1MKZL/9XdpED26AQAeQGudwiG373NKkW0AbYBd60Zpsr4ZVnatDRdAsUwH3Hu5yvrmUKZgjUjxzbhXr8eW862i1cVXMeBgtEkybG2IjNVxmE0cPJ+/rhpqnUhZb9ddxO7
    • smithi092.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6DZERLn1UiY0POuZGdS06YkZasdooOok46Hf821yOEallVjVzHhToVeBYJUYLmvy/+FBpG5G5Tz28b4uo+Q0rET071LpjI2lSRVTqtR27kcwW7k3rOfT4cm7AsaaLbG5bfj5V4B45ksMLzGRga2wCzo9z9/bCb2pn7ZZCOXNYgcUbQaYc0ikPeh1gRl1XrSh/r38fxecqIzDlzrzFWtDRa+Y3OejGumqWpD+a43LXp4RaBZYsfGxJT9I1AIWH+ymxLROGlNpnjuM60FdS10ZtDxufbOwHqf3LVQBAzAaYuXnrDNFlX3Ndy/rreTu0E8y4rELGvUbsXX4Dl5R6qUfR
    • smithi061.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDENmi74jO/7bnaZ4jDeez7qlRYszq5pEOcwAR314E1PutpAsEzKI5eJZs9ciHKd46OU8/pB3qIEgxKt3pFrbruu/KJYAxuYRFzg3+vdtvrtr0WqezWxREfEsFs8Te5s/uqR92n8ZgbU2RdICSQu+h6ZcNXV+naJFPgJWHAdG+VgIiZlhkf0jsl+SS6dhu+2G9q8qlvCXV8MevmsoRJuJlZVB9z2zuE7Wij4rbZQRPr6Tzw+8lRGKO64a6oON2WO1iWE1P19X8nLeDsyO8WeUCe2pHwLwvPftMTHqZjtIjzCtOK3H326DIDLiWjhyeZQYQL5VDm+fjGXK8OfxVaThV9
    • smithi043.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDe1UAKTX4FZWHuzn5PNFak+TehvFMSxODfOAfM95Jp2GO41pyvGxZhl7RPeD/nS93pTz8I4hkc2LD9wFFVIt/WIRGbXFLNtUAPeGzDyOxWN9gvFjpIz7v2tYexM5xcue1NDu24S3ma7TGWY4cakgwbTNp2BU0SH4l35NIYryyL5U7JsCDu9zo6WQd5T8Vgny/O62JlauBogTJuGkg+3WW3G8eNl6a3qNel/jnaQEtLlbN7UmsJ8W1H8ic53AGVbDWEVP4x0hAP6PAunchHjp5q2m4PkPQFgIIEdNxAok3dQQkRq2eYbQFa05gQn4GsZXN1qnIWdlU9PXTas21m8dkJ
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: a54036f8d80e448218937cc4faeec8b0e17a2351
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • extra_packages:
        • python3-cephfs
      • sha1: 0aee45267878148ba653063540ae523c02daa018
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_client_limits
      • fail_on_skip: False
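
cephfs_test_runner drives ordinary unittest-style cases from the listed module against the mounted kernel client, and fail_on_skip: False means skipped cases do not fail the job. As a sketch, assuming a Ceph checkout with qa/ on sys.path (the module imports teuthology helpers, so those need to be importable too), the cases the runner would collect can be enumerated like this; illustrative only, not the real runner:

    import unittest

    def iter_tests(suite):
        # Flatten nested unittest.TestSuite objects into individual cases.
        for item in suite:
            if isinstance(item, unittest.TestSuite):
                yield from iter_tests(item)
            else:
                yield item

    suite = unittest.TestLoader().loadTestsFromName("tasks.cephfs.test_client_limits")
    for case in iter_tests(suite):
        print(case.id())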
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2020-04-06 05:22:53
  • started: 2020-04-07 03:14:16
  • updated: 2020-04-07 12:00:26
  • status_class: success
  • runtime: 8:46:10
  • wait_time: 8:29:56
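
The timing fields are internally consistent: runtime is the wall clock between started and updated, and wait_time equals runtime minus duration, presumably the time the job spent queued and waiting for machines rather than running. Checking the arithmetic:

    from datetime import datetime, timedelta

    started = datetime(2020, 4, 7, 3, 14, 16)
    updated = datetime(2020, 4, 7, 12, 0, 26)
    duration = timedelta(minutes=16, seconds=14)  # 0:16:14

    runtime = updated - started
    print(runtime)             # 8:46:10
    print(runtime - duration)  # 8:29:56, matching wait_time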