Description: kcephfs/recovery/{begin.yaml clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_16.04.yaml} tasks/client-limits.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2018-10-01_05:20:02-kcephfs-mimic-testing-basic-ovh/3091863/teuthology.log

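The description above is the list of suite facets that teuthology deep-merged to produce this job's final YAML. A minimal sketch of that composition, assuming PyYAML is available and using illustrative fragment bodies (the real fragments live in the ceph qa suite tree; list-concatenation merge semantics are assumed to match teuthology's deep_merge):

    # Sketch of facet composition: the job YAML is a deep merge of the
    # fragments named in the description. Fragment contents below are
    # illustrative assumptions, not the actual files from the mimic suite.
    import yaml

    begin = yaml.safe_load("tasks: [{install: null}, {ceph: null}]")
    kmounts = yaml.safe_load("tasks: [{kclient: null}]")
    client_limits = yaml.safe_load(
        "tasks: [{cephfs_test_runner: {modules: [tasks.cephfs.test_client_limits]}}]"
    )

    def deep_merge(base, update):
        """Merge update into base: dicts recurse, lists concatenate."""
        for key, value in update.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                deep_merge(base[key], value)
            elif isinstance(value, list) and isinstance(base.get(key), list):
                base[key] = base[key] + value
            else:
                base[key] = value
        return base

    job = {}
    for fragment in (begin, kmounts, client_limits):
        deep_merge(job, fragment)
    print(yaml.safe_dump(job, sort_keys=False))

Merging these three fragments in order yields the install → ceph → kclient → cephfs_test_runner task sequence that appears under "tasks" below.
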
  • archive_path: /home/teuthworker/archive/teuthology-2018-10-01_05:20:02-kcephfs-mimic-testing-basic-ovh/3091863
  • duration: 0:30:11
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 3091863
  • kernel:
    • flavor: basic
    • sha1: 5ce6140385cccd66daadad8cdc12e5e0fb70ee3e
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2018-10-01_05:20:02-kcephfs-mimic-testing-basic-ovh
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 16.04
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: 4695a60617d83bee512a8cc8ca6a999b1205e314
      • branch: mimic
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • responding to mclientcaps\(revoke\)
        • not advance its oldest_client_tid
        • failing to advance its oldest client/flush tid
        • Too many inodes in cache
        • failing to respond to cache pressure
        • slow requests are blocked
        • failing to respond to capability release
        • MDS cache is too large
        • \(MDS_CLIENT_OLDEST_TID\)
        • \(MDS_CACHE_OVERSIZED\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
      • sha1: 4695a60617d83bee512a8cc8ca6a999b1205e314
    • install:
      • ceph:
        • sha1: 4695a60617d83bee512a8cc8ca6a999b1205e314
    • admin_socket:
      • branch: mimic
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
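
Each log-whitelist entry above is a regular expression; the repeated entries (e.g. \(MDS_ALL_DOWN\) appearing three times) come from the same whitelist_health.yaml fragment being pulled in by more than one facet. A minimal sketch of how such a whitelist is typically applied to cluster log lines, using a subset of the patterns above (the matching semantics here are assumed for illustration, not lifted from the teuthology source):

    # Sketch: a WRN/ERR cluster log line that matches any whitelist regex
    # is ignored; an unmatched one fails the run. Semantics are assumed.
    import re

    whitelist = [
        r"\(MDS_ALL_DOWN\)",
        r"overall HEALTH_",
        r"Too many inodes in cache",
        r"failing to respond to cache pressure",
    ]

    def is_whitelisted(log_line):
        return any(re.search(pattern, log_line) for pattern in whitelist)

    assert is_whitelisted("cluster [WRN] overall HEALTH_WARN 1 MDSs report oversized cache")
    assert not is_whitelisted("cluster [ERR] some unexpected failure")

This is why the parentheses around health codes such as \(MDS_CACHE_OVERSIZED\) are escaped: the literal "(MDS_CACHE_OVERSIZED)" token in the log must match, not a regex group.
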
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.x-s', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.y-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: mimic
  • seed:
  • sha1: 4695a60617d83bee512a8cc8ca6a999b1205e314
  • subset:
  • suite:
  • suite_branch: mimic
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 4695a60617d83bee512a8cc8ca6a999b1205e314
  • targets:
    • ovh066.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4KDO1gTz1+ASOjT6JAr+qArYxBWYCVkZQKhdSbOotS3578xLcg14APCtYm7a6+/D0ToTBjjwKJ5eF58PJmxtBVOSDDkNuoWhtdxnmRLWBJP9qyAQFo30Rb6IkhlzEuOmC71YpBpNY2A2QCqQ6cvV7MdtanmiV5FsJJA9y455Tn2ZBc/RjSfBxBP7FFMerz07uJSWQAUIIPLbANirOz5dAbEuxky/7afGc8DJgAYVBle6L0bOLTznsiNO4COAe/OmsN5+9+VjoC+O6CBEWDX0/a2k2tvXbXgGPpfNeZ0n2EP1CBsVjJiAWP0xMdAugGBqieIiqwaK5eDEKLaOezwZh
    • ovh036.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZKrGae8GxD1MGRlXBSROAwyyGg87MKKmNZzDRlOK/SPzDTcKLL7XmVkzJZ4UYv7+aS55EFCZNbCj4dJApABT0gKIDZp49UpIzPAWc3U8aniTkGU1XYECGw1XvmOrTrHfdC9AJGsrcID0jCQL2ho6jMU3ZdAgXMTM01npG7ef6m0qZYMi8uU5QZqUjqGLrVAY0Esr3nqblsY7TbdH3ihw7IRerOmkt3jbP40MYrwbztVQBLQWIWvMGBvNvGQwNWkLiw6rOLIvfU6fxPvjWh+Rb6is2AWHbWR/xeZ5CR1thaHrPTEe6TB1JFf2Op0xivnbwa0tpkntN0D99TNEG9i9F
    • ovh023.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDT6ruuNERe62klZ7c3SEgiQb8unkVD7LCAyw8G+qO63dSoO+b7vOXQ1DN3WQYeeDQEOccoRe5qb9IdJjXQEC7ufpfqTBNldx7pT62F7xpiMATTktxHPoeiS5n1AbnOBqqv5vJKmT4SZ4wci0XQuINvnlGnN/RyifKUE0xaXbR4BROFB1Mx+Fp4COKAfgw5jZ/EZlXEQbkGUCyYKmvsMq0SICCVvdSCDDnrFifQX+DMeG5+x3f1B6nLNwJcKcJY2390wC3eh9JM+1mgw8SaZYIV8nuMwPi1XZ/TZ2uyB/fzQ32cOc+TsbyWJn+EzdFB8pJlsghQxwyZUDIpC3KGh4Pt
    • ovh094.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3jpAkIaNrGDddLNWk39VhkBc8Ej7XZ+SD5ZB4TD8l0YMYvhqYzyBYVG10uCHmSK26HDUWiu9xp2umeR5HjRPMUTu1Wr2/DaYw0aVFsYw60tVvrce7STz7ogu5pKwGnzAeYMoV5nVEmvOmIL4zALWdQEPRDPyPW5QOd4A3M7oH8qQLIBcAMLaE+zp+V9WdoZYYtLE+cP+7AY48HFcbroFeovutwGekQVeqBVIG52VdnzcHrjz4m9oVY6vD0ANEwLe6mZ/tPvIYq9mqvjvfF1kErTvploFo4xoKldCa2TTNZssCI2XHN8Ta8GSv/mgfwVdl7GHmEMEx/VB98tMeMEa7
    • ovh020.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCiZG0teU9vz3am6GfGSbJjP34jGwiMoQe1ziuKv4usAPiEZb+f2GnBRVmWFW6HT+ncxGHZdmf/IWFxsUgMFmYtShnqjVWxz+h6FZFKQ/xKuHGRjQfbfBfkI+Vo0VOgKfgbw9py27E4opYqo1umPYxyrJGQxwHs8SP9OaO2hOKgqZkkaRi/sx91uVVF7cd/6XG9oCkxSiQsgpdzhx5md2wo7NaYd1QOHP2m+9ta2qXTD/X7hpfErVBDyavihToDp90GleVVPHa97lzrkSrA+d/miTH+kiafz35Gi8XR34VXs8foIowFTPMUbgBDqKPdOmnjj6sk4AkMqarL8UwKSbJl
    • ovh044.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCX5EwhF8Yl6xuBgQtnWQqLMq9yHn9e5ubo5vJFY/TaR3wFBtZMlVmQBn2PTe6XO6tQOFLx6O3V1t/mrpqh/9bbR6wY/NAW5mkZYBuvbhyPC9Bms3RJktGYnBJ170qciuAFBE9mfTlm0t+QN7rJ/3Sszehrdl7Z/AUNzkh48hnoLcmA14fV5bsDxOWXQsM4uxPSp6hj+UPn8fTig0iLDhM7+vkzBiVvs9V4PAh+g1SpunuTIycS9EBbrlx1Ry3xaciaXycIe2HE70AQim8RpygQybTfZwmHNXNZ08hVH2b7bgPYmttf0HkPqz4BpWDPiLrL3J47pcGDPQQUREs4H5F
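
Six machines were locked (internal.lock_machines: 6 on ovh), one per role group, so the two MON/MGR/MDS/OSD groups land on the first two hosts and each client gets its own node. A small sketch of that pairing, assuming the targets are listed in the same order as the role groups (the authoritative mapping lives in the job archive):

    # Pair each role group with a locked target host, in listing order
    # (the order correspondence is an assumption for illustration).
    roles = [
        ["mon.a", "mgr.y", "mds.a", "mds.x-s", "osd.0", "osd.1", "osd.2", "osd.3"],
        ["mon.b", "mon.c", "mgr.x", "mds.y-s", "osd.4", "osd.5", "osd.6", "osd.7"],
        ["client.0"], ["client.1"], ["client.2"], ["client.3"],
    ]
    targets = [
        "ovh066.front.sepia.ceph.com", "ovh036.front.sepia.ceph.com",
        "ovh023.front.sepia.ceph.com", "ovh094.front.sepia.ceph.com",
        "ovh020.front.sepia.ceph.com", "ovh044.front.sepia.ceph.com",
    ]
    for host, group in zip(targets, roles):
        print(f"{host}: {', '.join(group)}")
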
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 5ce6140385cccd66daadad8cdc12e5e0fb70ee3e
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • sha1: 4695a60617d83bee512a8cc8ca6a999b1205e314
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_client_limits
      • fail_on_skip: False
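
The tail of the task list is what actually exercises the kernel client: install the build, bring up Ceph, mount via kclient, then run the client-limits test module, with fail_on_skip disabled so skipped cases do not fail the job. A sketch re-rendering that tail as a standalone fragment (task and field names are taken from the dump above; PyYAML assumed):

    # Re-render the interesting tail of the task graph as standalone YAML.
    import yaml

    job_tail = {
        "tasks": [
            {"install": None},
            {"ceph": None},
            {"kclient": None},
            {"cephfs_test_runner": {
                "modules": ["tasks.cephfs.test_client_limits"],
                "fail_on_skip": False,
            }},
        ]
    }
    print(yaml.safe_dump(job_tail, sort_keys=False))
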
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-10-01 05:21:02
  • started: 2018-10-05 16:43:47
  • updated: 2018-10-05 22:57:54
  • status_class: success
  • runtime: 6:14:07
  • wait_time: 5:43:56
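
The timing fields are internally consistent: wait_time plus duration equals runtime, and started plus runtime equals updated. A quick check:

    # Sanity-check the timing fields reported for this job.
    from datetime import datetime, timedelta

    def hms(text):
        h, m, s = map(int, text.split(":"))
        return timedelta(hours=h, minutes=m, seconds=s)

    # wait_time + duration == runtime
    assert hms("5:43:56") + hms("0:30:11") == hms("6:14:07")

    started = datetime.strptime("2018-10-05 16:43:47", "%Y-%m-%d %H:%M:%S")
    updated = datetime.strptime("2018-10-05 22:57:54", "%Y-%m-%d %H:%M:%S")
    # updated - started == runtime
    assert updated - started == hms("6:14:07")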