Description: kcephfs/recovery/{begin clusters/1-mds-4-client conf/{client mds mon osd} kclient/{mount overrides/{distro/rhel/{k-distro rhel_latest} ms-die-on-skipped}} objectstore-ec/bluestore-bitmap overrides/{frag_enable log-config osd-asserts whitelist_health whitelist_wrongly_marked_down} tasks/client-limits}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2020-06-16_18:30:21-kcephfs-nautilus-distro-basic-smithi/5155357/teuthology.log
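The curly braces in the description above record which yaml fragments teuthology composed to build this job. Read as paths under qa/suites/ in ceph.git, it expands roughly to the following fragments (a sketch; file names are inferred from the description, not checked against the nautilus tree):

    # approximate fragment expansion of the job description (paths assumed)
    - kcephfs/recovery/begin.yaml
    - kcephfs/recovery/clusters/1-mds-4-client.yaml
    - kcephfs/recovery/conf/client.yaml        # plus mds.yaml, mon.yaml, osd.yaml
    - kcephfs/recovery/kclient/mount.yaml
    - kcephfs/recovery/kclient/overrides/distro/rhel/k-distro.yaml   # plus rhel_latest.yaml
    - kcephfs/recovery/kclient/overrides/ms-die-on-skipped.yaml
    - kcephfs/recovery/objectstore-ec/bluestore-bitmap.yaml
    - kcephfs/recovery/overrides/frag_enable.yaml   # plus log-config, osd-asserts, whitelist_health, whitelist_wrongly_marked_down
    - kcephfs/recovery/tasks/client-limits.yaml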

  • archive_path: /home/teuthworker/archive/yuriw-2020-06-16_18:30:21-kcephfs-nautilus-distro-basic-smithi/5155357
  • duration: 0:21:14
  • email: ceph-qa@ceph.io
  • failure_reason:
  • flavor: basic
  • job_id: 5155357
  • kernel:
    • client:
      • sha1: distro
    • kdb: True
    • sha1: distro
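In the kernel stanza, sha1: distro tells teuthology to skip installing a ceph-ci kernel build and run the distribution's stock kernel instead (RHEL 7.8 here, per os_type/os_version below), while kdb: True asks for the in-kernel debugger to be enabled. Written as a job fragment (a sketch using this run's values):

    kernel:
      client:
        sha1: distro   # stock distro kernel on the client nodes, no ceph-ci build
      kdb: true        # enable the in-kernel debugger (kdb) on the test nodes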
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2020-06-16_18:30:21-kcephfs-nautilus-distro-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.8
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • bdev enable discard: True
          • osd failsafe full ratio: 0.95
          • bdev async discard: True
      • bluestore: True
    • workunit:
      • sha1: faea8ede3f013361e4aed12b07c6b741cf827f7b
      • branch: nautilus
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(TOO_FEW_PGS\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • responding to mclientcaps\(revoke\)
        • not advance its oldest_client_tid
        • failing to advance its oldest client/flush tid
        • Too many inodes in cache
        • failing to respond to cache pressure
        • slow requests are blocked
        • failing to respond to capability release
        • MDS cache is too large
        • \(MDS_CLIENT_OLDEST_TID\)
        • \(MDS_CACHE_OVERSIZED\)
      • fs: xfs
      • conf:
        • global:
          • ms die on skipped message: False
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • osd:
          • mon osd full ratio: 0.9
          • bluestore allocator: bitmap
          • bluestore fsck on mount: True
          • debug osd: 25
          • osd op complaint time: 180
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • debug ms: 20
          • debug journal: 20
          • osd failsafe full ratio: 0.95
          • mon osd nearfull ratio: 0.8
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • bdev enable discard: True
          • mon osd backfillfull_ratio: 0.85
          • bdev async discard: True
      • sha1: faea8ede3f013361e4aed12b07c6b741cf827f7b
    • install:
      • ceph:
        • sha1: faea8ede3f013361e4aed12b07c6b741cf827f7b
    • admin_socket:
      • branch: nautilus
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
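Everything under overrides is deep-merged into the matching tasks below: the conf maps become ceph.conf settings per daemon section, and log-whitelist entries are regular expressions matched against the cluster log, so warnings that the client-limits workload provokes on purpose (it deliberately drives clients past the MDS cache limits, hence "Too many inodes in cache" and MDS_CACHE_OVERSIZED) do not fail an otherwise healthy run. A custom fragment contributes to the same structure, for example (hypothetical values, only to illustrate the merge):

    overrides:
      ceph:
        conf:
          mds:
            debug mds: 20                  # merged with conf from other fragments
        log-whitelist:
          - Too many inodes in cache       # regex matched against the cluster log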
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.b', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.c', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
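Each inner list under roles maps to one machine: two nodes carry the whole cluster (mons, mgrs, the MDS daemons, and eight OSDs) and four nodes each run a single kernel client, matching the 1-mds-4-client cluster fragment. That is why internal.lock_machines below locks six smithi nodes and six targets are listed. As a job fragment the same layout reads:

    roles:
    - [mon.a, mgr.y, mds.a, mds.b, osd.0, osd.1, osd.2, osd.3]
    - [mon.b, mon.c, mgr.x, mds.c, osd.4, osd.5, osd.6, osd.7]
    - [client.0]
    - [client.1]
    - [client.2]
    - [client.3]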
  • sentry_event:
  • status: pass
  • success: True
  • branch: nautilus
  • seed:
  • sha1: faea8ede3f013361e4aed12b07c6b741cf827f7b
  • subset:
  • suite:
  • suite_branch: nautilus
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: faea8ede3f013361e4aed12b07c6b741cf827f7b
  • targets:
    • smithi089.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxOqv0KTqofMB2JXjOLdwWu/dAGdnawyTngGIE3S4fH7AAYrJ3nWehIm7vyN8v0JgHGfLkCT3zFCMRcFz42BNK5frnxZDAoLv5a1l0dsSqZK/2X3n0s4T7vvC2JCrD9K3XzUjJuH+RktHn6mci9mnnHhzdSjyMaIFCPLFBn8Zj/Ju5b5dSr9hw7XkmwcoPTt4uy+m85XdAwPH0BXrjxrIVHUX+6MbvkCGWm/IQmow2ZCfC1d8lMvQveZkH4N9etQYyZ7wgpL95nbv2RpI2zo4tvVr38xqIr3xaLMvYJt9sVx+0VQh2blMb0B9q5sJ70yKcPoH84y0P4gB+Do2FlkRd
    • smithi087.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDa6L2uWt+E73nd/5XDo1NwItfi6aEkSEa+njR5XnbIgwFXlz0TznTCfX6J3q8CPTPjbHSDoL1XeH/b2MWTFSuBQ3exqh+wNz/ajLVUzfXC7gdYm8K0b17Rw6lN6B/8VmdNIVC7rtnhvi/GTKwvr+RMGm5VleLiJ2qGNDEEgSz5jr6fWnYSJ4XzpPfDZCm/zsQMobgvXYVLyKO1ufbKIelJ69fJRo6BbT3gopGdsLHxamPPaGv/IzncLAU3llP7qywdDYMA0em8FjX3EWDnoVZe9YBS51hUz0BgGTZMOlsFL0bGEBH6DAKA2GwcuXqCCd9SpjBQVGn0VzXzDQGoIDWD
    • smithi026.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjwAj4y7tndYKGQJIF3Mhc9jrNAwtQCqd73NeZc/x4xktsRvafXmUtt5VTtwpm77+5Y1ax7ybdCHkVwRBrKtlArJb6OBNce7nslcO8c8TSSfsluCHAQTW2WOogm7uz/TTsHjAd4qUFOgJUOUYfveXWkK5CjTHtQeAZ1RB9LbL/X3TzZKQOH8ybxMwceW582Nyi/pnJSaoJllqG0DRBHj/XTbwx2ijMKBd6pMH2twP3KBfR44hRqNq5ylFelnQipzghw/QokNvdrrpG1mS4yy5+BHXWZzL5aT9GT0qfifgrmiyV1/ssNOHtGQF/xUbAu/Z8wshYvrPH0mnMhuRQMvbn
    • smithi186.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDj5Tj0l/oTTz8NaPo5TcmCYFa6Hq4tOQMVQu5x3DMWbkYIqXMbK8UjMUdF3/yZNHTlcR/RyzaTCj3gr7TXd6HMc37xtTeZLof/KzE6B7+OTpKOkiIi7zx80vqEvJ9WNiyzAm3H0KgtMkw72dP6V0hNE0SqIF/X0EJ64D7eh1ipKCDjlin4PFXpmySQQADPRebINQc1Efmy7k0SNu2OwBGLZBCS02hPCyqNDMUTKFrVWhF4BkDRMWG17mi30jN1LnfgSbuiS7yZRhpMpBnJzf5hJ24QKpxrNm8BkdYOIBqkHzW682OhbRFFKR102d6QqEvC/in9rQtTknyCaoGTzkUH
    • smithi092.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuIWjO/q1h2+KC1IK4CoCA++7o4IFAJj5zVOLt94jWLi6wdxzwYamwBE0ByZl7fxFaijQ/DRed878Ky4v2d+ux0GGsLcVf4ecLlKISUYZW0eU8/Oc2fH7XdrYrFTcQo9In2VgWppCoHznkeCM+Me/iDFxspMU7UJjuc1mu9aoMep4TgXQeP8HQZWpjQnl4JvdP0dIY17N6goxJHA5701bKJjhXyAJxaFNV5mwubILU220mhRToHy34CydWakzJDeIjaP7o9GcbLByn0WOWUG+KFzQdybNpW23QDLWCrOg8taMw57+bbuFTDbHyAbezWjavHRA8/KNdW78hNmHPZVtx
    • smithi065.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCYmb2UJ5pTknQyo419wx0dE9Gv2Xh8JeO/vzJ1ygmngC76QXtJEdWEkyM77QV1y5Goo7QnEVMVcO/VWe+T2ZDBJDePBeyMfITy7eCNdL+T3rCs6k8ybAqMbG4DBjOy23L6f4ENZKGJtZgQSVSX0Ke+y0dHsHr1q6KPe8fCRFkZMw+9hKMtJROiGqUfShU6bkx2kvtXBXxuw9gVgxKFe8x0XDJ7uMvHm+BatYNb4Bk1vMtsLwbDs0CcMm8bwQSZdyxGzGN+nb7iwWbZ5ANmRMAQy6GjOUtv1glnTlKx9VWbjTVzPO5JBx1q3KD1eftIHJfTcamtKsR0euuXq+k1roy/
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • sha1: distro
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
        • rpm:
          • python3-cephfs
      • sha1: faea8ede3f013361e4aed12b07c6b741cf827f7b
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_client_limits
      • fail_on_skip: False
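The internal.* entries are bookkeeping steps teuthology prepends to every job; the user-visible part of the task list is equivalent to a fragment like the following, reconstructed from the values above (deb package lists omitted since this run is RHEL):

    tasks:
    - install:
        extra_system_packages:
          rpm: [bison, flex, elfutils-libelf-devel, openssl-devel]
        extra_packages:
          rpm: [python3-cephfs]
    - ceph:
    - kclient:
    - cephfs_test_runner:
        fail_on_skip: false
        modules:
          - tasks.cephfs.test_client_limits

When iterating on tasks.cephfs.test_client_limits itself, the same test class can also be run outside teuthology against a vstart cluster via qa/tasks/vstart_runner.py, as described in the Ceph developer guide.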
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2020-06-16 18:31:01
  • started: 2020-06-17 02:15:21
  • updated: 2020-06-17 03:29:22
  • status_class: success
  • runtime: 1:14:01
  • wait_time: 0:52:47
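As a consistency check on the timing fields: runtime is the wall-clock span from started to updated, and it splits into queue wait plus test duration:

    wait_time + duration = runtime
    0:52:47   + 0:21:14  = 1:14:01   # and 02:15:21 + 1:14:01 = 03:29:22 (updated)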