Description: kcephfs/recovery/{begin.yaml clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore-bitmap.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/forward-scrub.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2019-05-16_23:33:01-kcephfs-mimic_v13.2.6_QE-distro-basic-smithi/3959684/teuthology.log

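
Each path component in the description above is a yaml fragment that teuthology merges into the final job definition. As a hedged illustration (the fragment files themselves are not part of this log; the contents below are inferred from the merged `overrides` section further down, not copied from ceph/qa), a health-whitelist fragment such as `whitelist_health.yaml` plausibly looks like:

```yaml
# Hypothetical reconstruction of overrides/whitelist_health.yaml,
# inferred from this job's merged log-whitelist; not the verbatim file.
overrides:
  ceph:
    log-whitelist:
      - overall HEALTH_
      - \(FS_DEGRADED\)
      - \(MDS_FAILED\)
      - \(MDS_DEGRADED\)
      - \(FS_WITH_FAILED_MDS\)
      - \(MDS_DAMAGE\)
      - \(MDS_ALL_DOWN\)
      - \(MDS_UP_LESS_THAN_MAX\)
```

Note that `whitelist_health.yaml` appears both under `overrides/` and at the top level of the description, so the same fragment is merged into the job twice.
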
  • archive_path: /home/teuthworker/archive/yuriw-2019-05-16_23:33:01-kcephfs-mimic_v13.2.6_QE-distro-basic-smithi/3959684
  • duration: 0:21:21
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 3959684
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2019-05-16_23:33:01-kcephfs-mimic_v13.2.6_QE-distro-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.5
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • bdev enable discard: True
          • osd failsafe full ratio: 0.95
          • bdev async discard: True
      • bluestore: True
    • workunit:
      • sha1: 2bb04a30ed0a4331092a8633c716caa0fdd2ea61
      • branch: mimic_v13.2.6_QE
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • inode wrongly marked free
        • bad backtrace on inode
        • inode table repaired for inode
        • Scrub error on inode
        • Metadata damage detected
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • debug filestore: 20
          • bluestore fsck on mount: True
          • bdev enable discard: True
          • debug osd: 25
          • bluestore block size: 96636764160
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • osd op complaint time: 180
          • bluestore allocator: bitmap
          • bdev async discard: True
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • debug journal: 20
      • sha1: 2bb04a30ed0a4331092a8633c716caa0fdd2ea61
    • install:
      • ceph:
        • sha1: 2bb04a30ed0a4331092a8633c716caa0fdd2ea61
    • admin_socket:
      • branch: mimic_v13.2.6_QE
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
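
The bluestore settings and the `thrashosds` crash-injection knobs above come from the objectstore fragment named in the description, `objectstore-ec/bluestore-bitmap.yaml`. A hedged sketch of such a fragment, reconstructed from the merged values above rather than copied from ceph/qa:

```yaml
# Hypothetical sketch of objectstore-ec/bluestore-bitmap.yaml; every key
# and value is taken from the merged overrides above, the layout is assumed.
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore allocator: bitmap
        bluestore fsck on mount: True
        bluestore block size: 96636764160
        bdev enable discard: True
        bdev async discard: True
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: 0.5
```
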
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.x-s', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.y-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: mimic_v13.2.6_QE
  • seed:
  • sha1: 2bb04a30ed0a4331092a8633c716caa0fdd2ea61
  • subset:
  • suite:
  • suite_branch: mimic_v13.2.6_QE
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 2bb04a30ed0a4331092a8633c716caa0fdd2ea61
  • targets:
    • smithi139.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtohLgKHqBLvlWRzE1VVqRb1vzbcftlwCCA7cXSSetTzI0SJe2vjKXnsSEu2CsgJZoaHk9I9k5I2gWHPOoFzn7KQvco/+VcxgF9auXmPP83qkAx2QZMxju/lJkPt8Z0epSZlXzNk5G2MjJR6UojQK9tvRsuP9YnYs0kAoFGJO+CETAAXDS7HVRZXHZC/vu75rDLusEJKv7F8yItbycW+6/MfX9yQRWv4qRl3KDftSCB+VC1u8iuh5vgN5B/bWn+jDzjwGWW2M0k6GzB8ajhlVCfgAjh+FVuS28ZpQRehtjZCYerEogOUiZZSBNDY/KYQSwuMJZ3NUqWuLlWTkCOBBd
    • smithi177.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjI47GZs007pPyWFUsvmG/OIeW5cZXdEp7zDYKRslb8L0Ou+4QtP7Qf3cjKxqE/YuhNC3WA8nVjeZoLGe/QKTDZMe/EbRoPECgLHp9zQDzrphm/a+QliwdKXV21ow5u3ZVlfGco1LiC/uDFFeHnmlMgz4VQEWrq0hmHY54PZb6gRIMa88oJqfsUwKVXPYC6f5ow3tW/kycb1i/sws/kX5P3PJpdIlHUa7e7Tel3OlpwaOKGni2SQcnboxUWwa6JE33akCNO3NiGU4aPB4LAAy1rcEYgNMzMEpFIXZQVREJ8Ug9Ub8XakZG9uG4nl9BO4uXxbwbLJyCNJtfhZZiSzwZ
    • smithi106.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDqFbK/c68NPBX5cOx3hRZLZ8r/SagACok8an0CbGdDUvRq6R4XCJaJw9waTic3OhEAM+meMgtz/7d0zuR06v3XdYc5mEi8D0xfXS+v9R5WsiO0ALELmJ9Yll3gHacTcuXhe4pO/jLwAkSxHqNJXhFcpV88aopcv8DC+uJjiYINPwHnRqucXktrUDEJntTqiTfZdurCHSxuNPCb/5UA74U2vbphkpKULlnN8+haTdOxexKFIc7iXxVJ0ZviGoEX1rgfpt0OApEGYUCf90Rbt/Zkkfo9GXOS5t6K2uuRFZDnkeJZLsUDlZ75KGNH6WTTedhhesfFYwMjUI9kHNBbm0Qr
    • smithi202.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ8tfaINtXxHYLfIs4bS7AvHxpdFirYImxNZjU2d208gruAtkkabphPRbKjAWIx3IWeJioQS8ANB6znuX8kvRMLe7iwUVz7oQQzgfCon6TU1jjnH9U47hTLpCKlpXjswUq16AMV/ICQQ8Uy4SezyEJ+2Vh2iErwn7jeZ7VR3F4bgLIvHqzN/8uLLWgYu1947+5566eR4y5x71+LvBcf4dNTR7H1OeCczto+g6KmjT+3ahRCyRNUzr0zO0PNAHos+xkeSCLzVg1EVjfFHio+ZyrUQ51C7BStVCAsr9zyMIh7xbx9t5pSxaldEdChJFA0imRaghPDzcSzQaKQft7XhTj
    • smithi043.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95BhP8lkrcRn3I057rvstr4ygh8p348enWZfMAfMizNA3qX3QrmoKwlZv7lKebXcvaDZIi5+yy6Iklt9sg3Bz0teV1bXPkSo5XDtx0LSfntGiDbBfcwndVSBGFBWGFQlc86rtC5YY7U4IHTqzExrQAXpxkBM5bGaoYPNAICD222ORySYgoOh9y46uehEyyVoW1Og7M9dGWL6mqJynNvyPMGrhBcJScdUQ86GLWysGd3jIqfkJH3aElauwO0zVI6WDyv7LWIC2vbHx7/tyJwqC3eDt9AViRS850Ex2hGtFgUk7IpRKeUNt1AI4CZkiVjPlJEy2KlZFNXQ9AXhS6Ean
    • smithi124.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDVqWZxmNkxTkG8Wnb5iV7+xaCIWii4N/Gc0RT4eEJ/utKQlnclqgb8Mqa5cIwFKIiJLNWx3zP5mPVXZuW3SXAQaixU1UMUxzBnuyXdoq1qknEVoH2oHxefBNCKTGKXeZ4evqs8gMkd3PUgnpCsF5qFjQveBtlmUzkcCNvp0VBUNMvqJsUEuOQE1tWTp7PGndEi4lxaeKWPIz7tkOwQocedXTizneiaCUVsRep5D9GPz9IJQ4wK4rWbX/WDG1/5YEQJIVQQpMaEdkMrgq7QFyci26x02bGkyygAL3Xu8UTkH3wpa1tirGW/F6gJGM8QdXZ0X28mDCIj2FVkqkrWk0t
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • sha1: 2bb04a30ed0a4331092a8633c716caa0fdd2ea61
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_forward_scrub
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2019-05-16 23:34:51
  • started: 2019-05-17 08:35:37
  • updated: 2019-05-17 13:13:40
  • status_class: success
  • runtime: 4:38:03
  • wait_time: 4:16:42
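
The `roles` and `tasks` sections above are themselves declarative yaml. Stripped of the `internal.*` scheduling plumbing, a minimal job file exercising the same test could plausibly look like the sketch below; the role layout and task names mirror this job, while the trimming itself is an assumption:

```yaml
# Minimal hypothetical job yaml for the same forward-scrub test.
# Roles and task names are copied from this job; omissions are assumptions.
roles:
- [mon.a, mgr.y, mds.a, mds.x-s, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mon.c, mgr.x, mds.y-s, osd.4, osd.5, osd.6, osd.7]
- [client.0]
- [client.1]
- [client.2]
- [client.3]
tasks:
- install:
- ceph:
- kclient:
- cephfs_test_runner:
    modules:
      - tasks.cephfs.test_forward_scrub
```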