Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/damage.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2018-07-13_15:43:04-kcephfs-wip-yuri2-testing-2018-07-13-1328-mimic-testing-basic-smithi/2776514/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2018-07-13_15:43:04-kcephfs-wip-yuri2-testing-2018-07-13-1328-mimic-testing-basic-smithi/2776514/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2018-07-13_15:43:04-kcephfs-wip-yuri2-testing-2018-07-13-1328-mimic-testing-basic-smithi/2776514
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/damage.yaml whitelist_health.yaml}
  • duration: 0:22:35
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 2776514
  • kernel:
    • flavor: basic
    • sha1: cf2a3320fdeb89818377b3f4ed19bbf6af353b35
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2018-07-13_15:43:04-kcephfs-wip-yuri2-testing-2018-07-13-1328-mimic-testing-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.5
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: f266b9fd93bc4d57916b205f3cb55184324188a5
      • branch: wip-yuri2-testing-2018-07-13-1328-mimic
    • ceph:
      • sha1: f266b9fd93bc4d57916b205f3cb55184324188a5
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • bad backtrace
        • object missing on disk
        • error reading table object
        • error reading sessionmap
        • Error loading MDS rank
        • missing journal object
        • Error recovering journal
        • error decoding table object
        • failed to read JournalPointer
        • Corrupt directory entry
        • Corrupt fnode header
        • corrupt sessionmap header
        • Corrupt dentry
        • Scrub error on inode
        • Metadata damage detected
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
    • install:
      • ceph:
        • sha1: f266b9fd93bc4d57916b205f3cb55184324188a5
    • admin_socket:
      • branch: wip-yuri2-testing-2018-07-13-1328-mimic
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
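
    The overrides above are the flattened view of the job's stacked teuthology YAML fragments. A minimal sketch of how a few of these settings (conf sections, log-whitelist, and the cephfs_ec_profile) would read as an overrides fragment, reconstructed from the values listed here rather than taken from the original suite files:

        overrides:
          ceph:
            fs: xfs
            conf:
              mds:
                debug mds: 20
                mds bal frag: true
                mds bal fragment size max: 10000
              client:
                debug ms: 1
                debug client: 10
            log-whitelist:
              - \(MDS_DAMAGE\)
              - Metadata damage detected
              - overall HEALTH_
            cephfs_ec_profile:
              - m=2
              - k=2
              - crush-failure-domain=osd
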
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
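
    For reference, a sketch of how this role layout maps back to a teuthology roles stanza (a direct transcription of the list above):

        roles:
        - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
        - [mon.b, mon.c, mgr.x, mds.a-s, osd.4, osd.5, osd.6, osd.7]
        - [client.0]
        - [client.1]
        - [client.2]
        - [client.3]
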
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-yuri2-testing-2018-07-13-1328-mimic
  • seed:
  • sha1: f266b9fd93bc4d57916b205f3cb55184324188a5
  • subset:
  • suite:
  • suite_branch: wip-yuri2-testing-2018-07-13-1328-mimic
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: f266b9fd93bc4d57916b205f3cb55184324188a5
  • targets:
    • smithi004.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDxHCMXErXEJnJa5cWvsezq0MA9kLnZjDROPbm6P/HGVy4hU7hK9/1mdeqLjKKWMKNdkVCWTGEqsuPK8PnPzC1KUlk7MHvWutF7RZjgibbaD9beDiuD1oORC/gW/ytb8kE/SdFXu2Me+6jKWx6cy/Uxw23ngAoNdVb2dFd+ImokLtyICngowR3wHEIGdzo6iHxmo2QI4vXX6urGEKD+9o5+u6QUtC5owis/Pe9KT48g4zzAoPtKJ1rUe2/xLJyXuQcWHekl28ihAm3wnl7NfoKVQEOKTLuRMuw4vi1HIOTZx2yxSdnd56FZ5l1jP6J1Kh5XxFFISaAzHXocBDCz0wrt
    • smithi082.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDsenBtlq3l/wRJ1vgMMZxoezpAS+rOpLOsFeZFKa/K/GIS2nCjy0qzLonXpuzNAXj49LK/hLKNsS69baW1LjyyLF+qug8xdrGBi80G5Wgq0kvFlmQWpLKlbZqWj/ylOddHXanmvxhYWmjKXChvtkaVKjcEkxxKCXOC42wH+XfaeS/L7x44RB6QzeplVt0+QdvxDFipdrqyu++FOK5eoZUx5semO7n4ZYdNhLehRaBmZCk+oIAqNWasdWfO5RVa6UxDxh8tcexRLVcT/MwOl1mwy1/H0CPchSXUxPvO3igKJM8A1BFpVNKEWqqLF1cyCKE/lFMQQpeqJHbwR+UeZmHv
    • smithi170.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQuaKXA9Z1k49zxOqVmfR8HapBKyXSbnEsImH9qqY15tklcND8M1so5CNV96MP0Gyqi4N98Til3UJQVLb2ws++6Bra4km5+jJC6Yyv5I+3GZRNmBvEJImW/4RnboSq0ZHw/YpqoEdT/a09my2VWHcMZJzGJiTWAuIvh/KMCOzlKt6ozLCUF4gf39MI3KMGJsUQOBtE9kI9arU1VkcXGZe5bq1biIAZvr6ughbk/MHeHU5MLYl+Sy7Y4tfXsPbOOOcSNs+jpXsmlgy5+2bezZiMrbouZWN92c/DsfwNt3/aBBpBUyap/W2AIMiDOCfh+9Ly4XmV6XxFcLFzuX73IzX3
    • smithi066.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCgmYKGxDKJ7jrIInVmRtPfqdPIXO/xi0LuLOjAscfZrofxFSZzwRth8jvMPHZ9csY9IskKLKoA8iBJCMFtlOJybQqZooimiPbkb0r47z/LKEv7T8dRCqByf+EqDxt3UVNFJg4aF8KcotpbDNn+ooR52iXO9//sziaOjf6Lis8tRWGd7eIv9bvMoxh/MG8XBo4sv9ssmPLnO0+20F9wLxJ7ToCTtNS+TQ3zqqf9uJlb5+FTzRyFQr3A1tSAWXTTRn9j71wBXHefzd3o81P0SOdL2R768V6cfJz2wgN8VfiQypLnr1PUjgYVfFjr5lKFKtZ5cmTBw3lBr8uZmD2xTx8x
    • smithi092.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDp9woSrpjmpLAIZIKc4FqjlQ3Tl/AIbhkE1Z8Y5PzRFSGl1ITEj5AIh/lL++As//7luCBaIKecES5jMoz86pgiCO/U+euvEtSyOgDpFasDoDjNctQtQJPAxnSY1ELVUar9BhekuyUXs8vO2GUuY/dNLo5c9OCXOM+5wVp5PhGoJz/d7EdtpfJw4zGNxkds4xpQMGKjvUAH7z2YAvFQ5ICIndMgQz4Hau3BpML9G/ihDy0NndH66bIvZdgLfxBldYzXZJ/h5Z2+qZQqB8JkwjqHflkbLPXk6b3yCDPp1uU7P5vmhEwbg5jPG/PIl4Q+vo+HlQb2vkzk+lN2HOTG6eBL
    • smithi160.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMkCMtyoHVp8BOC5xjAjTDL5UlVd5Wcxwr+L1TvuSh6YpBTxS+YrDbMM4/rVtbhjraAt1bDoueM1XLqerXcCg7bcBEj9Jc6jFvG5HXvdsT7eaw0cOlOzMWjTHWMPEC40Z/yAehl0uwxhZQJwqa2wYll321EL9yFgAikM6VvSkul9et5740p0lxMI0J9Y2eMZVwu5nB9ZQHB9qi6OWgYBuL4yddbqjEYG8Xns4Bn6dwXMJU/b0W4gPPET3FKb7b6AjJFJJQcmRoqwV8PqzlpPQm2A4in73PziLgL3ASC3dTbC2ioqGn5xIPmdmB9bsfyBd1/+rBYYgZgJMuwFihn5Cv
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: cf2a3320fdeb89818377b3f4ed19bbf6af353b35
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_damage
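
    Stripping the framework-injected internal.* bootstrap steps, the user-facing part of this task list reduces to a short YAML fragment; a minimal sketch, assuming the standard install/ceph/kclient/cephfs_test_runner tasks shown above:

        tasks:
        - install:
        - ceph:
        - kclient:
        - cephfs_test_runner:
            modules:
              - tasks.cephfs.test_damage
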
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-07-13 15:43:44
  • started: 2018-07-14 07:18:43
  • updated: 2018-07-14 08:12:43
  • status_class: success
  • runtime: 0:54:00
  • wait_time: 0:31:25
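
    As a consistency check on the timing fields: runtime = updated - started = 08:12:43 - 07:18:43 = 0:54:00, and, assuming wait_time is runtime minus the job's duration, wait_time = 0:54:00 - 0:22:35 = 0:31:25, matching the reported values.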