Description: kcephfs/recovery/{begin.yaml clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} kclient/{mount.yaml overrides/{distro/rhel/{7.5.yaml k-distro.yaml} ms-die-on-skipped.yaml}} objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/mds-flush.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2019-03-11_03:25:02-kcephfs-master-testing-basic-smithi/3707926/teuthology.log

  • archive_path: /home/teuthworker/archive/teuthology-2019-03-11_03:25:02-kcephfs-master-testing-basic-smithi/3707926
  • duration: 0:21:06
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 3707926
  • kernel:
    • flavor: basic
    • client:
      • sha1: distro
    • kdb: True
    • sha1: c229be98a86f3ad5b0374051c338b1f09c62f005
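
The kernel stanza above selects the testing kernel build c229be98a86f3ad5b0374051c338b1f09c62f005 but pins the client roles to the distro kernel (sha1: distro), i.e. the stock RHEL 7.5 kernel, so the kcephfs tests exercise the distribution-shipped kernel client. A plausible reconstruction of the k-distro.yaml override fragment named in the description (a sketch, not the verbatim file):

    kernel:
      client:
        sha1: distro   # client roles boot the distribution kernel
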
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2019-03-11_03:25:02-kcephfs-master-testing-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.5
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • global:
          • ms die on skipped message: False
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
          • osd op complaint time: 180
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
      • sha1: 096fcc55d777b4c4bde7b993cd93dc59b17142f9
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: 096fcc55d777b4c4bde7b993cd93dc59b17142f9
      • branch: master
    • install:
      • ceph:
        • sha1: 096fcc55d777b4c4bde7b993cd93dc59b17142f9
    • admin_socket:
      • branch: master
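
The overrides block above is the merge of the override fragments named in the description: objectstore-ec/filestore-xfs.yaml evidently accounts for fs: xfs and the filestore OSD settings, frag_enable.yaml for the mds bal * tuning, and the two whitelist fragments for the log-whitelist entries (the repeated MDS entries indicate that more than one fragment contributes them). Each log-whitelist entry is a regular expression; cluster log lines matching one are not counted as failures. As an illustration, a plausible reconstruction of whitelist_health.yaml (a sketch inferred from the merged list, not the verbatim file):

    overrides:
      ceph:
        log-whitelist:
          - overall HEALTH_
          - \(FS_DEGRADED\)
          - \(MDS_FAILED\)
          - \(MDS_DEGRADED\)
          - \(FS_WITH_FAILED_MDS\)
          - \(MDS_DAMAGE\)
          - \(MDS_ALL_DOWN\)
          - \(MDS_UP_LESS_THAN_MAX\)
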
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.b', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.c', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
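
The six role groups above map one-to-one onto the six smithi hosts locked below: two mixed mon/mgr/mds/osd servers and four dedicated clients, as laid out by the clusters/1-mds-4-client.yaml fragment. A plausible reconstruction of that fragment (a sketch restating the roles list):

    roles:
    - [mon.a, mgr.y, mds.a, mds.b, osd.0, osd.1, osd.2, osd.3]
    - [mon.b, mon.c, mgr.x, mds.c, osd.4, osd.5, osd.6, osd.7]
    - [client.0]
    - [client.1]
    - [client.2]
    - [client.3]
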
  • sentry_event:
  • status: pass
  • success: True
  • branch: master
  • seed:
  • sha1: 096fcc55d777b4c4bde7b993cd93dc59b17142f9
  • subset:
  • suite:
  • suite_branch: master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 096fcc55d777b4c4bde7b993cd93dc59b17142f9
  • targets:
    • smithi156.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtFByRAKuJD8GncaiE2mq4n1fVh/ZNEVu94AOa3fEpPJwtboNzAvmlROuKMipB84BEx1IXoE77m1iTT5h2MrFc9u2ZH3ubr/2g+k29OXZQeUaDDOEmpfHeM0xUIWz+91Z7Q+oQ9D+4WG7T1fCMfhoy2G394uQsByL2hf83FaI4gIJux29oUBgWAW0DsIlQ/qZfOJu9sV8tCBI9eS+9DxZtWP0dmv7opgBSGdlD0C1sX6vvFlCJ5YXW9xBSQy5fx3owQ/oTvHvWiGEYVyCur1wIcw+fzCYKJQ5Gph/U7T1D4oWc1UGR8sz3ZiJshoYYUnmSxym4c9Ce6Z3WmXAVa4LJ
    • smithi143.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpDF3ZFtLKBK1qr91/rWCffg+fFQ0CW7Dvw1ngv7RmcRVGfQVQBw0IPINGmdLzfjThDvz4Cg94mRcytSv5wK2zF6hEEG1EWsrw3A9Fnavx25Y4KWWUCMOX/W5Iuo6bn0lSgmW/JIkhaCWjjm+hGVKCjL1cApZSW4v+uGMpiXm0wS2QS8yj855afCXtYdAYBuE6oqjH9giVKoQaGi7FUcNZ1bWcjr0OLnhQLVTNBouI5MvmAXlWzFA53F8SQHj1UiKtwUGCO7DvoWKxBtVrwVlsRWRYb5AcmhRLikgeZ78K7v7F6OSeqsevxJrzeZqFU8pxTBk2ETIFYS0QT7bYiQJT
    • smithi116.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCi8vh2wzuW2f1eoe0XuNz+dvfrPv9jcp3ina1XcbdAVstz8iB42f3+vOQZystEVAtYfprLRlj2bLTfZr3bH6Qbe1lyEEzXxobcl/GcBdgWdAz2CDZreUGJ0LGmBWWRMplPHJClALwcVp0YYh5T8GgvXw6L4GVw+OTQ7nTeyU1m5t187n8ATp4qLiMFo7z5fLKi+VvEu/jD/Kx6dKcm+ZfHp/c4crCD0fAXIgmElQYWF4YFNkQvIgS8M6q1ypy/+oq9jLi8TFxebzUYjQWyZGXuCPZwzcO6SGxUTzvxPSjtISFB3HHZS9YsRhXr/xSMZzr6/WWzQG6ks/qO+YzNDfPF
    • smithi068.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMcf7QdaR2siAFnAR2+N3k8xEAFrNJ3hTWIsCqAGEPs1L/NNpOXuGcA15OTGnTAOIQ8pXTkKZqLNqIjED5hmrGWsPrbOM6EM6xkkgl+P2abJCH5CRhoOPBvUV83UNBnddB/OsYDusMIm/yVJJgTmnU/ERW8IO4Of6JieHKLbMccJON2WpOVpLSmfEU26jQCn7ZHu1N4PCFXkvF7BR/lV4vOGZwVMmwYCBB/aQ1XcCsvYnFek/QI9gSVwHRtJajsiCcNyMikWkasRhEPhrRAlJnvnHYr/TwOymagXpPC182mefv0HF6P32DGHQ5+zfYv+IT31wO7wgctY4tH7+Ov6kx
    • smithi144.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGUoRmphNoTXD+DARytPWaYbGp7sCEraZRA/WwZWCImExo0ov7vdxsO+sD43poe2JOy5lqJs9pdAe24YC3pCyvTw45WhZ2f7QZm8+DKo1xUgdManZ+VZ5Slk5Vf8gt6QfUS2agwMdzCEKrqiWkr6G2F8gg70QcDxvyviRNtYZmFkrqoSSDyQGax/HdNczI2YKhIaf3GyLnU7BWP67lHHlZNCpMkd1D5PqI6tssICmur4D4m9oTn0gBow560OrA3EaTgjMaXA+utVeEZjq6Vu69LNuh8umjfwwEL/JMk/GQi2WzpQFQaOBO494eKwGC8ULAFVssQnaHmyY+XU9h0gPx
    • smithi063.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDE3U5HQbDeCE6C/lgHXFntKThPIDtwnD1S5181wdEnNHUmW2Xd7davLb62aDQjUwQ6yPsHCTBz9/rt9Q2Zw4U2zV7s2+N6+wMH7lVV7tpvJsnL//DC8xzysIQvYjLDWFvfaseZVTJn/RW0dwiyAU3uwnrS2L4EYdT+8BNcxSQc4QbU/ZmRLpfWzh1KXJBly5I3CJ0YJdijxf3wuonXh6fm68EokTSOsITpBGTbnV7V8kh+obSYAqvqqAzE6sYj5tHe6Fb/N+RcNx6Y2kLLE/dq3/2YMPHBWjv1iiIkgdEL8OqKwD9aE7GaqhtFbUIR3QELH8Z4gZS0k1zKhlhXVret
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • client:
        • sha1: distro
      • kdb: True
      • sha1: c229be98a86f3ad5b0374051c338b1f09c62f005
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
        • rpm:
          • python3-cephfs
      • sha1: 096fcc55d777b4c4bde7b993cd93dc59b17142f9
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_flush
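
Only the tail of this task list is suite-specific; the internal.* entries are scheduler plumbing common to every teuthology job. The begin.yaml fragment presumably contributes the install and ceph steps, kclient/mount.yaml the kclient mount, and tasks/mds-flush.yaml the test itself. A plausible reconstruction of tasks/mds-flush.yaml (a sketch matching the cephfs_test_runner invocation above, not the verbatim file):

    tasks:
    - cephfs_test_runner:
        modules:
          - tasks.cephfs.test_flush
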
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2019-03-11 03:31:13
  • started: 2019-03-14 09:35:52
  • updated: 2019-03-14 17:01:58
  • status_class: success
  • runtime: 7:26:06
  • wait_time: 7:05:00
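
The timing fields are mutually consistent: of the 7:26:06 wall-clock runtime (started to updated), only the 0:21:06 duration was spent executing the test; the remaining 7:05:00 was wait time:

    runtime  = updated - started    = 17:01:58 - 09:35:52 = 7:26:06
    runtime  = wait_time + duration = 7:05:00 + 0:21:06   = 7:26:06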