Description: multimds/basic/{begin.yaml clusters/3-mds.yaml inline/yes.yaml mount/kclient.yaml objectstore-ec/filestore-xfs.yaml overrides/{basic/{debug.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} fuse-default-perm-no.yaml} q_check_counter/check_counter.yaml tasks/cfuse_workunit_suites_fsstress.yaml}

Log: http://qa-proxy.ceph.com/teuthology/pdonnell-2018-05-23_14:53:33-multimds-wip-pdonnell-testing-20180522.181319-mimic-testing-basic-smithi/2577418/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/pdonnell-2018-05-23_14:53:33-multimds-wip-pdonnell-testing-20180522.181319-mimic-testing-basic-smithi/2577418/teuthology.log
  • archive_path: /home/teuthworker/archive/pdonnell-2018-05-23_14:53:33-multimds-wip-pdonnell-testing-20180522.181319-mimic-testing-basic-smithi/2577418
  • description: multimds/basic/{begin.yaml clusters/3-mds.yaml inline/yes.yaml mount/kclient.yaml objectstore-ec/filestore-xfs.yaml overrides/{basic/{debug.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} fuse-default-perm-no.yaml} q_check_counter/check_counter.yaml tasks/cfuse_workunit_suites_fsstress.yaml}
  • duration: 0:09:15
  • email: pdonnell@redhat.com
  • failure_reason:
  • flavor: basic
  • job_id: 2577418
  • kernel:
    • flavor: basic
    • sha1: d74f60ed756ff5c8749baebcf5c63b47cda60970
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: pdonnell-2018-05-23_14:53:33-multimds-wip-pdonnell-testing-20180522.181319-mimic-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • client:
          • debug ms: 1
          • fuse default permissions: False
          • debug client: 10
        • global:
          • ms die on skipped message: False
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
      • sha1: d50e0ac074c1a754afcaee75f57828752eb56075
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: d50e0ac074c1a754afcaee75f57828752eb56075
      • branch: wip-pdonnell-testing-20180522.181319-mimic
    • install:
      • ceph:
        • sha1: d50e0ac074c1a754afcaee75f57828752eb56075
    • admin_socket:
      • branch: wip-pdonnell-testing-20180522.181319-mimic
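
The conf overrides above decompose back into the fragments named in the description: debug.yaml supplies the debug levels, frag_enable.yaml the mds bal settings, and fuse-default-perm-no.yaml the client permission switch. Reassembled as a single teuthology overrides fragment they would look roughly like this (a sketch from the rendered values; the real files may split or order them differently):

```yaml
overrides:
  ceph:
    conf:
      mds:
        debug mds: 20
        debug ms: 1
        mds bal frag: true            # frag_enable.yaml: turn on dirfrag balancing
        mds bal split size: 100
        mds bal split bits: 3
        mds bal merge size: 5
        mds bal fragment size max: 10000
      client:
        debug client: 10
        debug ms: 1
        fuse default permissions: false   # fuse-default-perm-no.yaml
```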
  • owner: scheduled_pdonnell@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mgr.x', 'mds.b', 'mds.c', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0', 'client.1']
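
The three role lists above map one-to-one onto the job's three smithi targets; as the clusters/3-mds.yaml fragment they would read roughly:

```yaml
roles:
- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7]
- [client.0, client.1]
```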
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-pdonnell-testing-20180522.181319-mimic
  • seed:
  • sha1: d50e0ac074c1a754afcaee75f57828752eb56075
  • subset:
  • suite:
  • suite_branch: wip-pdonnell-testing-20180522.181319-mimic
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: d50e0ac074c1a754afcaee75f57828752eb56075
  • targets:
    • smithi143.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDC0JDXyObdIPv6knymhsjYlSLqbZY8SUE8REysnVMKarywqXjhYG/NYkKuYdhO1h2BVHqyUtbuEwZKqj2wiT7CcqK2KaYjAVnhv6wBTe/ngSk9sf3SvgSbXHLqS9XJJDGzRakCeJj8zwIEDwxjybOqo4j8iAMF9aL3VKoR2jU/2phwTH+EmbVQrE+9udm3QXPHBVcS7h4r4hwR4qrFbIyz2gSX+FbuPpWO4xZF+h9S/1WmiaRCrf6e+b5CSGIACRyWTbOgMW0jyuq0fTHGivDU+L2741W7O7iC8PutM8uiV3Y0V7byB+T3n+1ID+sSTtNem/77cNq1F0uJfZERstQt
    • smithi193.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw2RTjKh+DstxptPqNKYquFbekXYToq7mfwTvWB9oinuw41PZDc6KAAyQD5u8LUptFPwaeqSRZOYyRv281v16S2UDsYq07d0DjYvOZC5GW1pLRg1KlO24qgKiuA7MQ/LF6nbWi6+OPkIz004Z5Q1t6RTuW2EK/xGEj8JVZ3VFDEso2th6EIdo+o+dQJBBOvxJLSpVh2sPvmENaMSIJtxvs1iI0+hyv9imeeY904k/G5+vqNjTC3t+BFpZCp0Wn0fYN/hgo16qXfSSuy7pSUYBA74Mav8LIiBA61/LC+sQPfU1j7jICtAf/KMMwgzUucRsuORVLCOgcXR3TlHNT7Knv
    • smithi186.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC84hOdAcErPCfIjjqWYKh4UawfcjZUcyik/4zqHiMbp/rn1V4Berkjy3v/wovhQt8BaVQywm7RVE2UrLMtQpIJbXyAej1hR5iAU+yOPVg5APHE+vspBwqYUSt8fqO5B2hD+lFuAV4YZz9PZjrd0mtb4yXl/1J9pMpnp6zC5EUdspY4iZ9NV9EblARNPx2UTZHEF0CBv9497kpLsIgljkPjxuFEi8DQSYjLaQ7by06SQIjfoTDc+eWYntorV/E05/QUaK5mXqZ/ry53mbj69oT77EhCkEbl5JHaaUiFnHAvhGfCa/uT4Ka8wiIXy84TVyL619zlcGiKW2fudOCIRuJZ
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 3
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: d74f60ed756ff5c8749baebcf5c63b47cda60970
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • exec:
      • client.0:
        • sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it
    • kclient:
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
    • workunit:
      • clients:
        • all:
          • suites/fsstress.sh
      • branch: wip-pdonnell-testing-20180522.181319-mimic
      • sha1: d50e0ac074c1a754afcaee75f57828752eb56075
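
Setting aside the internal.* bootstrap steps, the functional part of the task list above corresponds to a fragment roughly like the following (a sketch: inline/yes.yaml contributes the exec step, q_check_counter/check_counter.yaml the counter check, and tasks/cfuse_workunit_suites_fsstress.yaml the workunit):

```yaml
tasks:
- install:
- ceph:
- exec:
    client.0:
    - sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it
- kclient:
- check-counter:        # fail the job unless these MDS perf counters
    counters:           # incremented, i.e. subtree export/import actually
      mds:              # happened between the three MDS ranks
      - mds.exported
      - mds.imported
- workunit:
    clients:
      all:
      - suites/fsstress.sh
```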
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-05-23 14:58:20
  • started: 2018-05-23 23:05:47
  • updated: 2018-05-23 23:35:46
  • status_class: success
  • runtime: 0:29:59
  • wait_time: 0:20:44
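
(The timing fields are consistent: wait_time + duration = 0:20:44 + 0:09:15 = 0:29:59 = runtime, i.e. runtime includes the time the job spent waiting on machines before execution.)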