Description: fs/multiclient/{begin.yaml clusters/three_clients.yaml distros/ubuntu_latest.yaml mount/fuse.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/ior-shared-file.yaml}

Log: http://qa-proxy.ceph.com/teuthology/pdonnell-2018-04-17_20:12:38-fs-wip-pdonnell-testing-20180417.182728-testing-basic-smithi/2408087/teuthology.log

Failure Reason:

"2018-04-17 20:59:59.831793 mon.a mon.0 172.21.15.68:6789/0 76 : cluster [WRN] Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX)" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/pdonnell-2018-04-17_20:12:38-fs-wip-pdonnell-testing-20180417.182728-testing-basic-smithi/2408087/teuthology.log
  • archive_path: /home/teuthworker/archive/pdonnell-2018-04-17_20:12:38-fs-wip-pdonnell-testing-20180417.182728-testing-basic-smithi/2408087
  • description: fs/multiclient/{begin.yaml clusters/three_clients.yaml distros/ubuntu_latest.yaml mount/fuse.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/ior-shared-file.yaml}
  • duration: 0:08:19
  • email: pdonnell@redhat.com
  • failure_reason: "2018-04-17 20:59:59.831793 mon.a mon.0 172.21.15.68:6789/0 76 : cluster [WRN] Health check failed: 1 filesystem is online with fewer MDS than max_mds (MDS_UP_LESS_THAN_MAX)" in cluster log
  • flavor: basic
  • job_id: 2408087
  • kernel:
    • flavor: basic
    • sha1: 99aaa89478069a7c9adc6484c88b2f905abff3a9
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: pdonnell-2018-04-17_20:12:38-fs-wip-pdonnell-testing-20180417.182728-testing-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 16.04
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • \(SLOW_OPS\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
      • sha1: e86af94f73d6fcd7d6e4ce74a4a225d93d639523
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: e86af94f73d6fcd7d6e4ce74a4a225d93d639523
      • branch: wip-pdonnell-testing-20180417.182728
    • install:
      • ceph:
        • sha1: e86af94f73d6fcd7d6e4ce74a4a225d93d639523
    • admin_socket:
      • branch: wip-pdonnell-testing-20180417.182728
  • owner: scheduled_pdonnell@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.b', 'mon.c', 'mgr.x', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['client.2']
    • ['client.1']
    • ['client.0']
  • sentry_event:
  • status: fail
  • success: False
  • branch: wip-pdonnell-testing-20180417.182728
  • seed:
  • sha1: e86af94f73d6fcd7d6e4ce74a4a225d93d639523
  • subset:
  • suite:
  • suite_branch: wip-pdonnell-testing-20180417.182728
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: e86af94f73d6fcd7d6e4ce74a4a225d93d639523
  • targets:
    • smithi068.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9WoEUPRC+xYlx1m9YoCY6xapuumqt8JmjRRrSCcp9g+D/ynKDwvm9EI98RZ2fryISg+lFfSwchAKDmKFqNau9KDBZVwhRt8vfswJ/xTryVyd3Y14OrHlOr1CDxv6bYDNv28DenZSGj0U6bj0NtcfhDH5ULGySO2oC1weaq2Yz6CrIFljTqL4oexvZ/jbNb6YirbUBkigEJWcQ1JTD5Qo0oEcCyHNValgVaXLFb385ltpIvxIDpE/ZxYY7+IWBlbGuSg97xN/mjHegE0N+NZh48AKxwnhvEqBQmz6j1/KLJHVC5XuBqn1ZtW1yf9u4rB+7euIgGLrb4bk5zxOe4edv
    • smithi059.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDsMssdHEyqozUcodSo4wZIZbnk5eAnwYUW9brJ/Y8lkstcCmTWjDx8nICstlvq6SNHvGkwzTKFj1lMzZ1KYQ0bm1Qoze4KxSnCgqPCLJkoV+QEJu4+xZQ3HN6ho/fivrpnQLDQk+Y26ILNnL4RR5GGnqiz5dfxFSYNc2MIfMb/elMg6fXZ9gvGvRyyP3TOFVyeT2M7jZN7G41pEYC0VBGO4zLOzejIPZcxRpfByLfge2Wc+V4IzvW+oQCBoxvut5AXQyn0s6hOm1A1zV7jTdQ+3+vLjotEDmL9Qo25Q+G3X/KeyZLbrTB+txgnlkEdYT4j4l0deQ2KLbByzKQo50Ez
    • smithi052.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC92WBWHJgrWauCCQpM61mBf8Ng1N6prt4wYyaus2+eeYehajzZotRkCknetSjbsPXVWcNxb6p8zsR5wZMCIl+VCNyCx482jDRqiISAwdMcjWe7HlwHsAr3FfScRAnRyU5CjA5L1D1ENqmQrTioi2KmaBgIXG/b+giXtQSIpy5mp1lvZ3/1OoKo1T+CBhkrfORx5t/ZqeFwefME32IxV2f25k1X2E+OxVO5JyIUsmsomMrmOjJleHFhWNEMmZfXfFKC2112wfcYHWlmZ5dqj9/6/2Pm+iywgYn00YxoFSrGadunVwS5Z2kgNnvMh/fpDOFscHaf3jl/IuoaNBqQBZ3H
    • smithi175.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD8wzE9PwHpBaAoSlB51vtD0LTdUMbIJU2nL6IR/qmPzVGoR3UDGAwgqGjUMR66jBHuajj5/mM8WrrQCDPsBuNXYfDyQutbKEFBYSxV5JUlxBSzQfyFClQoEY7I7XB8cOj6XMBm8Fv69xDj/AmmDQJmLJxzqIodtuW2eicISyuz6v2hIS9nSysDr5KfMAWxLszx0yjVZZNLU3LTXG/mnZX/vb5/ducVOA85bKSelAP7wOBIn8kmKgDKvf37bihQywsg/k/UtofKX+dpafDEfxXPcAtSnraZ2DLtRA8D/IXcUG2QwuzVHx2LymaGZ/0kUqQWF7ErYJcZ1IuDKZibq3Lt
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 4
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 99aaa89478069a7c9adc6484c88b2f905abff3a9
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • ceph-fuse:
    • pexec:
    • ssh_keys:
    • mpi:
      • exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile
    • pexec:
      • all:
        • rm -f $TESTDIR/gmnt/ior.testfile
        • rm -f $TESTDIR/gmnt
        • rm -rf $TESTDIR/binary
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-04-17 20:12:52
  • started: 2018-04-17 20:33:57
  • updated: 2018-04-17 21:03:57
  • status_class: danger
  • runtime: 0:30:00
  • wait_time: 0:21:41
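
The mpi task in the listing above runs IOR against the shared ceph-fuse mount across the client nodes. Per IOR's usage summary, -w writes the test file, -r reads it back, -W verifies the data read after writing, -e issues an fsync when the POSIX file is closed, -b 10m sets the block size written per task, -a POSIX selects the POSIX backend, and -o names the shared test file under $TESTDIR/gmnt. The final pexec task cleans up by removing the test file, $TESTDIR/gmnt, and the IOR binaries on every node.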