Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/client-limits.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/pdonnell-2018-04-19_16:01:31-kcephfs-master-testing-basic-smithi/2417193/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/pdonnell-2018-04-19_16:01:31-kcephfs-master-testing-basic-smithi/2417193/teuthology.log
  • archive_path: /home/teuthworker/archive/pdonnell-2018-04-19_16:01:31-kcephfs-master-testing-basic-smithi/2417193
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/client-limits.yaml whitelist_health.yaml}
  • duration: 0:16:02
  • email: pdonnell@redhat.com
  • failure_reason:
  • flavor: basic
  • job_id: 2417193
  • kernel:
    • flavor: basic
    • sha1: 99aaa89478069a7c9adc6484c88b2f905abff3a9
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: pdonnell-2018-04-19_16:01:31-kcephfs-master-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • \(SLOW_OPS\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • responding to mclientcaps\(revoke\)
        • not advance its oldest_client_tid
        • failing to advance its oldest client/flush tid
        • Too many inodes in cache
        • failing to respond to cache pressure
        • slow requests are blocked
        • failing to respond to capability release
        • MDS cache is too large
        • \(MDS_CLIENT_OLDEST_TID\)
        • \(MDS_CACHE_OVERSIZED\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
      • sha1: bb9529221305433da967a3d1f4a4ce0530ec8f7d
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: ed96c3318129754f5fc0c539011117fd13b38c7f
      • branch: master
    • install:
      • ceph:
        • sha1: bb9529221305433da967a3d1f4a4ce0530ec8f7d
    • admin_socket:
      • branch: master
  • owner: scheduled_pdonnell@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: master
  • seed:
  • sha1: bb9529221305433da967a3d1f4a4ce0530ec8f7d
  • subset:
  • suite:
  • suite_branch: master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: ed96c3318129754f5fc0c539011117fd13b38c7f
  • targets:
    • smithi200.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTuFGC+A5llzRJxul9a+LzpfmxEgKF9bLctzz8qZB/M4DhkLK/C8QTLuuINkpjWAnqXCA2F8dmbVIrQgPmBE8zP+rcSw/U5XHFiYz5aWttW3rl3YwXytSYAWVsTBcvTcfZb90iBWmOO3jswEkOkPlRhRNcYhYi1dzHkgbDVCkOtpscf8MnM+Qy+4fWxRi/a8hv/68QZcPAKyKJOvmtX9SSAZp2cZWPjYyzUvkLhfNDE700EeY+7N82rx5tIzFmYgAZL/tcykK2GTuR6dmNliATglO1pq1tLEB2EMHKx9okxeYBuWSoxwP030HOEg5PHLeVLkMdhRzJHp9IfLKMk0iP
    • smithi056.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCeRKgxFH16HtR2AxScPz2LHJqeKn2A+tgSkrozButwjGLM/CN0Zg7X/bPxyuRsxXUIKTIjfx0khb/5wqr+CzY0PC4i3KSMgaWp84rtwpwQHpPFvNrjR6nkV+DI9uU90hRF6z6GCP0ReHtAbBp5EmV/+Elg3BYgE1Gwo3vbOKMJOXnzjhQ17b1xZbmXlx0yswhQ9xhVLfBt4mYxXss8khUGRoI5uWvbSvkJ1aKgf+cNY4qpXF5GO5l7/scQfflZY/Jrcx2Hku1f/nIpGgxizUBxLbfazeqcosMbyA7/0KSF4hPS+E+SJEgaZy4Ld0rC+/CjIyhJnnUAvHw/uo+DqKAx
    • smithi150.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcVBukrI8w210Gt3735ouXSKKybXuBTDib0hJFWsgkvNnMIWeaxNRW5NR9JV1oORa/0hQulFl+/wawcSOpBiEqaR35qZoNl3Br0K1qs3BVl8NA6heo78Uqqh1YrAQhAAO2xLdxl2VAm7ycj+E9qoJ6vmGCvCinNeX02QkASN3VRvTXQpCfrvWyL4ycJ0huwwTSJXMyaXWhNwj7WMOIcGbZ/ZkDuknRhTWOloqnwq/lcdZosr+D5q0+cEBleBdqqdef8iYhdy4hby5fLzasOQFifJOQNAWdXa6WwG9tY3XESp7vaQ90xcOyoSAIHm7xohsmlfBoyZ/Ez4ej1wS4jB1V
    • smithi005.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOsPKk9+F31PYDOJNMR5k4wb6qW1QrbW01MbHiIspqNyDNt06CSgRqiWot43ZueRJTNTm7qS2aNhaysP50PjI6VmPcQuPUhxoAu9DEw33stYhTxanOFLHGYruQBkkJ4dFQEu6J5NFvFLIHuJdZRawMwjaO1T46qv5qj0sKSVou3CBu/G5VQN4ga6ClAAry9ZPbFkElfyiU/0qFiL1BbvaH2Yw7mbiqdeJzPn9LdVb469GYptssDLvvtJnA1yqe8M7NTJYTjsCifA/tusHPIkGmVDXVqx6KAQ6aJOwxaDj7LSUlS8MvuEXfhs7TEbSteoUcDtRdvhpcbfq+Z68g6NEF
    • smithi101.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVuhMQHdA3vB1UzTJZhNPK0V0BL2Ml/mC4ISP4QVmhfI0KNrkH4nGCMuBu3YUbKRKZ8b1zrK2jy4QlOqH7S0jRHIVC13VpjTMnIolA+Xk4JobJ7RQyeCbcwGKrwcG0P5ydVmllte8xqPNf04ODMxkdQKL6/pv5qhADOW11RT1fmRjAQu+7C1WnB3Z9u7mDapJVtI1KrzcPsx6D05jp9QntCsuA10x8URc86AIcj5+aVTD2kQahopbU9/j6sYGRt9sZ3w7f9CaTp/i320w+qZO9scmrYliUhMDApNnxxX2Gz8GCostBnB3hwZRRvz2LZsPlzhtP/mZn63l8R7lJ7Iin
    • smithi199.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUncYEB/3FA5sS7d2M29NTWDJh3/J1rkXUWQJ7XyzDUOUNL+zRFDCwvdHVUoGXwLvX2EpvkD/fK3BWy7ntsCus3r3K4afn5fzSdpF2wHVTIA2rhAU7xJdvIDqmdzFAudLUON4R5Dpl2SxC1nWcvyM5Y/11Cm0ln7zmC6THAct8yMbQ1gn8In5XHbwwwqmQITV4GVNRFUVuNzisam395QhZWbum+QEM/i5eeqywHKE29kpsM7xzm+Hhx+E1DJlhmjzjEcbDueJuL9cj2D3FMH2AZDnuPlc5pfRrtBMk3kh0JM3VoZF14jqmuHmVSbjiZScsYdiQ+BrOxfe61YHVVYqx
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 99aaa89478069a7c9adc6484c88b2f905abff3a9
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_client_limits
      • fail_on_skip: False
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-04-19 16:02:55
  • started: 2018-04-20 23:06:59
  • updated: 2018-04-21 01:53:02
  • status_class: success
  • runtime: 2:46:03
  • wait_time: 2:30:01
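
For what it's worth, the timing fields above are internally consistent: runtime equals wait_time plus duration (2:30:01 + 0:16:02 = 2:46:03). A minimal check of that arithmetic (the parsing helper below is purely illustrative, not part of teuthology):

    from datetime import timedelta

    def hms(value):
        # Parse an "H:MM:SS" duration string as reported by teuthology.
        hours, minutes, seconds = (int(part) for part in value.split(":"))
        return timedelta(hours=hours, minutes=minutes, seconds=seconds)

    # Values taken verbatim from the fields above.
    assert hms("2:30:01") + hms("0:16:02") == hms("2:46:03")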
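
Since the job passed, the whitelisted health warnings listed under overrides are mostly useful when comparing against failed runs. Below is a minimal sketch for fetching the teuthology.log referenced at the top of this report and scanning it for a few of those whitelisted patterns; only the URL and the regexes come from this report, while the script itself and the choice of pattern subset are illustrative assumptions:

    import re
    import urllib.request

    LOG_URL = ("http://qa-proxy.ceph.com/teuthology/"
               "pdonnell-2018-04-19_16:01:31-kcephfs-master-testing-basic-smithi/"
               "2417193/teuthology.log")

    # A subset of the log-whitelist entries from the overrides section above.
    PATTERNS = [
        r"\(MDS_CLIENT_OLDEST_TID\)",
        r"\(MDS_CACHE_OVERSIZED\)",
        r"failing to respond to cache pressure",
        r"failing to respond to capability release",
    ]

    with urllib.request.urlopen(LOG_URL) as resp:
        for raw in resp:
            line = raw.decode("utf-8", errors="replace")
            if any(re.search(pattern, line) for pattern in PATTERNS):
                print(line.rstrip())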