Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/volume-client.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2018-06-11_03:25:02-kcephfs-master-testing-basic-smithi/2652467/teuthology.log

  • archive_path: /home/teuthworker/archive/teuthology-2018-06-11_03:25:02-kcephfs-master-testing-basic-smithi/2652467
  • duration: 0:22:45
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 2652467
  • kernel:
    • flavor: basic
    • sha1: d0e0731a2c1714e89f030a00b7a86ac020b37e76
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2018-06-11_03:25:02-kcephfs-master-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • fs: xfs
      • conf:
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client.1:
          • debug ms: 1
          • debug client: 20
        • global:
          • ms type: simple
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
      • sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
      • branch: master
    • install:
      • ceph:
        • sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
    • admin_socket:
      • branch: master
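
Expressed as teuthology job yaml, the overrides above amount to a fragment along the following lines. This is a hand-assembled sketch for readability, not the scheduler's literal output: the real fragment is produced by deep-merging the yaml files named in the description, and the conf keys are abridged here.

```yaml
# Sketch of the merged overrides fragment (reconstructed from the
# fields above; abridged -- not the scheduler's actual merged output).
overrides:
  ceph:
    fs: xfs
    sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
    log-whitelist:
      - overall HEALTH_
      - \(MDS_ALL_DOWN\)
      - \(OSD_DOWN\)
      # ...remaining patterns as listed above
    conf:
      mds:
        mds bal frag: true              # dirfrag/frag_enable.yaml
        mds bal split size: 100
        mds bal merge size: 5
        mds bal split bits: 3
        mds bal fragment size max: 10000
      osd:
        osd objectstore: filestore      # objectstore-ec/filestore-xfs.yaml
        osd sloppy crc: true
        osd shutdown pgref assert: true
  workunit:
    branch: master
    sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
```
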
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
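
In the source job yaml, those role lists correspond to a roles stanza like the sketch below: two mixed mon/mgr/mds/osd hosts (with mds.a-s as the standby MDS, per the clusters/1-mds-4-client.yaml fragment) plus four single-client hosts, six machines in total, matching the internal.lock_machines count of 6 under tasks. Reconstructed from the lists above:

```yaml
# Roles stanza implied by the lists above: one active MDS (mds.a),
# one standby (mds.a-s), eight OSDs across two hosts, four clients.
roles:
  - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
  - [mon.b, mon.c, mgr.x, mds.a-s, osd.4, osd.5, osd.6, osd.7]
  - [client.0]
  - [client.1]
  - [client.2]
  - [client.3]
```
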
  • sentry_event:
  • status: pass
  • success: True
  • branch: master
  • seed:
  • sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
  • subset:
  • suite:
  • suite_branch: master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 9154a392b08f4eb0fd27de84fbb31c46f78b4659
  • targets:
    • smithi196.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYIbYsdDHeskAFMTzkGjoVKK/o3NA1Xpqvkk9JjH6GVxFJvqBKkGrTfj7HJLMwsSq0Z8+l0b624X87fD6r7rYBbjDMLTcUAar40yniChi40GoKtQQwXuVIIqswc320jC9dglvCdoOVjqHJN3vQk1lADZAT4dhtBulh1l7UY1avXzzQOJoOq76DKt0dY1Vp8CgrzCX2QwUwRVn0Mos9suc1Jc1VXVvd7oJaBYp1HDpmkAyTV81A3qWKqd3u4QEFx3Edp9Cy/Zj1D2+q8RzQzk5uC2rP30VOWEUtCIrnhdP7K9ZoP9DU2IPPkEfC5HxsLiBDb/EowkjQN0Ku9xNJVtEX
    • smithi033.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDyVE8z9MJSzDmDnNxheNlzKBKj1RvPV6l29YfVvb/GhfE715LmIYhRvnxXo6b22Xlp973subS1rTvEeSF0BhdTS4s7As72bQ5AGHCgVeR6WCUB7JhMYaeguQdq6TrB9oFSvVANzVKtBGM8OwLorFWlatkDzDLSeKDmliyriEzwJjVsh/jXEoTnqD4uj61kWkY3870GoMLdbp0bUaHD6x3Ms40Qr473jyKBV04vg7eqSwYwuGISh6QwWMJY5CC6packR1DZlZjiRVRl6ABeaQILNKRbvi8vKbQfSTa9x9kPQKG/CV6zmBg/LaxWfrvF3NgsNEqc02K4C3ruIkzv29yN
    • smithi081.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCoSmatUy9rQMobIntluCwTPRrIh7VOit/+uFaUTH+4rT2qGwjMdUu+fjA3QoKt5OISgnIXw9+IKvh0N8CCO7yMXozmbwIl+qetj+ddl8Uk6g20Il6sicUdDBSG6YXH+fbwB5xUoLAwAyhiRU4phSNu5A8kz8bdsyeQ5BC9dEmlAfjDzrNJIVkXLt6Wry/fL1VpZ8T1jTxFPEwOSBsmRdEWWyU9U15GYaZb5zWfuxFRRSs1AB7nvCKz4cU8YkcwHsdJGps0KgEeRQ6Uvc0PXXRK5X/2Hx6PlOjC746MtUV/soIUTMFMSt5ahhsIzGhZmXcUseBDnoW7q62XBoBt/lO5
    • smithi024.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA84SyxdUPgfiSxAMTcXqDpokYsYfM1XasmPC9eqLHpVd/Wcl7jrBj6EJWf1hMqxx44irrm66v1ke8UCX8CQHy2Z2lAAgtHgeftpM3XWl9dkC0Tby1yKZtz1PEJloiUFZlOPmCcWup7+h+PeyUpDp8dkHnDjIN5hm3c0OQWGCCsaYt6x2jqvxytuQUyzNzEoRg7BAWwaaOhkiu8ePF2Yros/UMhYyDT8yaqGULy1SUX3h/+WNwm3EMemAG2F6a0qM5y6lu1AGXnVVMwxNeGqGh/go158Wq+n9wbMFqFYCkXuUXw6wVhfOf8svzy3N7jsp+1deCTVf+1AhaQaHh370F
    • smithi027.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKsLm/Iht5CnLY+vgXZNxdwoWznJKNgmTJJ0x5sYnXTFcIAoagND+lCgKmWki96068z5fwUDVQbqlGjonts8gh3Bm9TZL2Mf3sAbE6hb54NsNVwyS2bHinGJYi4e2sNhtbVh6j0dAU8PcSSU9vStJykzGrSTCQesEUQQU3tW8ricOwH43C4oAY1EmnsjP7rWiYwgvbyZA0cYDg4gr8oHW8glkIYLP7yzgdChwatxrSWQUKCiJKcLzBQi4L81O5/6F2GRwUq9aN9gzZ0ROU4/xfE/qsgIQLLW5/5u+H3x7xm2NLLGY7wXhmnI0zNuPEr8MRCnCJ6BGVfl8n8J6r30lx
    • smithi063.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQClQDMKb2UrFfhFzZ8oEsUHIU/y2YUomKg83qtPfsSjkXBu5f82KsPSkjspswqW1BcLZcAFF6VmIJfplZfCcVCVZBdFk8mlwZdIZIf//ODJi2d5zggxjyyfZLqNLnahFU5BCFCa4rhWarbY9XMLpEh9o6Zgyp6cRAHIgft0ugjCI+TnyuB0qhbWUXVlr0Ug85CbW8qtPz0WA+02V9Kb27tdtroMXhgkPCokzMb/Q5uv6Sb17Gg2KPaClbnm643JzfjgeZW6rOLjs6D9tYVMfxCt9Yt8xG7eDNRdcuFej9HG6xRZySwIyih3VMsqRL+JpnCIexyYH+UBrySOAKWiV4/H
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: d0e0731a2c1714e89f030a00b7a86ac020b37e76
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_volume_client
      • fail_on_skip: False
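
Stripped of the internal.* bookkeeping steps, the user-facing portion of the task sequence reduces to the sketch below: install packages, bring up the cluster, mount the kernel client, then run the volume-client test module (which maps to qa/tasks/cephfs/test_volume_client.py in ceph.git). This is a reconstruction from the list above, not a copy of the original job yaml:

```yaml
# User-facing task sequence (internal.* tasks omitted).
# fail_on_skip: false lets individual tests skip -- e.g. when a test
# needs a feature the kernel client lacks -- without failing the job.
tasks:
  - install:
  - ceph:
  - kclient:
  - cephfs_test_runner:
      fail_on_skip: false
      modules:
        - tasks.cephfs.test_volume_client
```
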
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-06-11 03:29:22
  • started: 2018-06-12 00:12:47
  • updated: 2018-06-12 08:28:58
  • status_class: success
  • runtime: 8:16:11
  • wait_time: 7:53:26
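
As a consistency check on the timing fields: runtime is updated minus started (08:28:58 - 00:12:47 = 8:16:11), and wait_time is runtime minus duration (8:16:11 - 0:22:45 = 7:53:26). In other words, the job spent almost eight hours queued for smithi machines and under 23 minutes actually running.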