Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/pool-perm.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2018-05-12_03:25:02-kcephfs-master-testing-basic-smithi/2525874/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2018-05-12_03:25:02-kcephfs-master-testing-basic-smithi/2525874/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2018-05-12_03:25:02-kcephfs-master-testing-basic-smithi/2525874
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/pool-perm.yaml whitelist_health.yaml}
  • duration: 0:10:40
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 2525874
  • kernel:
    • flavor: basic
    • sha1: 568930335d9961789e96406657c0ec2608d383c7
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2018-05-12_03:25:02-kcephfs-master-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • workunit:
      • sha1: b87103157e77d7ae3b3530b366a5d69455e3fe96
      • branch: master
    • ceph:
      • sha1: b87103157e77d7ae3b3530b366a5d69455e3fe96
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • bluestore fsck on mount: True
          • debug osd: 25
          • bluestore compression mode: aggressive
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • debug journal: 20
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
    • install:
      • ceph:
        • sha1: b87103157e77d7ae3b3530b366a5d69455e3fe96
    • admin_socket:
      • branch: master
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
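
A note on the ceph overrides above: the log-whitelist entries are regular expressions matched against cluster log warnings (anything matching is not counted as a failure), and the cephfs_ec_profile declares k=2 data chunks plus m=2 coding chunks with OSDs as the CRUSH failure domain, so the erasure-coded root pool survives the loss of any two OSDs. A minimal sketch of how these two pieces would read as a standalone teuthology fragment, assuming the standard overrides layout:

    # Sketch only: the distinctive overrides rewritten as a fragment.
    overrides:
      ceph:
        log-whitelist:
          - overall HEALTH_            # regex prefix; ignores HEALTH_* noise
          - \(MDS_ALL_DOWN\)
        cephfs_ec_profile:
          - k=2                        # 2 data chunks
          - m=2                        # 2 coding chunks: tolerates 2 lost OSDs
          - crush-failure-domain=osd
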
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
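
In teuthology's roles notation each inner list is placed on one machine, so this layout asks for six nodes: two cluster hosts (mons, mgrs, the active MDS plus the mds.a-s standby, and four OSDs each) and four single-client hosts. The same layout as a cluster fragment:

    # One inner list per physical node (6 nodes total).
    roles:
    - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
    - [mon.b, mon.c, mgr.x, mds.a-s, osd.4, osd.5, osd.6, osd.7]  # mds.a-s: standby MDS
    - [client.0]
    - [client.1]
    - [client.2]
    - [client.3]
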
  • sentry_event:
  • status: pass
  • success: True
  • branch: master
  • seed:
  • sha1: b87103157e77d7ae3b3530b366a5d69455e3fe96
  • subset:
  • suite:
  • suite_branch: master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: b87103157e77d7ae3b3530b366a5d69455e3fe96
  • targets:
    • smithi041.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCX/pnZPNap71gqbQxxIhgelMC58vZyZ+kYPuJyOQOtIoTijYGx/lax3pUor8k7ZA9G0qVUqEy66EN+vKyARHSv2smTNOCqMYXk47psQK1E2Cr5/qxVj8etasyUjnZ6WarNapDkwHfRyoLirphQr126+bHbE0jBeUhbjURJxr3ZoLT/od0Vp4SK+AYCdJHqpP5bHDL3qVreNsTylP/Bu9aOvRYZ3rKelZxHxsbFiSD0C55cuICn+Ri3FpIBGgzxg7521yyvKZZ3+UjM9yX/mBdGyjX17U8xz8JQFXiwaoWCdXmJc4ggbMvppOIOk9t96Gg8MDD9WXheo1rQZVNUyCDP
    • smithi135.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkvTm1SKkdDgw9yzOODj20ScNMhgsnR0eYe17m8XwyZkH+YGDPywIHB9ISJoHBHSVSzNnqKvS6M30mbzhnE1kmcVLq4CjeexpuKFWH93ff+7kLgua5HzT6rqcsIT/eS3LwSm5S0o9YEoK8fuBrlegYVF4rMkmEMPp7QSFwEe/xE//6EeVQRtstEOOzF+9Xq8LfRvfHVSQfBQ/ltm73vQoUCfpXkt583U/85p3BC2Hb2ZC9nEKYW/hFkokK0i5hgfPoZHZyqzbsMo35nJpmoSTaAuZsRzpzwKvDiAkyqq3TloZj/GbQuduW3mqxPgyftkKoVDRJhGjJzol+vSRjI6yR
    • smithi091.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtbGzmT47FjxZQ58AruJeXTe3PVTsVDD9Kv2S8NrEipTtwgYTAGItW/NRNipv1i+mN/9wbGTj5kboy/zPHcuR3S8e8uGWQV5k2ADltwsb/2AK/PeGmwC6CSypami7Rgtf8gee5+76x66SItMtDgRosxkxjLDoDe/yJxPdKoKmdLb/VCK0J5QKwAxEAXejTnFPyo1+p/v5Yss/v+tKjAhWXqyxegO/8+1LxHrIK5US2cYijEZL3sdr9YZASWinJLo24CLOk/s7RxuLSdHF4e6RIRuS7zeTsba20bsu0QOHCix7BP8IJzEAGbyrMUXVCugeROXbqUs8hmlx2jyxo0suJ
    • smithi081.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDAKaAnNpD+m2jM8lh5fBPG1mmeopw0wXlBGNAlzFDfjA5LrnOSwU/TTQP04iLbYPbi9s4rtRJ69oox5FTvSJdize7q1injaf8o6jh4DawDdZ6j4SGg+zUdr5jv3UQg8rTOvX/JrRYS2uc12ERyTscpZd+dLDkO3AuJ3iVFiJHwy7LB7Nzd++tsJW6kBFGgYRnpWSFGupYm5vVzIpfNMroz5pHKl5wW3XOTR+mi94BmWv3PrxyKwRExgDKV0WmwgMq4MsJ+lKGwBcBg3H2yrxkJhNyfK8BBmLbYl1jn99GIQhF62VGPh7RP956UJJvHl2KJL+11Za89ho1q6azJfK5
    • smithi068.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSTLFoL81O3U17wwgbJ4Vsv0UaK7NjqmaDMAzR0fa7JtIA9JPz8svV6Msh1eM/Kkz9HfK0JzESCFeMvB7FqUQHHM+V6Zye8B5t1OvbjfX5fDab9iKQ8KCt/TBu1Ojsd/+2tKbWVUcwFLqdkDp078vwInoKqXB082R02saFKaxqelb8+jFpc0xLdSdai1N6rTeGwEXvZ5lNTXFjSXP02RU1yRl/c9sCwpp5MO/tbPe87LywydWnI8B0aUxkPRMEsputhDbl3u50h9bwaYwWtIYusKy3HTQM0mvqNj1fA+GGNZslVBX1p77gAVl43OI4hbgi+rJft1gjtKRsurer7PBr
    • smithi173.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMLjBqJwMGB+hU8VadjAXgpyLSajPq8+DuqKNzH5ngs2OJx678NFNOig/E99bJU3OINIFpbfX6b8ctGutIpYGz/R8vLOU6pQkuMqRhfZNXpczaz27e414a5AXKeEQDCfcKv/WG2263kYBLsMxv6ufrL5sPjghPOVgK8EjlTVEvv3re+/cy2AG9w+9eZmafyGO5NYLLYZyU8NMiA2QL04D7CoPbX4qdYuLf7h1tyrLoUD2gDQx1qkN+pG8NSoyeAvXZXuSnbESZLpTxnY8jUXvtxMlkLyNiv5t0+IIgctYOTwBPwsR9c1SCjVFnlF0JdDPpzvlSKfp6uMAEiuvuDt93
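
Six targets in all, one per role group above and consistent with the internal.lock_machines count of 6 in the task list below; the values are the SSH host keys recorded when the machines were locked.
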
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 568930335d9961789e96406657c0ec2608d383c7
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_pool_perm
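
Everything before install: is teuthology scaffolding (package checks, machine locking, testing-kernel install, logging, archiving); the test proper is the last four tasks, which install the pinned build, bring up the cluster per the overrides, mount CephFS with the kernel client on each client role, and run the pool-permission tests. A minimal sketch of that test-carrying tail:

    # Sketch of the tail of the task list that does the actual testing.
    tasks:
    - install:                 # install the ceph build pinned by sha1
    - ceph:                    # deploy the cluster described by the overrides
    - kclient:                 # kernel CephFS mount on client.0-3
    - cephfs_test_runner:
        modules:
          - tasks.cephfs.test_pool_perm   # the test module itself
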
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-05-12 03:27:56
  • started: 2018-05-12 20:13:03
  • updated: 2018-05-12 22:13:05
  • status_class: success
  • runtime: 2:00:02
  • wait_time: 1:49:22
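
The bookkeeping is self-consistent: started 20:13:03 to updated 22:13:05 gives the 2:00:02 runtime, and wait_time plus test duration accounts for it exactly (1:49:22 + 0:10:40 = 2:00:02).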