Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mounts/kmounts.yaml objectstore-ec/bluestore-comp.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/config-commands.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2019-01-10_20:25:47-kcephfs-wip-yuri4-testing-2019-01-10-1638-luminous-testing-basic-smithi/3443543/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2019-01-10_20:25:47-kcephfs-wip-yuri4-testing-2019-01-10-1638-luminous-testing-basic-smithi/3443543/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2019-01-10_20:25:47-kcephfs-wip-yuri4-testing-2019-01-10-1638-luminous-testing-basic-smithi/3443543
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mounts/kmounts.yaml objectstore-ec/bluestore-comp.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/config-commands.yaml whitelist_health.yaml}
  • duration: 0:10:33
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 3443543
  • kernel:
    • flavor: basic
    • sha1: 9e78dcdd94782ed234d6c3e148674b799a2c4a8b
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2019-01-10_20:25:47-kcephfs-wip-yuri4-testing-2019-01-10-1638-luminous-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • workunit:
      • sha1: 22899e551b699eb624418e679c3da589b8a8fb5b
      • branch: wip-yuri4-testing-2019-01-10-1638-luminous
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • global:
          • lockdep: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • bluestore fsck on mount: True
          • debug osd: 25
          • bluestore compression mode: aggressive
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • debug journal: 20
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
      • sha1: 22899e551b699eb624418e679c3da589b8a8fb5b
    • install:
      • ceph:
        • sha1: 22899e551b699eb624418e679c3da589b8a8fb5b
    • admin_socket:
      • branch: wip-yuri4-testing-2019-01-10-1638-luminous
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
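
For reference, the ceph overrides above (fs, log-whitelist, and the per-daemon conf settings) correspond to a teuthology overrides fragment along the following lines. This is a condensed sketch reconstructed from this record, not the literal override facets (frag_enable.yaml, whitelist_health.yaml, etc.), which split these settings across several files:

    # Condensed sketch reconstructed from the job record above; the real
    # override facets split these settings across multiple YAML files.
    overrides:
      ceph:
        fs: xfs
        log-whitelist:
          - \(MDS_ALL_DOWN\)
          - overall HEALTH_
          - \(MDS_DAMAGE\)
        conf:
          mds:
            debug mds: 20
            mds bal frag: true
            mds bal split size: 100
            mds bal merge size: 5
          client:
            debug client: 20
          osd:
            osd objectstore: bluestore
            bluestore compression mode: aggressive
            bluestore fsck on mount: true
          mon:
            debug mon: 20
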
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
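
The six-host role layout above comes from the clusters/1-mds-4-client.yaml facet; as a rough sketch (formatting may differ from the literal file), that facet expresses the same mapping as:

    # Sketch of the role list; entries taken verbatim from the roles section above.
    roles:
    - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
    - [mon.b, mon.c, mgr.x, mds.a-s, osd.4, osd.5, osd.6, osd.7]
    - [client.0]
    - [client.1]
    - [client.2]
    - [client.3]
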
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-yuri4-testing-2019-01-10-1638-luminous
  • seed:
  • sha1: 22899e551b699eb624418e679c3da589b8a8fb5b
  • subset:
  • suite:
  • suite_branch: wip-yuri4-testing-2019-01-10-1638-luminous
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 22899e551b699eb624418e679c3da589b8a8fb5b
  • targets:
    • smithi050.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEcYPUwtZL++91hHmREitO2YK3daI4HsmWJx0JoOQrckK6EcxScDEF+AU+2KRi9YTz3LofyLY3pC6hzIau0lkH00BViU8MWdKcq7IQONd06yqH3JLHOLomycpg60lV4kFucsKf+SuldzfSUWKzZMPyfZ4Ai7N/VVd2coJbruhQ9qs5ioZafBmTDt+KwrkHS+TkLaLqR02sm1elh21RU/zyzt0EvCY2Ut97jzn0Qtl6tinnbYPLtEINB26NejPqMvc+ZgagZmbNLpogWDwWEZiJFDoK4pCvYHfzIszv50TyCQK/TPcTdfOLA1QB3VarwvIlfYyMpKaVvCREq+Tjj+kx
    • smithi184.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5mxlZmruCjhB4eB+RSb4kS2UuyIU59BHnNvVYskjCbXlT9Hcx7dJYCdqkvawcq68dWOLf6S0ZKsl3AW14ZNXrQJdYYf0GMKDp0MhKcDJdiqkdpSIu/2jyQyezWJPG442H/Zwz9ZllG2L4xnntedpBO9+2fuV1UkkK/LGNkTdyMJo119rhxQ9RF4AtmgU26oymU/PP4gjPHy+7S48jVVtxU+twCmodwMbIMaBrJdf/u2YoDNTqHQ5kDotiuBl60DOS21vr4irgonQkg81WTETdSX2xUePHDCgwaNPmg1+Lhsas3ahSYpOE2SuaZdxv0/T23DYHSVDw1yPCH3G/h0s9
    • smithi189.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDm0tsD+udaMRPE7QuGw15o8ucjc/mQ0fIScMu48t//U4iy2kftPCjv6HVwqaBB27wPm+nVyPPMXPiagrdyKeFP+96usW3cmNN3egTp0b8SM5yL7wcK7nYNqJjDvTXepol88h0wYjuYKXAkLnqLlITNDcAHP1e/xmOor8CCfOINxldE/4o9DoUAXrwWqtlSUQ21clhMnvN3gkLUIyfe0lLATUnxlyoMJe4SIEUuVsGdGbLAFIvGnFNF+ysoHX1dUJSS4Sa6JTaZUOz1+7U9AgvMx3jdxdg+7lhC8LKeESORyeb6hz9cCy7Uwb5lrFNNuXYPH8REEn6pup5fU5gnkeeV
    • smithi191.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGM7OHZw4BQCf0jXtcl4S3sKvI6GB8H4/CC6C65qC6VVTprIntYu/d4T6wZzNnOiQURUBYZREI+CZGpY9cxEUqQosMUKADt1XnnaQ80yO6HtEl0aeaEDMVnESHsIvQDXAvNH4pXo1Q4qSSbl9Wxfq5EpAP6HRYFmj4h66zQQte8za0MqCfjw11rOE+bF8vlVt+kJavWn82fcVz4K50gjFnvLxkauNr0Ki4kfzQ2mbDGoK6Y5Ouj1zaiJmIqJsG17OJiGsAnZRCZGY5K31BLBA0ExF8Vqug62ItBejlyUsDc3myBXTHDC5MoJAaGQosKUBDchN0Z+IXlLmP5p5ggRKh
    • smithi132.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGv1Z2lGb17J5vK5M6LFQbEEeIzdiV5yPVXVMIDdrJI0ZEL7ElATaaIMY7srK4SmzqsQEOmSPbPkBxbf7CXyJ8ug8k4upixYqSnTso/2lCW+WUzdUk4lg2D5tNURiFXGyKsUNFhJDqFwfzF/fpws0ON71RS44s6WqTx56H4S6m89qPTw3Y1KNb2o3m+GKe58p0nkzynU5rEXrWFlosa584OA2+efgVQF2ui+bxiqv3ucbc0Azgde7q2r4N4JtJxg5XX8khiK47oP96LUZh+6A1B0ey8bNjxNxv5Yac8qgZSy0Br9T5VcA6yoVsy5YLgndYtg6V4yZPwjUvSP4WSyAz
    • smithi084.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpgwEE5jX9z/+sGTH60MAM4XcZXKRnxHUryXHT7prBuKDuqIVf4zCvsqG1jz4zwv3GDzi7O+kF7Bm+2eKdZf7wg88VznyILMTWppvb21bfzrKoF8CwbTbYShILXur1/2oa3690GF6LMCONUuiOfmUs9palPTxR8mNxTk27aUd3rDH6eP//c8okTQ6yDzAQ/LL3JxBb31Hde570V5GXGN8jisHdk9h7C+pHdCVuqCyXuu/kwW7GTeVpcF/B7cv5YtKTRXqZRyfhUJZZW4Td3J1SPKDLZ0nPBFFMQK0V0mjbqo14UY1Qc9PudT4d+OLqPZQxZ3ByP6KpVgWaD+d++kNr
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 9e78dcdd94782ed234d6c3e148674b799a2c4a8b
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_config_commands
      • fail_on_skip: False
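
Most of the steps above (internal.*, kernel, console_log, pcp, selinux, ansible.cephlab, clock) are injected by the teuthology framework around every job; the portion that defines this particular test, coming from tasks/config-commands.yaml and the shared suite fragments, is roughly as follows (a sketch, not the literal facet file):

    # Sketch of the user-facing task list; the framework-injected internal.*
    # steps are omitted. Values taken from the task entries above.
    tasks:
    - install:
    - ceph:
    - kclient:
    - cephfs_test_runner:
        fail_on_skip: false
        modules:
        - tasks.cephfs.test_config_commands

cephfs_test_runner imports and runs the named Python test module against the kernel-mounted clients; fail_on_skip: False lets the job pass even if individual cases are skipped.
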
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2019-01-10 20:26:19
  • started: 2019-01-10 21:02:49
  • updated: 2019-01-10 22:46:50
  • status_class: success
  • runtime: 1:44:01
  • wait_time: 1:33:28