Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/data-scan.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2018-04-17_21:18:08-kcephfs-luminous-testing-basic-smithi/2409706/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2018-04-17_21:18:08-kcephfs-luminous-testing-basic-smithi/2409706/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2018-04-17_21:18:08-kcephfs-luminous-testing-basic-smithi/2409706
  • description: kcephfs/recovery/{clusters/1-mds-4-client.yaml debug/mds_client.yaml dirfrag/frag_enable.yaml mounts/kmounts.yaml objectstore-ec/filestore-xfs.yaml overrides/{debug.yaml frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/data-scan.yaml whitelist_health.yaml}
  • duration: 0:21:40
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 2409706
  • kernel:
    • flavor: basic
    • sha1: 99aaa89478069a7c9adc6484c88b2f905abff3a9
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2018-04-17_21:18:08-kcephfs-luminous-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • \(SLOW_OPS\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • bad backtrace
        • object missing on disk
        • error reading table object
        • error reading sessionmap
        • unmatched fragstat
        • was unreadable, recreating it now
        • Scrub error on inode
        • Metadata damage detected
        • inconsistent rstat on inode
      • fs: xfs
      • conf:
        • client.1:
          • debug ms: 1
          • debug client: 20
        • client.0:
          • debug ms: 1
          • debug client: 20
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
          • osd shutdown pgref assert: True
      • sha1: 64ffa817000d59d91379f7335439845930f58530
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: 64ffa817000d59d91379f7335439845930f58530
      • branch: luminous
    • install:
      • ceph:
        • sha1: 64ffa817000d59d91379f7335439845930f58530
    • admin_socket:
      • branch: luminous
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: luminous
  • seed:
  • sha1: 64ffa817000d59d91379f7335439845930f58530
  • subset:
  • suite:
  • suite_branch: luminous
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 64ffa817000d59d91379f7335439845930f58530
  • targets:
    • smithi131.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfCvewseqTOQQ/mY+xvFWQHeSlHVJHAKyI+HvfnjKxKWHYX6oFJVS5tyhcZ3BlD4bjPpFZtGPtxskjdNTvJClfBSlnsWzFJZbvxXlreFoe/0ApV+aaL0oxQcXVPZol6JsNcY8aIXOhBNiBo+ubM12nVoAldoR4C+sxduk+KO/AXlzWhATwoVmcsLGjLaJoJN8zX+fvmHxYCUczEeGL8C8IrEZHp4WMXxmJHo/hZkhd362kGDMkICD6Bsr8YaeuarrZf7wPPag0gF7dQ0rO0MgS22amQEzH4YThgb634KwRMML/triNd4GmYpH6llit/NONCO9/dIabtpjSVYLb0sol
    • smithi156.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDaN/rLo1+Uf5NTmtChBvuBQi1UCVcyqlq1qEevrGiCao97cL8A9NI4MZwbAudU4yE33UjFQg5VsxaG39j/lqSGg+mJQ1vDlVd5oqsK77itGOutgA4WE2J9IsYPx20aaLSLW2DVDnw+mA8/IYEhj3/HIcBspBHqmhdsndSbWi+x1qoWqTKzfJo4fBfOzrBLSBFBRwioADK6K2uXChW6do3rtsFiiJ1o01aNIsGKErRmElT6eAYWRc6vmyAWItljlEuelHlnbPtdDrVw0sphw394iGTOOK9kmFT3YXOqsf7x+UZBOSRTnhVyMPQbhTCRa1dCrcJWcPY65DXbJSB1Sf/j
    • smithi160.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDOgAMId5wqWUQeQAXvgqxCuzA3rraD0GjLlA0Pjen2ch/gYJkNXHCaS8Ac7Z0kWNNmH87IrWKIRqvVvaqioPb0qG8oYg0omNqAWlPCu1CQtjZraeoiV+t6nZ+dG3Z5Qob2reEneeLTonfthcZkr4dCu4fov336IrLwoVyJX68GLsx+uRVWb+7Kmw4VI332q218XYfLC4vhKcs0pDjILEy9IlZpLJkJVoJUR12MNXwpOKfmscDbmK6OK5fPD9Rpfxvq67EuO0G3j3zQC6byv/Tg6Qt/DeGwcLcVv5T2xsWK4ex0Rf+Z34Vq9T2cfaQbddeIuxkFo83SAcVujiahrewt
    • smithi098.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6PmqDN3TXu7tmJ+akXsf8fP62LHURJRLu04OvTUg1OwnP/9dsLCt61n78BPn/zEZFH/pAYZVV6i9GHSCzHAaCnJGecWaj1jVgNBnNNB1mXJXcXptFeImqKEn7GqjFO9rWwyx0H4hYQmT7LlD35ZUiI5gGVSts/umjx4EK3Cyc73PK/Ihvk2b7RpU4WmPjDdqsaUvBiWdc2LE/4kR0mg/rSsr1fd9rH4M3f2okSmDye0jB9rCX6FRLgZ+LobQFcyHyLS8ZFcSqONC1xVq9IiyS8Z+lEhc7lL00BllopuW75I09a3+AlrFM54qinjR8wUg2tOiavmfzmehlUKUyfzkL
    • smithi199.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD268lvBUdtgXZzNJ73bjkQaTLS+tQMZBYukOw0dwwUDG9QEx9O+WRZSJDaac/gBlbH7muKef2S3d0Wh4eCliH8YCrHiEWhAuf3fQDplPuBVYpxTISqf+BYU7Lb9EllD9hJd8xBQRrtNr0mQAB5WrSMHbWBZZ5y4FzzXp44nZj5tnrulbmCo4NT/ianAmFBJPSqv4pMT3RfFOlVYRBC+VmGW/lGGViwcV7+UEhbbQi7VATurbyrEuQ2hvZ7Y0nFNp49ssVArcklTSUL5ZVvaAHcrT1z3iC37DdWqeOgwvAIDtQdXM7Nl2MoRRcV6Rd/52DCsel5wB/CBKCoa8g0HB8V
    • smithi070.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6oXQ20vOOq+LBRZjQN5Dfc2/BdcLDxj0jkAQQZgbSLggx+PLVFqRo0ebU/ika8WWq1EuaJc0Cc+VWizP5CuNLPOGoXfncof/0k6lzhAHwqhX2hHolNavTePeGuJ1RkGf27/mhtDOAtf/nTosozS2efw7e77h8cWhaMeoDJ4mi3+sroxh2ReCBdsBAAPl3ld923mCg3y12YCnp1FM05IZYdBTyq4404XqF2/MrlefishKo96vGU1v8qH3tbqnGLvTI7luJdLR7/wLIpmYOsi467rh2R4sWsb8hwaxUl735TtmAu6wDWr0Jg05cUc/lI9Z4UCMR3wm8KLh36inBzX+L
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 99aaa89478069a7c9adc6484c88b2f905abff3a9
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_data_scan
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-04-17 21:20:32
  • started: 2018-04-18 06:18:45
  • updated: 2018-04-18 08:40:48
  • status_class: success
  • runtime: 2:22:03
  • wait_time: 2:00:23
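
The escaped parentheses in the log-whitelist entries above (e.g. `\(MDS_DAMAGE\)`) mark them as regular expressions. As a rough sketch of how such a whitelist is typically applied — this is an illustration, not teuthology's actual implementation — a cluster log line is tolerated if any pattern matches it:

```python
import re

# Hypothetical whitelist check: each entry is a regex searched for in a
# cluster log line; a match means the line does not fail the run.
whitelist = [
    r"slow request",
    r"\(SLOW_OPS\)",
    r"overall HEALTH_",
    r"\(MDS_DAMAGE\)",
    r"Scrub error on inode",
]

def is_whitelisted(log_line: str) -> bool:
    """Return True if any whitelist pattern matches the log line."""
    return any(re.search(pattern, log_line) for pattern in whitelist)

# A damaged-MDS health warning is tolerated by this job:
print(is_whitelisted("overall HEALTH_ERR 1 mds daemon damaged (MDS_DAMAGE)"))  # True
# An unrelated error would still count against the run:
print(is_whitelisted("OSD_FULL 1 full osd(s)"))  # False
```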
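
The timing fields above are internally consistent: runtime (2:22:03) is the wall-clock span from `started` to `updated`, and it decomposes into `wait_time` (largely time spent waiting for test nodes) plus `duration` (the test itself). A quick arithmetic check:

```python
from datetime import datetime, timedelta

# Span from `started` to `updated` equals the reported runtime.
started = datetime(2018, 4, 18, 6, 18, 45)
updated = datetime(2018, 4, 18, 8, 40, 48)
runtime = updated - started
print(runtime)  # 2:22:03

# runtime = wait_time + duration
wait_time = timedelta(hours=2, minutes=0, seconds=23)
duration = timedelta(minutes=21, seconds=40)
print(wait_time + duration == runtime)  # True
```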