Description: kcephfs/recovery/{begin.yaml clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} kclient/{mount.yaml overrides/{distro/rhel/{k-distro.yaml rhel_7.6.yaml} ms-die-on-skipped.yaml}} objectstore-ec/bluestore-ec-root.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/auto-repair.yaml}

Log: http://qa-proxy.ceph.com/teuthology/pdonnell-2019-03-20_02:50:47-kcephfs-wip-pdonnell-testing-20190319.225631-distro-basic-smithi/3749638/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/pdonnell-2019-03-20_02:50:47-kcephfs-wip-pdonnell-testing-20190319.225631-distro-basic-smithi/3749638/teuthology.log
  • archive_path: /home/teuthworker/archive/pdonnell-2019-03-20_02:50:47-kcephfs-wip-pdonnell-testing-20190319.225631-distro-basic-smithi/3749638
  • description: kcephfs/recovery/{begin.yaml clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} kclient/{mount.yaml overrides/{distro/rhel/{k-distro.yaml rhel_7.6.yaml} ms-die-on-skipped.yaml}} objectstore-ec/bluestore-ec-root.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/auto-repair.yaml}
  • duration: 0:47:50
  • email: pdonnell@redhat.com
  • failure_reason:
  • flavor: basic
  • job_id: 3749638
  • kernel:
    • client:
      • sha1: distro
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: pdonnell-2019-03-20_02:50:47-kcephfs-wip-pdonnell-testing-20190319.225631-distro-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.6
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: 311db58b62aed880c8db8db670637b4d1c68a653
      • branch: wip-pdonnell-testing-20190319.225631
    • ceph:
      • sha1: 311db58b62aed880c8db8db670637b4d1c68a653
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • force file system read-only
        • bad backtrace
        • MDS in read-only mode
        • \(MDS_READ_ONLY\)
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • global:
          • ms die on skipped message: False
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
    • install:
      • ceph:
        • sha1: 311db58b62aed880c8db8db670637b4d1c68a653
    • admin_socket:
      • branch: wip-pdonnell-testing-20190319.225631
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_pdonnell@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.b', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.c', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-pdonnell-testing-20190319.225631
  • seed:
  • sha1: 311db58b62aed880c8db8db670637b4d1c68a653
  • subset:
  • suite:
  • suite_branch: wip-pdonnell-testing-20190319.225631
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 311db58b62aed880c8db8db670637b4d1c68a653
  • targets:
    • smithi122.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEkzHEVoCVpcyoO8zCSxT06Zn90gneIrQiXreyrgEqlt7vsXID6CRTgseKR3RHNehtt1NSe++YbGg5GB/x+75vXgUwYkANwZvey5tvcoMS1Y292k0usgpajtyIX65U9a3KHnXJsn66tl5M8Pk8/vqLjBcwT3tyvhMNjAQz0jUVTTOwncfnMtcxZNxxDwzsLDpjla9M8S61sIr7YPk7qT6N9jv+/r/N2nWkGO8JN6ZJWQxs7mnNB8NZdgiwPnHBiVud4f4hLYvRbjNWoZVQmbv0HTqY+TCfw9Qp4enXlPgF2Ax5b6DTTW97BQUgBdD1pMUx0NVLLzgFA6A6JhFyDG63
    • smithi091.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCutfaRUgjeqnfLLO3PiOOK1SmUpmaAOplluVzxXCe1zGKrqtYvFetNKB3TO9dQ7g8Zd8ACFabIMoUcdgtX78xkfk62q4gS0TQYz4eGi3r6EZ/Mu/evffy30lJQW3OXeBAr7bsstUhQLFkzVWxezDLXow0t1VMNSkawawJZOIKjM4zgM3MmzLjJFS5n6Iv1eepclLysEEzhSdG09Y8TdEqWhezWQw2bUGPwIqz58BO2dCzjKnNk0w/QAXvAzOItC4Nluaeq1Qa7VRY+dR8M1x3UFR4XdAgf7i04WHb/f5O4m98xVQR/80n+TxUDOwerSG8C1F6AfrzX6EIZfBWFdhMp
    • smithi151.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDjh6327COQocCDvMbULwZXfbM4lpO5Bxz0i2Oqi63LJlo8CkhiNmjuIGbWwC49QfvUdwdEqfTkP+q2goXJ2Cp2wzGBC/mVWl5N0ngoSRXoGfFQsxyzUTU9rQuIMtjvIkKQMn35Ezf+v33kyKS4v8qlyDtn3AAo+q+ZlzJQVdyuJcjLFYbD/W2+ZUOnDaSMrXjDIZ5L3Y83JVLM1/o1pplHJe7+viOlf0fFQzafWOxCGY7YgIfah3mj+pjmophFdEgePeaNY9NVGoXkd7eTnTXwLT69pNvTPLyVHOT77cXXtze/3J8i90Rbf25Fd27x/OGb3ijyAfmBomo3ZgBEw/sl
    • smithi115.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs6XOJm/fkQoySAhS6mWcx+eH4FDlhMDRna3Zjv0YGa9KlQwuTdkvkO2doBGsgmtYbk9nQlGAjAbh+H8xkJPjgMgyOivNY+qFscIxq3Bdpci3o0JuctQtLYa90YiJvhK0kJaC9t/UE/YnoXLRUc+TyJWcIvTiNWwcG2dtgRUzdTioaKSvAegMJD0ltMAhK6EinfnL+7cQX40+1c1Dfzu/Zez2m7YORj/x3c8RqswVXpXzfV+V7f2BGGRbIw8brzTbiYJ/pjxgZwajVutDAYIQHugeVHsINLgYsaUIM5AzpKg18SBj1kCwGBaQc+EOkWEti048jPzIw3zULxa4IHLuv
    • smithi138.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1w1E1hY7b5e1DSOFqNPnqEoNVctn10TZxw6ITzueESfi+VIhVUJZ8Ktb2A2/OHQWUy/u2g0fPIdanqQ8HuW9prOz4WUB8EQu1Si54o9Ltkc0pZK2AyAtmUkqBH+XvOaPYgtxobhIkwUDDFqG6vYr/W+D21zBpweWmozl4BpRbrcBdFarS2285lTS2K9k4OOjgMU6fWmujRfffXx9XUgYBn/M0Fv11KxsfP6FwX61h9NOqtaftBISLV6UAowYON96uSE28Gev5PdKxKTVerZix5WpqKeh8weqFT1GX8VapuaaMrnic0ZaLIkzx2UtNxrQJJdPXqxfV+qXjh2mQwarD
    • smithi205.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMyGxNAZCQYfQk/fWpxizB4GyTnlPAwa12t0yyBcTqemSPvaqutcz6UaDgQaGBFffooa1XjXvht3Y+bMqJY/LPelZkrIJvvFsV7lEf4+xM89i4PXN/8BIi2/mmAr9uC9t1TdfqwG3/OmkNFXYJSqMRx1W2Db3CfAhcWp1hwVbRQcSlDGtZiLQHFNP/MwDeFRbObpWhnlVyu+hbdGwWnuOOOp2DElDuSovb/sjFlEXjoTAMHQvWMDo+hUn0obKXuDfGP75H0uGsHd3DRHZQ2yYE+eUhSU83YJcrFOAVy+3Yd/47FthaToAg7+IEbyjNZojoGNS2Me+tI/ZtI6sKg18N
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • sha1: distro
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
        • rpm:
          • python3-cephfs
      • sha1: 311db58b62aed880c8db8db670637b4d1c68a653
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_auto_repair
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2019-03-20 02:52:50
  • started: 2019-03-20 12:32:22
  • updated: 2019-03-20 13:32:22
  • status_class: success
  • runtime: 1:00:00
  • wait_time: 0:12:10
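
The objectstore-ec/bluestore-ec-root.yaml fragment together with the cephfs_ec_profile override (k=2, m=2, crush-failure-domain=osd) backs the file system's data pool with an erasure-coded pool on BlueStore OSDs. A minimal manual equivalent, assuming illustrative names ec-22, cephfs_metadata and cephfs_data rather than anything taken from this job:

    ceph osd erasure-code-profile set ec-22 k=2 m=2 crush-failure-domain=osd
    ceph osd pool create cephfs_metadata 64
    ceph osd pool create cephfs_data 64 erasure ec-22
    ceph osd pool set cephfs_data allow_ec_overwrites true   # overwrites on EC pools require BlueStore
    ceph fs new cephfs cephfs_metadata cephfs_data --force   # --force acknowledges the EC data pool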
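
The kclient task mounts the file system with the in-kernel CephFS client on the distribution kernel (sha1: distro, RHEL 7.6). A rough hand-run equivalent, with a placeholder monitor address and secret file path:

    sudo mount -t ceph 192.168.0.10:6789:/ /mnt/cephfs \
        -o name=client.0,secretfile=/etc/ceph/client.0.secret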
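
The actual workload is cephfs_test_runner driving tasks.cephfs.test_auto_repair from ceph/qa. Outside teuthology the same module can usually be exercised against a vstart cluster via vstart_runner.py; a sketch run from the build directory of a built source tree (exact flags and environment may differ by branch):

    ../src/vstart.sh -n -d                                              # fresh local dev cluster
    python3 ../qa/tasks/vstart_runner.py tasks.cephfs.test_auto_repair  # run the auto-repair tests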