Description: kcephfs/recovery/{begin clusters/1-mds-4-client conf/{client mds mon osd} kclient/{mount overrides/{distro/rhel/{k-distro rhel_latest} ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{frag_enable log-config osd-asserts whitelist_health whitelist_wrongly_marked_down} tasks/failover}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2020-09-23_21:37:07-kcephfs-wip-yuri6-testing-2020-09-23-1226-nautilus-distro-basic-smithi/5463752/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2020-09-23_21:37:07-kcephfs-wip-yuri6-testing-2020-09-23-1226-nautilus-distro-basic-smithi/5463752/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2020-09-23_21:37:07-kcephfs-wip-yuri6-testing-2020-09-23-1226-nautilus-distro-basic-smithi/5463752
  • description: kcephfs/recovery/{begin clusters/1-mds-4-client conf/{client mds mon osd} kclient/{mount overrides/{distro/rhel/{k-distro rhel_latest} ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{frag_enable log-config osd-asserts whitelist_health whitelist_wrongly_marked_down} tasks/failover}
  • duration: 0:27:56
  • email: ceph-qa@ceph.io
  • failure_reason:
  • flavor: basic
  • job_id: 5463752
  • kernel:
    • client:
      • sha1: distro
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2020-09-23_21:37:07-kcephfs-wip-yuri6-testing-2020-09-23-1226-nautilus-distro-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 7.8
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: 398802626dd2de19ead1fdc45920f8b13b773070
      • branch: wip-yuri6-testing-2020-09-23-1226-nautilus
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(TOO_FEW_PGS\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • not responding, replacing
        • \(MDS_INSUFFICIENT_STANDBY\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
      • sha1: 398802626dd2de19ead1fdc45920f8b13b773070
      • conf:
        • global:
          • ms die on skipped message: False
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
    • install:
      • ceph:
        • sha1: 398802626dd2de19ead1fdc45920f8b13b773070
    • admin_socket:
      • branch: wip-yuri6-testing-2020-09-23-1226-nautilus
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
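The merged overrides above are assembled from the facet files named in the job description (frag_enable, whitelist_health, whitelist_wrongly_marked_down, bluestore-ec-root, and so on); each fragment contributes its own conf settings and log-whitelist patterns, which is why entries such as \(MDS_ALL_DOWN\) and overall HEALTH_ appear more than once after merging. As a rough, illustrative sketch (not the literal suite file), one such whitelist fragment could look like this:

    # Hypothetical override fragment in the style of the whitelist_health facet
    # (illustrative only; values copied from the merged list above).
    overrides:
      ceph:
        log-whitelist:
        - overall HEALTH_
        - \(FS_DEGRADED\)
        - \(MDS_FAILED\)
        - \(MDS_DEGRADED\)
        - \(FS_WITH_FAILED_MDS\)
        - \(MDS_DAMAGE\)
        - \(MDS_ALL_DOWN\)
        - \(MDS_UP_LESS_THAN_MAX\)

The cephfs_ec_profile entry (k=2, m=2, crush-failure-domain=osd) corresponds to the objectstore-ec/bluestore-ec-root facet: the ceph task backs the file system with a 2+2 erasure-coded data pool whose failure domain is the individual OSD.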
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.b', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.c', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-yuri6-testing-2020-09-23-1226-nautilus
  • seed:
  • sha1: 398802626dd2de19ead1fdc45920f8b13b773070
  • subset:
  • suite:
  • suite_branch: wip-yuri6-testing-2020-09-23-1226-nautilus
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 398802626dd2de19ead1fdc45920f8b13b773070
  • targets:
    • smithi185.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXEwnW2WDcaxICC2cSx1Q84vjBi+NInBkpaGkxh8ubqCfNe5NZnkwTB6luk0UWyR9Kw339B1DKWU3XwyX2IIqb47Ld0La04aWyxO6A+/Q4SAcO8Olw9lYeEHi2VfoY1LZvnbaac+DLUMexSx+uHmAKIkjjrn72P3VBDZT3GOH9f0qA98nIk4ipI0iMpytjY3ommS6bVtUOAhBry1SD87SMFg1Qc6mUfNebGB19WyJgQQA7OguWMotthqn68AfhhoxXG9muSbxpVG+5UwFywdCwazo2d+XZsWq1MHpUeX72vi99mplRjQVvJw5WzNYMeVjZdX1Q4RWAjssFZeN2Ct5f
    • smithi018.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLvoNSqwN93B9R0nfnd4pgkxlfMQC7CPoKP+TSlQSQ0O9mx37sonWD9CeeRrsRpbrHZ+g6ZIeVCmIhUv4k1OYD0486Yz+NM2aEmGfBLS8nBN1Jq1rjcWDJnEV5WjPxPhT2QpPu5U+oa1YEAeoOF23PxL2lk2KD5I1BenbsxSQw+CNkec3mOxyPrWVDbcaj9yH4AdCu44/RgTwkoZMBtx2gE1jyDVG52qoXHxQ+TtVB9ObGz1fGuXqjd65KSxt/iWn4lqVbbZNbYRFtmbTe/5qnwhlFnLbphlHui31C/h84gD7LIzH6YCn32oEQIChvyUcMe85glQo/Zf+6/6HTIH5b
    • smithi071.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI7YIGOXvo2mxF7Zmzg3O4xom+V+8GqfgTGVWbTBpDOWcJsScZjUhe+r7dTXbN0eHKTtul34BylAh0KRYPalbY939JI53XvHYPESbYtQ0r0J0fzwZ0qD9QLgXhrGpfS/yaSRPg/aooJl8nHFDHJkZxQ4GRUtA1OJrgaG8HSBfcBvkxsWBoqQlnQsd3o9A3nvBsBM8uIJ1Lozsa2PA/2XvX23j8r9mBlRfxxuhpq+qEZd4SfsK18PFTu0OBXC4TI4n7UPzjm5RxEYddkjqjSbPxMo+Yc+tsXnHV0CWylHmfkdQsZO+Ndpe8wFLaRjyIDNYXOUECmjHOURjP0RQ9VMYV
    • smithi067.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9ng8i6uz9CW2zEkwR2QpLs4EKfu34nxqy2Gi0uXRAQJnDhQ83Z3u2MHCnvgqb9bnAOY1Bhir9KNqmS+mtzSq1XNqI21p4GEnxpUsV99LCiJ+I9okxw4T53jqKDxiWbbbriuUQzpE+55gZJbPbPxb1iw0rWB1WK7P4qQfeKURDY4zjDOYuYCCU5oSuGgGbAppByf+rqZPcb/Vxno/z8uhlmQrHARaDZkwA2Bs/42PYi/H2BdnZ+Q897j0gCSnW56ccS/bwlu3ebkV7CrT9saRBsQ07T171L9eNjsamFLWm1LAdzWfi9LfwXI2iw8+2V7jtAZBeoUlpcRJ2gdq0XL29
    • smithi053.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpBlHRfoTrBtelkD4tRLsctt5NN1MtoF+AeSAIr2LuhXSxf6ZDevFYhjmzqz+kD0s8Tsvd37sd+ZEJBqdfENy7atvX+IkQtTz06Iq3eKNUch+ufS6vvfs3GErqS6S7Tk4EjymyXhgUzVUQiDTdQOC6ZZHfqah8NTGXJTFY7YMnJc13Esga2nimiduXdLpSIVNyAfXRcR3W8RH6BR8LFxXNoIwjh1MK7lJtM/ne4ZntRuweEdZpTssDD9dX5+RnYumfnlvQoZvJ6fWqtcOvIq+76Ss2OuY2hoklcqJzHt2XRgWWHFHrVrWKhpYr+YcP4b9ZnbUFcvt3Airvvhl4wINd
    • smithi016.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBdVHvYfh+ygjO7wya2EB8Xh/T8QSmXP7/LQP4RdTHvCjKB5lmZ+saLAqfBVI7zDekNv4RSeTsuV5UZn6A4Q/Af4DI7S7ceGz1C00iB7Zf8ZoxhtxBbgc1Wt1PkzA1xgPTbnFPBAw7HqMnkxiPNdEOb0VJYxbdoIyyNDEJd1BNyXYlMTvDerL9o8xgMoBHDWKflJvs4YbN0UdX+AsVKgqxvQZkQ+0D3dzse/P2c1o4UX4GAFana9g2FJxf4U8aiL1Z57t8wN3q3LJZoariE4wbT9zcNDkaa1eL9Wna3jX+Zb7ti09FZDPaBqSsw4FP1jM8eZc39eAZXt8bdXmsQj+t
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • sha1: distro
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
        • rpm:
          • python3-cephfs
      • sha1: 398802626dd2de19ead1fdc45920f8b13b773070
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_failover
      • fail_on_skip: False
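Ignoring teuthology's internal.* bookkeeping steps, the suite-defined part of the job reduces to installing packages, deploying Ceph, mounting the kernel client, and running the failover tests. A minimal sketch of that task sequence as a teuthology fragment, reconstructed from the entries above (values copied from this run; not the literal suite YAML):

    # Reconstructed sketch of the suite-defined tasks (illustrative).
    tasks:
    - install:
        extra_packages:
          rpm:
          - python3-cephfs        # the rpm list applies on the RHEL 7.8 targets
        sha1: 398802626dd2de19ead1fdc45920f8b13b773070
    - ceph:
    - kclient:
    - cephfs_test_runner:
        fail_on_skip: false
        modules:
        - tasks.cephfs.test_failover

The kernel task's sha1: distro (the k-distro facet) indicates the stock RHEL 7.8 kernel is used for the kclient mount rather than a custom build, and fail_on_skip: False lets individual test cases skip without failing the whole job.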
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2020-09-23 21:37:57
  • started: 2020-09-24 00:46:18
  • updated: 2020-09-24 02:04:19
  • status_class: success
  • runtime: 1:18:01
  • wait_time: 0:50:05