Description: fs/multiclient/{begin clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{frag_enable whitelist_health whitelist_wrongly_marked_down} tasks/mdtest}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2021-05-03_04:17:03-fs-pacific-distro-basic-smithi/6091652/teuthology.log

  • kernel:
    • sha1: distro
    • kdb: True
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • sha1: 717ce59b76c659aaef8c5aec1355c0ac5cef7234
    • ceph:
    • ceph-fuse:
    • pexec:
      • ssh_keys:
      • mpi:
        • exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R
      • pexec:
      • verbose: True
      • pid:
      • duration: 0:12:12
      • owner: scheduled_teuthology@teuthology
      • flavor: basic
      • status_class: success
      • targets:
        • smithi002.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8tCULr19sNrdPzke8yOQYMcqw7wx/L/YdNlZIEP5xBM2+2kp5hQW1e/YVybphueBt3lKqSMEyIXRs6sIZKYKxTX6oNKYf6aBmX5kHlz59Xwkr9vEVuOT3HZrJVgFXkDKFw6sLgWtBXlMQ+6oNolfTqrIGHn8gXWboh09+ZDodHwTKWUrnN0Y2Nj/OAyEH1NwomaV+wzKifeLsHqXmVpG9azzbPNX3JDVoGmOBFblMyZ4zPiwgkLm2Urxb+ckcnyMkLEzc3ULMk2+DhpbKte17y7gf20qDh7XgYt4GuBQLjh/QIcFgqWoxRlUue8K5laxEqL9HJYdbwf/Z51P6bPUtQYgInQCWU3lg/8HERiJluMCWZUXa0M55gMJSB3VMu+/DfU5bnTRqeMxrlLnrRez6G2ZoGAYTQAo79OflladOmNCSB9F4PiOeV7g5LLgM2HipF/6uPcuuBAy9W3VubapDKu5DGv/vF4pqgdw0t3UBFpWr1zvMLNwwbqcG0JwI/6E=
        • smithi041.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDC8/Bxbt1EVokKKIhUh2oYVTKrMEHCn1DauTUaleyX23P32zUL8rTMLZ89ycNfFbO96bBsLdmmhdHZkbwKXFOys9G+UdMAtyMn8jTgo5MrTcJ2rmk7sAddZ4/gf5YM42eSYPhQujot+5bhUOqtX2I5VQ2ppentnXuUwMjXl2nl2egudc63H+6f31ETJHQBs7/je0+eI06AsO+kv2XxvO2SKqY+X/4IQRvLAzOSbyVjz7PCCtuIJ68ll7+cSp6vDr+2eMRZa4uXxURUZhhYy4zSpGQxBaVKAY5idJmJiz3wfzuG9Ea6nP6GFJqaTDdLBuiG46HFusnojQrqUoXGzryqUoKjIks2pNC/Zr/9lHTo0GQJM1ITUwTFQkmzGb0rZvogX+C6zVpXq/sfPY8juAzrDkUulmq3R/S59lvRH7PnvtHduISumSu84WF8wIc8ytjJeJU++hs/ZQYwB+WTEGP+PbvOSXR4cWpHGnqFuK0g6cNsBSPOoS6AqEFoHkTendU=
        • smithi167.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9wo6vF9/1s7SWIBNQ4G/P7Ean+Id/LAcx3OKMnRbhJa1nXuGuoq7L9gqVOgSl3NYb5qwvGbW8nJjwIj5CWWyRNKG+kum5nRvKK7CLWimAN3KumPLYBIbZ59gRdBP8jIvYfPjIqOjWay8dmYwPNT8548YLeX8uiaH5k5UIZU9Fy6EHZD8RpJvh33JjMpjWx9IAxFa5XHc2dbQ0VExTPPtiu3FkOjVQTaiDfWBnGAQSWW6eIxLHojXeKSoyd9KnIgOffM1DZRVSzFxWaN4Ea/0Wl7qLhwpuMORrlj4x1NRURobA+iAAFefY0Pi8h2Tu7qC8L9vsDBCkTLIgGXPNpMujyW2/faN74EN9LAHadTgLdaJaUHz8LQ5C+HICbCWH/NZ8BuKANrUZgot7E68AKmr9gC1HlQYSoF0Q9ioaV8ZlmEG69FY5CJAoqhD4MsXm9VTlr+R6iKTtjt5GWDEfP3Nm6kXrcBHRdKfQYqpQg/R2hTdutle4WKSMpThCmJHF6Zc=
        • smithi172.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDsQzDt7DC/C36jvHA2bwzUaYra3NTno67iAbB52WPmT7FXhn8jwq9VG27/gZqMV14rj4KhOkgYATJl0bNMPlrXlLo7ppL/IHMvIN8llbY3EXwV9A4hbLvKz8N69s8VaemPn0CiXBORpCG5JGmwb5HG9mqve6PpnZl7Lw0OrkAy9JL1QmyVNG4whhVy1YvSW7hHbSsW3LP9ZZTxGjxOUkc7SBhFraT+3u1Vdwa3zawsUW7xPetxE8HjhMq3ZZzqMySWrWxUTHefZA07eivThbsunttLe2VIZ7J4HPywNK1LxxxsZyLpGAzfgAKzfvZlRiXUVC1XtWG+0G2oQodfrbeM8MxRoNH+ujkz1lHbadwxOgqVm50wIeOXHJ/XADbVpcQ44GujBdW2biHR/YDoMphBhyWC+wgF2Sc4l1fY+bb7GwBDF7btC8gJ/xeloKYsINgq1REw22WaF54OulMFlFRE/cTA0kmZcUBlVVIih65Bk8c0ZS47kgTG7qGnCPAM+20=
      • job_id: 6091652
      • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2021-05-03_04:17:03-fs-pacific-distro-basic-smithi/6091652/teuthology.log
      • suite_branch: pacific
      • wait_time: 0:15:03
      • os_version: 20.04
      • branch: pacific
      • pcp_grafana_url:
      • email: ceph-qa@ceph.io
      • archive_path: /home/teuthworker/archive/teuthology-2021-05-03_04:17:03-fs-pacific-distro-basic-smithi/6091652
      • updated: 2021-05-05 13:18:44
      • description: fs/multiclient/{begin clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{frag_enable whitelist_health whitelist_wrongly_marked_down} tasks/mdtest}
      • started: 2021-05-05 12:51:29
      • last_in_suite: False
      • machine_type: smithi
      • sentry_event:
      • posted: 2021-05-03 04:20:43
      • teuthology_branch: master
      • sha1: 717ce59b76c659aaef8c5aec1355c0ac5cef7234
      • name: teuthology-2021-05-03_04:17:03-fs-pacific-distro-basic-smithi
      • roles:
        • [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3]
        • [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
        • [client.0]
        • [client.1]
      • overrides:
        • ceph-deploy:
          • fs: xfs
          • conf:
            • client:
              • log file: /var/log/ceph/ceph-$name.$pid.log
            • mon:
              • osd default pool size: 2
            • osd:
              • mon osd full ratio: 0.9
              • mon osd backfillfull_ratio: 0.85
              • bluestore fsck on mount: True
              • mon osd nearfull ratio: 0.8
              • debug bluestore: 20
              • debug bluefs: 20
              • osd objectstore: bluestore
              • bluestore block size: 96636764160
              • debug rocksdb: 10
              • osd failsafe full ratio: 0.95
          • bluestore: True
        • workunit:
          • sha1: 717ce59b76c659aaef8c5aec1355c0ac5cef7234
          • branch: pacific
        • ceph:
          • log-whitelist:
            • \(MDS_ALL_DOWN\)
            • \(MDS_UP_LESS_THAN_MAX\)
          • sha1: 717ce59b76c659aaef8c5aec1355c0ac5cef7234
          • fs: xfs
          • conf:
            • mds:
              • mds bal split bits: 3
              • mds bal split size: 100
              • mds bal fragment size max: 10000
              • debug mds: 20
              • mds bal merge size: 5
              • debug ms: 1
              • mds bal frag: True
              • mds verify scatter: True
              • rados osd op timeout: 15m
              • osd op complaint time: 180
              • mds op complaint time: 180
              • rados mon op timeout: 15m
              • mds debug scatterstat: True
              • mds debug frag: True
            • client:
              • rados osd op timeout: 15m
              • debug ms: 1
              • rados mon op timeout: 15m
              • debug client: 20
              • client mount timeout: 600
            • osd:
              • mon osd full ratio: 0.9
              • debug ms: 1
              • bluestore fsck on mount: True
              • debug osd: 20
              • debug bluestore: 20
              • debug bluefs: 20
              • osd objectstore: bluestore
              • mon osd backfillfull_ratio: 0.85
              • osd op complaint time: 180
              • bluestore block size: 96636764160
              • debug rocksdb: 10
              • mon osd nearfull ratio: 0.8
              • osd failsafe full ratio: 0.95
            • mon:
              • debug ms: 1
              • debug mon: 20
              • debug paxos: 20
              • mon op complaint time: 120
            • mgr:
              • debug ms: 1
              • debug mgr: 20
          • cephfs:
            • ec_profile:
              • m=2
              • k=2
              • crush-failure-domain=osd
          • log-ignorelist:
            • \(MDS_ALL_DOWN\)
            • \(MDS_UP_LESS_THAN_MAX\)
            • overall HEALTH_
            • \(FS_DEGRADED\)
            • \(MDS_FAILED\)
            • \(MDS_DEGRADED\)
            • \(FS_WITH_FAILED_MDS\)
            • \(MDS_DAMAGE\)
            • \(MDS_ALL_DOWN\)
            • \(MDS_UP_LESS_THAN_MAX\)
            • \(FS_INLINE_DATA_DEPRECATED\)
            • overall HEALTH_
            • \(OSD_DOWN\)
            • \(OSD_
            • but it is still running
            • is not responding
        • install:
          • ceph:
            • sha1: 717ce59b76c659aaef8c5aec1355c0ac5cef7234
        • admin_socket:
          • branch: pacific
        • thrashosds:
          • bdev_inject_crash_probability: 0.5
          • bdev_inject_crash: 2
      • success: True
      • failure_reason:
      • status: pass
      • nuke_on_error: True
      • os_type: ubuntu
      • runtime: 0:27:15
      • suite_sha1: 717ce59b76c659aaef8c5aec1355c0ac5cef7234