Description: rbd/qemu/{cache/writeback clusters/{fixed-3 openstack} features/readbalance msgr-failures/few objectstore/bluestore-hybrid pool/ec-cache-pool supported-random-distro$/{centos_8} workloads/qemu_fsstress}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2021-05-08_06:07:03-rbd-pacific-distro-basic-smithi/6105719/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2021-05-08_06:07:03-rbd-pacific-distro-basic-smithi/6105719/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2021-05-08_06:07:03-rbd-pacific-distro-basic-smithi/6105719
  • description: rbd/qemu/{cache/writeback clusters/{fixed-3 openstack} features/readbalance msgr-failures/few objectstore/bluestore-hybrid pool/ec-cache-pool supported-random-distro$/{centos_8} workloads/qemu_fsstress}
  • duration: 0:12:41
  • email: ceph-qa@ceph.io
  • failure_reason:
  • flavor: basic
  • job_id: 6105719
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2021-05-08_06:07:03-rbd-pacific-distro-basic-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.2
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 1/20
          • debug bluefs: 1/20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 4/10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • sha1: df487331eefb15b716b05118803c8aa8f9ad6ffb
      • branch: pacific
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 8cff8f0cd24ae0ca939c1647cfbcaaad27e3ed1f
      • fs: xfs
      • conf:
        • global:
          • ms inject socket failures: 5000
          • mon client directed command retry: 5
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • rbd read from replica policy: balance
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
        • osd:
          • mon osd full ratio: 0.9
          • bluestore allocator: hybrid
          • bluestore fsck on mount: True
          • debug osd: 20
          • debug bluestore: 1/20
          • debug bluefs: 1/20
          • osd objectstore: bluestore
          • debug ms: 1
          • osd failsafe full ratio: 0.95
          • bluefs allocator: hybrid
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug rocksdb: 4/10
          • mon osd nearfull ratio: 0.8
          • mon osd backfillfull_ratio: 0.85
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • but it is still running
        • \(OSD_SLOW_PING_TIME
        • overall HEALTH_
        • \(CACHE_POOL_NEAR_FULL\)
        • \(CACHE_POOL_NO_HIT_SET\)
    • install:
      • ceph:
        • sha1: 8cff8f0cd24ae0ca939c1647cfbcaaad27e3ed1f
    • admin_socket:
      • branch: pacific
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
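The client override above sets rbd read from replica policy: balance (the features/readbalance facet of this job). As a rough, non-authoritative sketch, the same setting could be applied to a running cluster through the config database; the underscored option name below is assumed to be the equivalent of the conf key shown above, and balanced replica reads need octopus-or-newer clients, which this job enforces later via ceph osd set-require-min-compat-client octopus:

      # Sketch only: apply the client-side RBD read-balancing setting cluster-wide.
      ceph config set client rbd_read_from_replica_policy balance
      # Balanced (or localized) replica reads require octopus-or-newer clients:
      ceph osd set-require-min-compat-client octopus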
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.x', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mgr.y', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
  • sentry_event:
  • status: pass
  • success: True
  • branch: pacific
  • seed:
  • sha1: 8cff8f0cd24ae0ca939c1647cfbcaaad27e3ed1f
  • subset:
  • suite:
  • suite_branch: pacific
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: df487331eefb15b716b05118803c8aa8f9ad6ffb
  • targets:
    • smithi104.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDe5RnPXf5zqzuc0MPYRA3SM3hHyCXZuGHvPAmAKzJ8n2lC5j9eGtofJMHe7kpdHKdH5CIltZur2mdrwLywmYv1m5cbgBpzvchW8iq2bIF6izqPJ58oBZdKjWqAK6gsTIP6L3GGDW4RS6cw6wa6ZgCjypdsBHMmNAYoV1fd6atbX6EQy31sm7+tDoPl4k8eBWn0P705ecfNSzLk3VlO38/usc4vFVdVoljjOvA6P6h6EyXPzr8ZpeMdouk9jy/2fsH657cUKW0KsYVHmP9wnhSBpnbwLmnBZ3tA1uldWkcicsvMi6xNXspc3c7dBYG9/gDFbUicOAAd5EXyvZ/m/WXbsIncZK0gHNR0yjUFwY8XhwyFGNaLK+OevH+7wQuDwTe9HoAlS/VxpO0wR+V7HtLt+UJqcFzYGhHcAjD/72J4CK2CrAGQ4U8l8scFwStRwagefID0X29/8vRY0rFStWOguGjyw07GnlvBmJ3ym9VOD53y1rsIi/EjXx1xZJs/uZk=
    • smithi052.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDETfOpRgHbev73NBh0UEkxfb+gQnoRppDokrAmX2JzdujN3GF8oYP8VYBSsLk0nxvV5feIUN4NIx1QDVXzWklvppjziYNmRnq2V5EY0vVn3Z+5HkBwJIc/i9r4C8UW+ydfnSobVe5ipkNQ6l9PyrYfd7tnFg2CxQ0/lDQk3fhSl0U5TnErRM/t3Jd0hv/5XDLBKerX0lXyMfpUcL2s2lwtTd+vQACvZ7HB7e14EzNe7PEcTj1xmacg0c6wCdaLOPvJCQmikIIBRW2VP59ixH+FISZOx3Q+AHEPFIm/BWbvpA7jptnMb7q4nX49e6t4ioQQfY450EBcsV8nhTuEh3OK944yIE9tEdz8gvdzgFAbJm0xLEOimU+tXhJ9sWo6UH7NWNuGi3ftVPbNCGYZoDQdANS/WFS50RlHAr2TAbRcgu+FluGVeEPBGAmuLMreddXVoOWTLrsGu3fXAYQWdJXyogGZvGHoc1T0TMR52TGnfH7z3Av7c41jQ31LxdG+np8=
    • smithi185.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBna+8snWS5s4XeA08cX/HrpdukeHjfThdYpKV9BAIXr0911gaisvv6knr0FtmSIhHUVAh6NWXLBYp143X1s1dZiTYw9a/TG5Pvb2riFBcAUinoLvT1GS8eOVw/Zg1TywxukuOdSw4hwUtO7IcEufugYD8dzIycKqyWJ/dkvVuILpkmeq1ftncqXwOLu2Fwb/1YHV1DVLIWFKBM/rBoYVnWbj9/RvEs5hoMcr3VB5blaNPZbQWg/uPcyLrHFU9iwVDua1+6cWVZxLB8Px9WP+LLcue9Bxw+JqhNSyhSVcCA0I+DK7wSqCGUEZ6DYOs47Gv5qb77PpnGChF3Ia87pBCBmdwfc/Pxfd2cTbp2zfB+/jH2cfc45Gjo4JjAhwYJ3sYmyuJbpuLZJ948sP2pCSXd5QuW0oROoZzqCYQCjcHZjeD2tybsHQjYQHuNDs8A1olBq8yyhLQ8mgYZr5rY0zmuz2/Q/Y9VpNiPGY8G3a/t4kRk8MKh9ONFUoo/H7AHAs=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • cluster: ceph
      • sha1: 8cff8f0cd24ae0ca939c1647cfbcaaad27e3ed1f
      • conf:
        • global:
          • ms inject socket failures: 5000
          • mon client directed command retry: 5
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • rbd cache policy: writeback
          • rbd read from replica policy: balance
          • rbd cache: True
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
        • osd:
          • mon osd full ratio: 0.9
          • bluestore allocator: hybrid
          • bluestore fsck on mount: True
          • debug osd: 20
          • debug bluestore: 1/20
          • debug bluefs: 1/20
          • osd objectstore: bluestore
          • debug ms: 1
          • osd failsafe full ratio: 0.95
          • bluefs allocator: hybrid
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug rocksdb: 4/10
          • mon osd nearfull ratio: 0.8
          • mon osd backfillfull_ratio: 0.85
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • but it is still running
        • \(OSD_SLOW_PING_TIME
        • overall HEALTH_
        • \(CACHE_POOL_NEAR_FULL\)
        • \(CACHE_POOL_NO_HIT_SET\)
    • exec:
      • osd.0:
        • ceph osd require-osd-release pacific
        • ceph osd set-require-min-compat-client octopus
    • exec:
      • client.0:
        • sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
        • sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
        • sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
        • sudo ceph osd pool create cache 4
        • sudo ceph osd tier add rbd cache
        • sudo ceph osd tier cache-mode cache writeback
        • sudo ceph osd tier set-overlay rbd cache
        • sudo ceph osd pool set cache hit_set_type bloom
        • sudo ceph osd pool set cache hit_set_count 8
        • sudo ceph osd pool set cache hit_set_period 60
        • sudo ceph osd pool set cache target_max_objects 250
        • rbd pool init rbd
    • qemu:
      • all:
        • test: qa/workunits/suites/fsstress.sh
        • clone: True
        • disks:
          • image_name: client.0.0
            image_url: http://download.ceph.com/qa/ubuntu-12.04.qcow2
            device_type: filesystem
            image_size: 10240
            action: create
            encryption_format: none
          • image_name: client.0.1
            device_type: filesystem
            image_size: 10240
            action: create
            encryption_format: none
          • image_name: client.0.0-clone
            image_url: http://download.ceph.com/qa/ubuntu-12.04.qcow2
            device_type: filesystem
            image_size: 10240
            parent_name: client.0.0
            device_letter: a
            action: clone
            encryption_format: none
          • image_name: client.0.1-clone
            device_type: filesystem
            image_size: 10240
            parent_name: client.0.1
            device_letter: b
            action: clone
            encryption_format: none
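The two exec steps above implement the ec-cache-pool facet: the default rbd pool is recreated as a k=2/m=1 erasure-coded pool and fronted by a replicated cache tier in writeback mode with a bloom hit set. A minimal sketch of how the resulting layout could be inspected afterwards; these checks are not part of the job:

      # Sketch only: inspect what the exec steps above created.
      ceph osd erasure-code-profile get teuthologyprofile   # k=2, m=1, crush-failure-domain=osd
      ceph osd pool ls detail                               # rbd (erasure-coded) with 'cache' as its overlay tier
      ceph osd dump | grep require                          # require_osd_release pacific, require_min_compat_client octopus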
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-05-08 06:08:56
  • started: 2021-05-09 03:28:46
  • updated: 2021-05-09 03:54:10
  • status_class: success
  • runtime: 0:25:24
  • wait_time: 0:12:43
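The qemu task runs qa/workunits/suites/fsstress.sh inside guests backed by the client.0.* images, with clone: True so the workload exercises the *-clone children. For orientation only, a generic snapshot/protect/clone sequence with the rbd CLI is sketched below; image names are taken from the disks list above, and this is not necessarily the exact sequence the qemu task issues:

  # Sketch only: generic RBD clone workflow using names from the disks list.
  rbd snap create rbd/client.0.0@base      # snapshot the parent image (snapshot name is illustrative)
  rbd snap protect rbd/client.0.0@base     # protect the parent snapshot before cloning
  rbd clone rbd/client.0.0@base rbd/client.0.0-clone
  rbd info rbd/client.0.0-clone            # parent should show rbd/client.0.0@base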