Description: fs/functional/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} subvol_versions/create_subvol_version_v2 tasks/mds-full}

Log: http://qa-proxy.ceph.com/teuthology/rishabh-2024-09-13_14:42:16-fs-wip-rishabh-testing-20240910.100730-reef-distro-default-smithi/7903991/teuthology.log

Failure Reason:

"2024-09-13T18:35:27.039521+0000 osd.6 (osd.6) 3 : cluster [WRN] OSD bench result of 137251.503865 IOPS exceeded the threshold limit of 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]." in cluster log

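The quoted line is the mClock scheduler's OSD-bench sanity check: the self-benchmark measured roughly 137k IOPS, far above the 500 IOPS plausibility threshold (likely because the OSDs in this job run on memstore, per "osd objectstore: memstore" in the overrides below), so osd.6 keeps its default capacity of 315 IOPS and logs a WRN. The job is then marked failed simply because that WRN appears in the cluster log and no pattern in the job's log-ignorelist matches it. Below is a minimal sketch of the two usual ways to handle this via a teuthology override, assuming the warning should be tolerated in QA; the ignorelist regex is illustrative rather than an existing qa fragment, and the 5000 IOPS value is a placeholder that would normally come from an fio measurement, as the warning itself recommends:

    # Hypothetical teuthology override fragment (not the fix actually applied to this suite):
    overrides:
      ceph:
        # Option 1: treat the bench warning as benign for this test.
        log-ignorelist:
          - OSD bench result of .* IOPS exceeded the threshold
        # Option 2: pin the capacity so the bench-vs-threshold check never changes anything.
        conf:
          osd:
            osd_mclock_max_capacity_iops_ssd: 5000  # placeholder; measure with fio and pick the _hdd or _ssd variant to match the device class

Outside of QA, the equivalent override is "ceph config set osd.6 osd_mclock_max_capacity_iops_ssd <measured value>" (or the _hdd variant for rotational devices), again with a value established by fio rather than by the OSD's own bench.
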
  • log_href: http://qa-proxy.ceph.com/teuthology/rishabh-2024-09-13_14:42:16-fs-wip-rishabh-testing-20240910.100730-reef-distro-default-smithi/7903991/teuthology.log
  • archive_path: /home/teuthworker/archive/rishabh-2024-09-13_14:42:16-fs-wip-rishabh-testing-20240910.100730-reef-distro-default-smithi/7903991
  • description: fs/functional/{begin/{0-install 1-ceph 2-logrotate 3-modules} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} subvol_versions/create_subvol_version_v2 tasks/mds-full}
  • duration: 0:20:04
  • email:
  • failure_reason: "2024-09-13T18:35:27.039521+0000 osd.6 (osd.6) 3 : cluster [WRN] OSD bench result of 137251.503865 IOPS exceeded the threshold limit of 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]." in cluster log
  • flavor: default
  • job_id: 7903991
  • kernel:
    • client:
      • branch: testing
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: rishabh-2024-09-13_14:42:16-fs-wip-rishabh-testing-20240910.100730-reef-distro-default-smithi
  • nuke_on_error:
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: wip-rishabh-testing-20240910.100730-reef
    • ceph:
      • cephfs:
        • ec_profile:
          • disabled
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • pid file:
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • client.0:
          • debug client: 20
          • debug objectcacher: 20
          • debug objecter: 20
        • client.1:
          • debug client: 20
          • debug objectcacher: 20
          • debug objecter: 20
        • global:
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
          • mon osd backfillfull ratio: 0.6
          • mon osd full ratio: 0.7
          • mon osd nearfull ratio: 0.6
          • mon warn on pool no app: False
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: bitmap
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • memstore device bytes: 200000000
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 1.0
          • osd mon report interval: 5
          • osd objectstore: memstore
          • osd op complaint time: 180
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • MDS_UP_LESS_THAN_MAX
        • filesystem is online with fewer MDS than max_mds
        • POOL_APP_NOT_ENABLED
        • do not have an application enabled
        • overall HEALTH_
        • Replacing daemon
        • deprecated feature inline_data
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • MON_DOWN
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
        • OSD full dropping all updates
        • OSD near full
        • pausewr flag
        • failsafe engaged, dropping updates
        • failsafe disengaged, no longer dropping
        • is full \(reached quota
        • POOL_FULL
        • POOL_BACKFILLFULL
        • PG_RECOVERY_FULL
        • PG_DEGRADED
      • sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
    • kclient:
      • syntax: v2
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:getty_t:s0
    • subvolume_version: 2
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-rishabh-testing-20240910.100730-reef
      • sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
  • owner: scheduled_rishabh@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.c', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'client.2']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.b', 'mds.d', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'client.3']
  • sentry_event:
  • status: fail
  • success: False
  • branch: wip-rishabh-testing-20240910.100730-reef
  • seed: 6439
  • sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
  • subset: 964/1024
  • suite: fs
  • suite_branch: wip-rishabh-testing-20240910.100730-reef
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph-c_6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph-ci.git
  • suite_sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
  • targets:
    • smithi192.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBG+FKXfHyDAI388pAzNkG9iN/+a6FlEr/n7nD5kIo66Dd++5mTFFvMJ1PwTXkrS8SdawLNf3Feagw/suQMDDHas=
    • smithi194.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOQri4ZhPaft15AKHIODYXUxGJIy+tosH7V8FPugKotbayy7cJBiMk9KCskWFQMX67BNkBG0WGHb+0JeXP1wRDU=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
      • flavor: default
      • sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
    • ceph:
      • create_rbd_pool: False
      • cephfs:
        • ec_profile:
          • disabled
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • pid file:
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • client.0:
          • debug client: 20
          • debug objectcacher: 20
          • debug objecter: 20
        • client.1:
          • debug client: 20
          • debug objectcacher: 20
          • debug objecter: 20
        • global:
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
          • mon osd backfillfull ratio: 0.6
          • mon osd full ratio: 0.7
          • mon osd nearfull ratio: 0.6
          • mon warn on pool no app: False
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: bitmap
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • memstore device bytes: 200000000
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 1.0
          • osd mon report interval: 5
          • osd objectstore: memstore
          • osd op complaint time: 180
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • MDS_UP_LESS_THAN_MAX
        • filesystem is online with fewer MDS than max_mds
        • POOL_APP_NOT_ENABLED
        • do not have an application enabled
        • overall HEALTH_
        • Replacing daemon
        • deprecated feature inline_data
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • MON_DOWN
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
        • OSD full dropping all updates
        • OSD near full
        • pausewr flag
        • failsafe engaged, dropping updates
        • failsafe disengaged, no longer dropping
        • is full \(reached quota
        • POOL_FULL
        • POOL_BACKFILLFULL
        • PG_RECOVERY_FULL
        • PG_DEGRADED
      • sha1: 6c6b083f148f500c4f56d402b9e4ffcab6f1e2d3
      • cluster: ceph
    • sequential:
      • mgrmodules
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_full
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 70
  • user: rishabh
  • queue:
  • posted: 2024-09-13 14:45:08
  • started: 2024-09-13 18:15:18
  • updated: 2024-09-13 18:49:19
  • status_class: danger
  • runtime: 0:34:01
  • wait_time: 0:13:57