Description: fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds-full}

Log: http://qa-proxy.ceph.com/teuthology/vshankar-2023-09-08_07:03:01-fs-wip-vshankar-testing-20230830.153114-testing-default-smithi/7391031/teuthology.log

Failure Reason:

"2023-09-08T07:37:56.609540+0000 osd.6 (osd.6) 3 : cluster [WRN] OSD bench result of 136088.206206 IOPS exceeded the threshold limit of 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]." in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/vshankar-2023-09-08_07:03:01-fs-wip-vshankar-testing-20230830.153114-testing-default-smithi/7391031/teuthology.log
  • archive_path: /home/teuthworker/archive/vshankar-2023-09-08_07:03:01-fs-wip-vshankar-testing-20230830.153114-testing-default-smithi/7391031
  • description: fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds-full}
  • duration: 0:27:01
  • email: vshankar@redhat.com
  • failure_reason: "2023-09-08T07:37:56.609540+0000 osd.6 (osd.6) 3 : cluster [WRN] OSD bench result of 136088.206206 IOPS exceeded the threshold limit of 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]." in cluster log
  • flavor: default
  • job_id: 7391031
  • kernel:
    • client:
      • branch: testing
    • flavor: default
    • kdb: True
    • sha1: 533e5fc8ca061dbf84e56917eecb852edf10a398
  • last_in_suite: False
  • machine_type: smithi
  • name: vshankar-2023-09-08_07:03:01-fs-wip-vshankar-testing-20230830.153114-testing-default-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: wip-vshankar-testing-20230830.153114
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
          • disabled
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • pid file:
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • client.0:
          • debug client: 20
          • debug objectcacher: 20
          • debug objecter: 20
        • client.1:
          • debug client: 20
          • debug objectcacher: 20
          • debug objecter: 20
        • global:
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
          • mon osd backfillfull ratio: 0.6
          • mon osd full ratio: 0.7
          • mon osd nearfull ratio: 0.6
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • memstore device bytes: 200000000
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 1.0
          • osd mon report interval: 5
          • osd objectstore: memstore
          • osd op complaint time: 180
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • \(POOL_APP_NOT_ENABLED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • OSD full dropping all updates
        • OSD near full
        • pausewr flag
        • failsafe engaged, dropping updates
        • failsafe disengaged, no longer dropping
        • is full \(reached quota
        • POOL_FULL
        • POOL_BACKFILLFULL
        • PG_RECOVERY_FULL
        • PG_DEGRADED
      • sha1: 6c8583a87a5de28b8b0ab93d1f128c1a38a1bf71
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 6c8583a87a5de28b8b0ab93d1f128c1a38a1bf71
    • kclient:
      • syntax: v1
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-vshankar-testing-20230830.153114
      • sha1: 6c8583a87a5de28b8b0ab93d1f128c1a38a1bf71
  • owner: scheduled_vshankar@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.c', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'client.2']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.b', 'mds.d', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'client.3']
  • sentry_event:
  • status: fail
  • success: False
  • branch: wip-vshankar-testing-20230830.153114
  • seed:
  • sha1: 6c8583a87a5de28b8b0ab93d1f128c1a38a1bf71
  • subset:
  • suite:
  • suite_branch: wip-vshankar-testing-20230830.153114
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 6c8583a87a5de28b8b0ab93d1f128c1a38a1bf71
  • targets:
    • smithi019.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNeF99IH1iwlyczPMjOYZiA+LJ5h3o1uFgJYSwEBYRY1gOlT9D8zCajkirawoOfcz/Dr6CO4ZteuGRhDHzWVPy4=
    • smithi140.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGjm7sR5d3Q9CHGw9862rB8YkDx8CrrMRgcBAUDHnKwr3TYX1YjoyI+D+fI9POXNSn6Y0y3noPiNYz6ubfRofUM=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • flavor: default
      • kdb: True
      • sha1: 533e5fc8ca061dbf84e56917eecb852edf10a398
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
      • flavor: default
      • sha1: 6c8583a87a5de28b8b0ab93d1f128c1a38a1bf71
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_full
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-09-08 07:03:21
  • started: 2023-09-08 07:04:20
  • updated: 2023-09-08 07:53:02
  • status_class: danger
  • runtime: 0:48:42
  • wait_time: 0:21:41
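
One way to keep this warning from failing future runs is a suite-level ignorelist entry; a hedged sketch (the exact regex, and which yaml fragment of the fs suite should carry it, are assumptions, not something this branch is known to include):

    overrides:
      ceph:
        log-ignorelist:
          - OSD bench result of .* IOPS exceeded the threshold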