Description: fs:workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/fs/misc}}

Log: http://qa-proxy.ceph.com/teuthology/khiremat-2024-06-11_07:27:30-fs:workload-wip-khiremat-57953-scrub-error-0-distro-default-smithi/7750173/teuthology.log

Failure Reason:

"2024-06-11T08:15:08.320892+0000 mds.b (mds.0) 24 : cluster [WRN] Scrub error on inode 0x10000000274 (/volumes/qa/sv_1/a80d82d3-f501-48dc-8030-bdc0f564ba95/client.0/tmp/testdir/dir1/dir2/dir3/dir4/dir5) see mds.b log and `damage ls` output for details" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/khiremat-2024-06-11_07:27:30-fs:workload-wip-khiremat-57953-scrub-error-0-distro-default-smithi/7750173/teuthology.log
  • archive_path: /home/teuthworker/archive/khiremat-2024-06-11_07:27:30-fs:workload-wip-khiremat-57953-scrub-error-0-distro-default-smithi/7750173
  • description: fs:workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate 3-modules} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/fs/misc}}
  • duration: 1:34:18
  • email:
  • failure_reason: "2024-06-11T08:15:08.320892+0000 mds.b (mds.0) 24 : cluster [WRN] Scrub error on inode 0x10000000274 (/volumes/qa/sv_1/a80d82d3-f501-48dc-8030-bdc0f564ba95/client.0/tmp/testdir/dir1/dir2/dir3/dir4/dir5) see mds.b log and `damage ls` output for details" in cluster log
  • flavor: default
  • job_id: 7750173
  • kernel:
    • client:
      • sha1: distro
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: khiremat-2024-06-11_07:27:30-fs:workload-wip-khiremat-57953-scrub-error-0-distro-default-smithi
  • nuke_on_error:
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: wip-khiremat-57953-scrub-error-0
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 5
        • session_timeout: 300
        • standby_replay: True
      • cluster-conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds bal fragment size max: 10000
          • mds bal merge size: 5
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mon:
          • mon op complaint time: 120
          • mon warn on pool no app: False
        • osd:
          • osd op complaint time: 180
      • conf:
        • global:
          • ms die on skipped message: False
        • mds:
          • mds_bal_replicate_threshold: 1
          • mds_export_ephemeral_random_max: 0.1
        • mgr:
          • cephsqlite lock renewal timeout: 900000
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
        • osd:
          • bluestore block size: 96636764160
          • bluestore compression mode: aggressive
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd shutdown pgref assert: True
          • osd_max_omap_entries_per_request: 10000
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • MDS_UP_LESS_THAN_MAX
        • filesystem is online with fewer MDS than max_mds
        • POOL_APP_NOT_ENABLED
        • do not have an application enabled
        • overall HEALTH_
        • Replacing daemon
        • deprecated feature inline_data
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
        • slow metadata IO
        • SLOW_OPS
        • slow request
      • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
      • subvols:
        • create: 2
        • subvol_options: --namespace-isolated --size 25000000000
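
The subvols override above has the suite create two subvolumes with namespace isolation and a ~25 GB quota. A rough hand-rolled equivalent (group name `qa` and subvolume names follow the path in the failure reason; this is a sketch, not the suite's exact tooling):

    ceph fs subvolumegroup create cephfs qa
    ceph fs subvolume create cephfs sv_0 --group_name qa --namespace-isolated --size 25000000000
    ceph fs subvolume create cephfs sv_1 --group_name qa --namespace-isolated --size 25000000000
    ceph fs subvolume getpath cephfs sv_1 --group_name qa    # -> /volumes/qa/sv_1/<uuid>
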
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • ceph-fuse:
      • client.0:
        • mount_subvol_num: 0
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
          • min: 3, name: mds_cache.dir_update
          • min: 3, name: mds_cache.dir_update_receipt
          • mds.dir_split
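
check-counter asserts that these MDS perf counters increased during the run (at least 3 dir_update sends and receipts, plus export/import and dir_split activity). For reference, the counters can be read from an MDS admin socket on the daemon's host; `mds.a` is only an example name and the logger/counter arguments are assumptions:

    ceph daemon mds.a perf dump mds_cache | grep dir_update
    ceph daemon mds.a perf dump mds | grep -E 'exported|imported|dir_split'
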
    • install:
      • ceph:
        • flavor: default
        • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
    • kclient:
      • client.0:
        • mount_subvol_num: 1
      • mntopts:
        • ms_mode=crc
        • nowsync
      • syntax: v1
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:logrotate_t:s0
        • scontext=system_u:system_r:getty_t:s0
        • scontext=system_u:system_r:getty_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-khiremat-57953-scrub-error-0
      • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
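
Per the kclient override, the kernel client mounts subvolume 1 using v1 mount syntax with ms_mode=crc and nowsync. That corresponds roughly to the following mount invocation (monitor address, credentials and the subvolume path are placeholders):

    mount -t ceph <mon-addr>:6789:/volumes/qa/sv_1/<uuid> /mnt/cephfs \
        -o name=admin,secret=<client-key>,ms_mode=crc,nowsync
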
  • owner: scheduled_khiremat@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'mds.g', 'mds.j', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'mds.h', 'mds.k', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'mds.i', 'mds.l', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event:
  • status: fail
  • success: False
  • branch: wip-khiremat-57953-scrub-error-0
  • seed: 3611
  • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
  • subset: 3/4000
  • suite: fs:workload
  • suite_branch: wip-khiremat-57953-scrub-error-0
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph-c_7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph-ci.git
  • suite_sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
  • targets:
    • smithi005.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBF5hz4tqCKxsHlR6lQEOFwUbRxR9jCzWvoSympnHKavnBB3BkvaDViUAPxa2sKa12HoUoKuP6E129Tl7CzXhgCc=
    • smithi089.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOwKiFuyJ5qQ+IMF5YP5koCFt6IZtdOV4PajfqlBEch/AbtXUKvU2CnQvFQWJjzXhfW/JcbUdzJuKrD5O4BqDas=
    • smithi163.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGRBvnPlLwBRdUMnt5CPiTh1FH9LNIclvXSvki9ntP161X4r+wQKNstcBLg4uOPBig2V+DrdRCbLWeDB6j6Y2dI=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • sha1: distro
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
      • flavor: default
      • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
    • cephadm:
      • roleless: False
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 5
        • session_timeout: 300
        • standby_replay: True
      • cluster-conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds bal fragment size max: 10000
          • mds bal merge size: 5
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mon:
          • mon op complaint time: 120
          • mon warn on pool no app: False
        • osd:
          • osd op complaint time: 180
      • conf:
        • global:
          • ms die on skipped message: False
        • mds:
          • mds_bal_replicate_threshold: 1
          • mds_export_ephemeral_random_max: 0.1
        • mgr:
          • cephsqlite lock renewal timeout: 900000
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
        • osd:
          • bluestore block size: 96636764160
          • bluestore compression mode: aggressive
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd shutdown pgref assert: True
          • osd_max_omap_entries_per_request: 10000
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • MDS_UP_LESS_THAN_MAX
        • filesystem is online with fewer MDS than max_mds
        • POOL_APP_NOT_ENABLED
        • do not have an application enabled
        • overall HEALTH_
        • Replacing daemon
        • deprecated feature inline_data
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
        • slow metadata IO
        • SLOW_OPS
        • slow request
      • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
      • subvols:
        • create: 2
        • subvol_options: --namespace-isolated --size 25000000000
      • cluster: ceph
      • cephadm_mode: root
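
The cephadm task creates the filesystem with the settings listed above: a k=2/m=2 erasure-coded data pool for the root, max_mds 5, standby-replay enabled and a 300 s session timeout. A hand-rolled sketch of the same configuration (the profile and pool names here are invented):

    ceph osd erasure-code-profile set ec-22 k=2 m=2 crush-failure-domain=osd
    ceph osd pool create cephfs.data-ec erasure ec-22
    ceph osd pool set cephfs.data-ec allow_ec_overwrites true
    ceph fs add_data_pool cephfs cephfs.data-ec
    ceph fs set cephfs max_mds 5
    ceph fs set cephfs allow_standby_replay true
    ceph fs set cephfs session_timeout 300
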
    • cephadm.shell:
      • mon.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • mon.a:
        • ceph fs dump
        • ceph osd dump
    • fs.ready:
      • timeout: 300
    • sequential:
      • mgrmodules
    • kclient:
    • exec:
      • mon.a:
        • ceph fs set cephfs balance_automate false
        • ceph fs subvolumegroup pin cephfs qa random 0.10
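
The exec step above disables the automatic balancer and ephemerally pins directories under the qa subvolume group with probability 0.10 (the balancer/random facet of this job). The same random pinning could also be applied from a client mount via the ceph.dir.pin.random vxattr; the mount path is illustrative:

    setfattr -n ceph.dir.pin.random -v 0.10 /mnt/cephfs/volumes/qa
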
    • check-counter:
    • fwd_scrub:
      • scrub_timeout: 900
      • sleep_between_iterations: 1
      • cluster: ceph
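
fwd_scrub drives repeated forward scrubs with a 900 s timeout; one of these scrubs raised the inode error quoted in the failure reason. Run by hand, a recursive scrub of the tree would look like this (again assuming the fs name `cephfs`):

    ceph tell mds.cephfs:0 scrub start / recursive
    ceph tell mds.cephfs:0 scrub status
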
    • background_exec:
      • mon.a:
        • while sleep 13; do ceph tell mds.cephfs:0 flush journal; done
    • quiescer:
    • workunit:
      • clients:
        • all:
          • fs/misc
      • branch: wip-khiremat-57953-scrub-error-0
      • sha1: 7b5561ec5ddfb30fdbb0f9b22417fe4282ded68e
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 75
  • user: khiremat
  • queue:
  • posted: 2024-06-11 07:27:45
  • started: 2024-06-11 07:34:08
  • updated: 2024-06-11 09:19:18
  • status_class: danger
  • runtime: 1:45:10
  • wait_time: 0:10:52