Description: fs:workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/pjd}}

Log: http://qa-proxy.ceph.com/teuthology/khiremat-2024-01-28_10:21:48-fs:workload-wip-khiremat-49945-referent-inode-3-distro-default-smithi/7535748/teuthology.log

Failure Reason:

hit max job timeout
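
The record below shows the job started at 11:32:36 and was still in flight when teuthology marked it dead at 23:42:58, a runtime of 12:10:22 — consistent with the runaway-job watchdog firing rather than a test assertion failing. A quick way to locate where the run stalled, assuming the log is still retained on qa-proxy (the grep pattern is only a starting point):

    curl -sO http://qa-proxy.ceph.com/teuthology/khiremat-2024-01-28_10:21:48-fs:workload-wip-khiremat-49945-referent-inode-3-distro-default-smithi/7535748/teuthology.log
    grep -nE 'max job timeout|slow request|Health check' teuthology.log | tail -n 20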

  • log_href: http://qa-proxy.ceph.com/teuthology/khiremat-2024-01-28_10:21:48-fs:workload-wip-khiremat-49945-referent-inode-3-distro-default-smithi/7535748/teuthology.log
  • archive_path: /home/teuthworker/archive/khiremat-2024-01-28_10:21:48-fs:workload-wip-khiremat-49945-referent-inode-3-distro-default-smithi/7535748
  • description: fs:workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/pjd}}
  • duration:
  • email:
  • failure_reason: hit max job timeout
  • flavor:
  • job_id: 7535748
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: khiremat-2024-01-28_10:21:48-fs:workload-wip-khiremat-49945-referent-inode-3-distro-default-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.6
  • overrides:
    • admin_socket:
      • branch: wip-khiremat-49945-referent-inode-3
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 3
        • session_timeout: 300
        • standby_replay: True
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • fuse default permissions: False
          • fuse set user groups: True
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • global:
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds bal fragment size max: 10000
          • mds bal merge size: 5
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • cephsqlite lock renewal timeout: 900000
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
          • osd shutdown pgref assert: True
          • osd_max_omap_entries_per_request: 10000
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • \(POOL_APP_NOT_ENABLED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • slow metadata IO
        • SLOW_OPS
        • slow request
      • sha1: c8f2a2aa1011587fa007db2cafda0fd9cfe4c9d2
      • subvols:
        • create: 2
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • ceph-fuse:
      • client.0:
        • mount_subvol_num: 0
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
    • install:
      • ceph:
        • flavor: default
        • sha1: c8f2a2aa1011587fa007db2cafda0fd9cfe4c9d2
    • kclient:
      • client.0:
        • mount_subvol_num: 1
      • mntopts:
        • ms_mode=secure
        • wsync
      • syntax: v2
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-khiremat-49945-referent-inode-3
      • sha1: c8f2a2aa1011587fa007db2cafda0fd9cfe4c9d2
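
Two of the overrides above are worth decoding. The ec_profile entries under overrides.ceph.cephfs mean the CephFS data pool is erasure-coded (the bluestore-ec-root facet of the description); done by hand it would look roughly like the following sketch, where the profile and pool names are illustrative:

    ceph osd erasure-code-profile set teuth_ec k=2 m=2 crush-failure-domain=osd
    ceph osd pool create cephfs_data 64 64 erasure teuth_ec
    ceph osd pool set cephfs_data allow_ec_overwrites true

Likewise, the kclient override (v2 mount syntax, ms_mode=secure, wsync) corresponds to a kernel mount along these lines, with the client name and mountpoint assumed and credential lookup left to mount.ceph:

    sudo mount -t ceph admin@.cephfs=/ /mnt/cephfs -o ms_mode=secure,wsync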
  • owner: scheduled_khiremat@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'mds.g', 'mds.j', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'mds.h', 'mds.k', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'mds.i', 'mds.l', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event:
  • status: dead
  • success:
  • branch: wip-khiremat-49945-referent-inode-3
  • seed:
  • sha1: c8f2a2aa1011587fa007db2cafda0fd9cfe4c9d2
  • subset:
  • suite:
  • suite_branch: wip-khiremat-49945-referent-inode-3
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: c8f2a2aa1011587fa007db2cafda0fd9cfe4c9d2
  • targets:
    • smithi124.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOTk71JvBDDpbmIHDRD+K6J+PIG1EBhrhkZ1QZGv35Ne4jdJdqtXha9u+Zfy+Tn+pRvdxFtVgMXsX/7rR5ocWv0=
    • smithi138.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBD/p3LDltuXWnb17XbjZYT4pS1s+9wkZSYxOBC0wxFJF+tvOScGFjrl9NzSqVGiiJ3Lw/A06cV+yRbv4FT/DelY=
    • smithi188.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAk9FNGsaaCQ+rL5eMIpqzKCDYJmVpEhk17U0ujKveX+rYHVMSVwLuSKWKGk/V9xUeoJfGH2NUFj1ppFsslNrZo=
  • tasks:
    • pexec:
      • all:
        • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
        • sudo dnf -y module reset container-tools
        • sudo dnf -y module install container-tools:rhel8 --allowerasing --nobest
        • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
    • cephadm:
      • roleless: False
    • cephadm.shell:
      • mon.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • mon.a:
        • ceph fs dump
        • ceph osd dump
    • fs.ready:
      • timeout: 300
    • kclient:
    • exec:
      • mon.a:
        • ceph fs set cephfs balance_automate true
    • check-counter:
    • fwd_scrub:
      • scrub_timeout: 900
      • sleep_between_iterations: 1
    • background_exec:
      • mon.a:
        • while sleep 13; do ceph tell mds.cephfs:0 flush journal; done
    • workunit:
      • clients:
        • all:
          • suites/pjd.sh
      • timeout: 6h
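
Of the tasks above, the last four interact: check-counter verifies the mds.exported/mds.imported counters configured in the overrides, fwd_scrub drives periodic forward scrubs (the 2-scrub/yes facet), and background_exec keeps flushing the rank 0 journal every 13 seconds while the pjd workunit runs (the 4-flush/yes facet). The flush it loops on can be issued once by hand against any rank, e.g.:

    ceph tell mds.cephfs:0 flush journal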
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2024-01-28 10:22:03
  • started: 2024-01-28 11:32:36
  • updated: 2024-01-28 23:42:58
  • status_class: danger
  • runtime: 12:10:22
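
A job that dies on the watchdog is usually rescheduled before deeper triage, since the timeout may be environmental rather than a regression. A sketch of rerunning just the dead jobs from this run, assuming a teuthology checkout with scheduling access (flag spellings can differ across teuthology versions):

    teuthology-suite --machine-type smithi --priority 75 \
        --rerun khiremat-2024-01-28_10:21:48-fs:workload-wip-khiremat-49945-referent-inode-3-distro-default-smithi \
        --rerun-statuses dead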