Description: fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down osd pg-warn} tasks/{0-client 1-tests/fscrypt-dbench}}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-03-31_21:24:02-fs-squid-distro-default-smithi/7633243/teuthology.log

Failure Reason:

hit max job timeout

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-03-31_21:24:02-fs-squid-distro-default-smithi/7633243/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-03-31_21:24:02-fs-squid-distro-default-smithi/7633243
  • description: fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down osd pg-warn} tasks/{0-client 1-tests/fscrypt-dbench}}
  • duration:
  • email: ceph-qa@ceph.com
  • failure_reason: hit max job timeout
  • flavor:
  • job_id: 7633243
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-03-31_21:24:02-fs-squid-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: squid
    • ceph:
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • global:
          • mon pg warn min per osd: 0
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
          • mon op complaint time: 120
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: bitmap
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • MDS_UP_LESS_THAN_MAX
        • POOL_APP_NOT_ENABLED
        • overall HEALTH_
        • Replacing daemon
        • Reduced data availability
        • Degraded data redundancy
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • sha1: 384f31f3d3aee7f8ba3f60846b8844c049165583
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 384f31f3d3aee7f8ba3f60846b8844c049165583
    • kclient:
      • syntax: v2
    • selinux:
      • allowlist:
        • scontext=system_u:system_r:getty_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: squid
      • sha1: 384f31f3d3aee7f8ba3f60846b8844c049165583
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.c', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
  • sentry_event:
  • status: dead
  • success:
  • branch: squid
  • seed: 8238
  • sha1: 384f31f3d3aee7f8ba3f60846b8844c049165583
  • subset: 361/512
  • suite: fs
  • suite_branch: squid
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph_384f31f3d3aee7f8ba3f60846b8844c049165583/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph.git
  • suite_sha1: 384f31f3d3aee7f8ba3f60846b8844c049165583
  • targets:
    • smithi027.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFU5mMNhVCxK4s4J/bAjv7igIj2TYBoX9XmlIoX1kGQsgXS4NLnjkKp3KqIICuN2dTucPql0hU3CaqoBnCPC3N4=
    • smithi032.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHE2Sb1o7luy5TJb1QhyctIlYy+TvRbxaHVB/kY9ART4+ceog6ipoZPHQNcbEKmxzICF/MXc7ivSUsfsOlFhiHc=
    • smithi060.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBA9fhrFrE5+ab0gdo8zN+Kjxv00xCfzPf6kpKqRkUYxttvgtUeQYVCwVBwH4eB7JbIkg32xUg5YPmNqgGHIyrC4=
  • tasks:
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
    • ceph:
      • create_rbd_pool: False
    • kclient:
    • workunit:
      • clients:
        • client.0:
          • fs/fscrypt.sh none dbench
          • fs/fscrypt.sh unlocked dbench
      • timeout: 6h
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 100
  • user: teuthology
  • queue:
  • posted: 2024-03-31 21:26:50
  • started: 2024-04-01 01:10:27
  • updated: 2024-04-01 13:27:07
  • status_class: danger
  • runtime: 12:16:40
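
The workunit entries under tasks above run qa/workunits/fs/fscrypt.sh against dbench in "none" and "unlocked" encryption modes on client.0, each bounded by the 6h workunit timeout. As a rough, non-authoritative sketch only (the real workunit task clones the qa tree from the suite repo and prepares its own environment; the checkout path below is an assumption), the two invocations amount to:

    # Hedged sketch, not the teuthology workunit task itself: replay the two
    # fscrypt workunit invocations from the tasks section, assuming a local
    # checkout of ceph's qa/workunits and an already-mounted kernel CephFS client.
    import subprocess

    WORKUNIT_TIMEOUT = 6 * 60 * 60  # mirrors the workunit "timeout: 6h" above

    for mode in ("none", "unlocked"):
        subprocess.run(
            ["./fs/fscrypt.sh", mode, "dbench"],
            cwd="/path/to/ceph/qa/workunits",  # assumed checkout location
            check=True,
            timeout=WORKUNIT_TIMEOUT,
        )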
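
For reference, the runtime field is simply the difference between the started and updated timestamps; the small check below (timestamps copied from the fields above) reproduces the reported 12:16:40, consistent with the job running until it was reaped for "hit max job timeout" rather than finishing on its own.

    # Worked check of the reported runtime, using the started/updated values above.
    from datetime import datetime

    FMT = "%Y-%m-%d %H:%M:%S"
    started = datetime.strptime("2024-04-01 01:10:27", FMT)
    updated = datetime.strptime("2024-04-01 13:27:07", FMT)
    print(updated - started)  # 12:16:40, matching the runtime field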