Description: fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/5 scrub/no standby-replay tasks/{0-check-counter workunit/fs/test_o_trunc} wsync/{no}}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2022-09-20_04:17:02-fs-pacific-distro-default-smithi/7038519/teuthology.log

Failure Reason:

hit max job timeout
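
The Description line above packs the whole job matrix into one string: everything after the suite path fs/workload/ is a set of space-separated facet fragments, with nested braces grouping sub-facets (for example mount/kclient/{...} or wsync/{no}). As a readability aid, a small brace-aware splitter along the following lines (a Python sketch, not teuthology's own parser; the name split_facets is ours) recovers the individual fragments:

    # Sketch: split a teuthology job description into its top-level facets.
    # Assumes the "<suite path>/{frag frag ...}" shape seen in the Description
    # above; fragments are separated by spaces at the outermost brace level.
    def split_facets(description):
        prefix, _, body = description.partition("{")
        if body.endswith("}"):
            body = body[:-1]              # drop the brace closing the outer group
        frags, cur, depth = [], [], 0
        for ch in body:
            if ch == "{":
                depth += 1
            elif ch == "}":
                depth -= 1
            if ch == " " and depth == 0:
                frags.append("".join(cur))   # facet boundary at top brace level
                cur = []
            else:
                cur.append(ch)
        if cur:
            frags.append("".join(cur))
        return prefix, frags

    # Applied to the Description above, this yields fragments such as
    # "ranks/5", "scrub/no", "tasks/{0-check-counter workunit/fs/test_o_trunc}"
    # and "wsync/{no}", one per facet directory in the fs suite.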

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2022-09-20_04:17:02-fs-pacific-distro-default-smithi/7038519/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2022-09-20_04:17:02-fs-pacific-distro-default-smithi/7038519
  • description: fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/5 scrub/no standby-replay tasks/{0-check-counter workunit/fs/test_o_trunc} wsync/{no}}
  • duration:
  • email: ceph-qa@ceph.io
  • failure_reason: hit max job timeout
  • flavor:
  • job_id: 7038519
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2022-09-20_04:17:02-fs-pacific-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: pacific
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 5
        • session_timeout: 300
        • standby_replay: True
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • global:
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug ms: 1
          • mds bal fragment size max: 10000
          • mds bal merge size: 5
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • bluestore block size: 96636764160
          • bluestore compression mode: aggressive
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
          • osd shutdown pgref assert: True
          • osd_max_omap_entries_per_request: 10000
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • sha1: 49ab619e96a6d6180c168401fa99e4d359b11dc9
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
      • dry_run: True
    • install:
      • ceph:
        • flavor: default
        • sha1: 49ab619e96a6d6180c168401fa99e4d359b11dc9
    • kclient:
      • mntopts:
        • nowsync
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: pacific
      • sha1: 49ab619e96a6d6180c168401fa99e4d359b11dc9
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'mds.g', 'mds.j', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'mds.h', 'mds.k', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'mds.i', 'mds.l', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event:
  • status: dead
  • success:
  • branch: pacific
  • seed:
  • sha1: 49ab619e96a6d6180c168401fa99e4d359b11dc9
  • subset:
  • suite:
  • suite_branch: pacific
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 49ab619e96a6d6180c168401fa99e4d359b11dc9
  • targets:
    • smithi032.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDhz2UIPs8hzbQ2ah+gO9ZWF267rIMfcEV9am8l9F9xb9JkziN/kZhXRFKYe7NN1JxdrSIbSzIz9jNuMD1YlLtQJnYXBZZNww3/1U6GhQJo5EAelUNxAW/BFERTG7XtZzKKo/9pMWBwBf3VOR9XdVjnMFqsZTsnU8d1ctftLfL6eIR0cQEKZ70djJ0iV0gkYKBYGaeOtCwIbEr7tLW+sNKRecDVSmhjuwojdBtGy+7Y4RYF5iMUTuM70lr8QBEs6iObIpyP/jNN23vg2JUMyWbykco6UKiXkz93cG5AQq3TwNOKXnb9CmcxfqYJ0lXKu49Wiu7tT1TUyVQ/znTaBAaYMenn/7q1BSr2SDwGVcOFhMeitw63Ys11ykf1HFATXZKqjieG4AgSgcZQhQ7oZ/JaL0vGmE41GDh79f8ggipLGo3bTT8cFY38pPtWuBwXB+ml4r4IRO39tXANXxJX4dfBU0y2DLcUK6TRlILGGJa3edu8RDCcJOpXXhHUo/eFxVU=
    • smithi087.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3kzPhlPGimnB8lSWbfGu2HHs1HIAMSX5H3cJ29fCJUpstfxzs0tqLDeu56NVHkj4x+LhUKh9Jr4H/gVBTCdMjH/27uAx1zU797i595TV1YJE21/sgkmCKT9VBfE5QzWt20c860rZPetUBhl/YoALL7+8h7OnfMgI+MrQTHucsuN1hdQyNDKC9HF0BouC9ws/nROqF2Ib+h8ddthk9Ti5igScLPGAu4V9hZZrpwNADI7m1si4P0xvStFcNjMNuMm4B8mWjU8xF1c55eH3HStNLKvon7qYu8YcbPu57Plr/LqoLhkSqTHQReYTTP1mxGObBGQx/fpOvJ8zKQTvN5G5YkhU9NT1za67Sq+WkD18Ft3FSqTiqvKFLM8MI+ZmVmCKjesVC76KN39AtHjawODETFw9eewDp4tmJ5Hya+6ErQqRoSAri5tLmB7chCvqdFM8cZx4hycM5zkL0GvZRVsDCwQ3Jp5Wu0fiNJ5Q3CCCycEaqQzcaqDVXDiTaa2aixc8=
    • smithi169.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCxZCaBGiJPfPinfYrfxlp21htuPFY6wpZSylECU3yVy/QNkGLmpqBvq14cwHWEAFgt/BgslHu0CzsCw1+w7SQTZw2/W5YXMDDRixlTXv9h6Fo/qWXApsiTZkdnpPu5ewFneE5c9LXD9BCmu8o1ouoA6tf2azdNi4nEs3dD/O2XVNThVF9z30oAK8p9RGGApHxwfmnar+JoHO+X9xOxQM4Suyid92+Xoj3RkeZTcwx3NP0kxF1FRbHn2uNJv73OHBYEm0g3Qh9i0F9fAnqxy/jk3NSdC4Aix6IYPx8SJk+lEX9z0qy+fk6z/OCStrPg3Fxb1AnTqrfwZE1DXht17rslSdXnTrmijvU5I9AnGQK5J4WPE2scyircHh3nKWzuObcwBzFb14CD4nbJz8xTdk6tXje+hHkBmmzedOWBlRVHm4ZnAv48TkWKoA81uPARdLk4ETwyen7c3ckF3b49Elt3KouuLaKP4ZjwbJuCIws5z9L5+1GmrbxSLYH2tao/jRc=
  • tasks:
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
    • ceph:
    • kclient:
    • check-counter:
      • workunit:
        • clients:
          • all:
            • fs/test_o_trunc.sh
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2022-09-20 04:20:52
  • started: 2022-09-20 11:25:29
  • updated: 2022-09-20 23:33:55
  • status_class: danger
  • runtime: 12:08:26
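
For orientation, the cephfs overrides above (ec_profile k=2/m=2 with crush-failure-domain=osd, max_mds: 5, session_timeout: 300, standby_replay: True) correspond roughly to the file system shape sketched below. This is a hedged approximation using stock ceph CLI calls; the profile and pool names (teuth_ec, cephfs_data_ec, cephfs_metadata) are illustrative, and the suite's own Python tasks build the cluster differently.

    # Rough sketch of the file system implied by the job's overrides.
    # Pool and profile names are made up for illustration, not taken from the job.
    import subprocess

    def ceph(*args):
        """Run one ceph CLI command and fail loudly if it errors."""
        subprocess.run(["ceph", *args], check=True)

    # EC profile matching the ec_profile override (k=2, m=2, failure domain = osd).
    ceph("osd", "erasure-code-profile", "set", "teuth_ec",
         "k=2", "m=2", "crush-failure-domain=osd")

    # EC data pool as the default data pool ("bluestore-comp-ec-root"),
    # plus a replicated metadata pool.
    ceph("osd", "pool", "create", "cephfs_data_ec", "erasure", "teuth_ec")
    ceph("osd", "pool", "set", "cephfs_data_ec", "allow_ec_overwrites", "true")
    ceph("osd", "pool", "create", "cephfs_metadata")

    # --force is required when the default data pool is erasure-coded.
    ceph("fs", "new", "cephfs", "cephfs_metadata", "cephfs_data_ec", "--force")

    # ranks/5, standby-replay and session_timeout from the overrides.
    ceph("fs", "set", "cephfs", "max_mds", "5")
    ceph("fs", "set", "cephfs", "allow_standby_replay", "true")
    ceph("fs", "set", "cephfs", "session_timeout", "300")

The client side then mounts with the kernel driver using the nowsync option (the wsync/{no} facet, reflected in the kclient mntopts above) before running the fs/test_o_trunc.sh workunit from qa/workunits in the ceph repository.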