Description: fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/fs/misc}}

Log: http://qa-proxy.ceph.com/teuthology/rishabh-2023-11-04_04:30:51-fs-rishabh-2023nov3-testing-default-smithi/7447090/teuthology.log

Failure Reason:

Error reimaging machines: Failed to power on smithi111

  • log_href: http://qa-proxy.ceph.com/teuthology/rishabh-2023-11-04_04:30:51-fs-rishabh-2023nov3-testing-default-smithi/7447090/teuthology.log
  • archive_path: /home/teuthworker/archive/rishabh-2023-11-04_04:30:51-fs-rishabh-2023nov3-testing-default-smithi/7447090
  • description: fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/fs/misc}}
  • duration:
  • email:
  • failure_reason: Error reimaging machines: Failed to power on smithi111
  • flavor:
  • job_id: 7447090
  • kernel:
    • flavor: default
    • kdb: True
    • sha1: 5b73517e0e2fa132ea79290e5fc06ea5349b2288
  • last_in_suite: False
  • machine_type: smithi
  • name: rishabh-2023-11-04_04:30:51-fs-rishabh-2023nov3-testing-default-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.6
  • overrides:
    • admin_socket:
      • branch: rishabh-2023nov3
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 5
        • session_timeout: 300
        • standby_replay: True
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds bal fragment size max: 10000
          • mds bal merge size: 5
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • mds_bal_replicate_threshold: 1
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • cephsqlite lock renewal timeout: 900000
          • debug cephsqlite: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • bluestore block size: 96636764160
          • bluestore compression mode: aggressive
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
          • osd shutdown pgref assert: True
          • osd_max_omap_entries_per_request: 10
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • \(POOL_APP_NOT_ENABLED\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • sha1: 826ae4205210243850fc3f7bc3f8aea0d1f5ced2
      • subvols:
        • create: 2
        • subvol_options: --size 25000000000
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • ceph-fuse:
      • client.0:
        • mount_subvol_num: 0
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
          • min: 3, name: mds_cache.dir_update
          • min: 3, name: mds_cache.dir_update_receipt
          • mds.root_rsnaps
          • mds_server.req_mksnap_latency.avgcount
          • mds_server.req_rmsnap_latency.avgcount
          • mds.dir_split
    • install:
      • ceph:
        • flavor: default
        • sha1: 826ae4205210243850fc3f7bc3f8aea0d1f5ced2
    • kclient:
      • client.0:
        • mount_subvol_num: 1
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: rishabh-2023nov3
      • sha1: 826ae4205210243850fc3f7bc3f8aea0d1f5ced2
  • owner: scheduled_rishabh@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'mds.g', 'mds.j', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'mds.h', 'mds.k', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'mds.i', 'mds.l', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event:
  • status: dead
  • success:
  • branch: rishabh-2023nov3
  • seed:
  • sha1: 826ae4205210243850fc3f7bc3f8aea0d1f5ced2
  • subset:
  • suite:
  • suite_branch: rishabh-2023nov3
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 826ae4205210243850fc3f7bc3f8aea0d1f5ced2
  • targets:
    • smithi098.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBP1X4qp0uCbjlqsXN3hys7R6PXJnZT6o32FKUbvB3meiIdjXz0S1Llof+BeP34T9LSUOt0joKXc9BGL6vSJNVbA=
    • smithi111.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLVvN3+LmzPZ4ixiwyCkS3KZLZ9R6MOPm+kPtaJbl8fExpzbk8TudXCcJWHBL7DGPWFvye2jT6sei9eiX0EUQYs=
    • smithi169.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBN72RUaiLCBL4VlLypqmqebKv5tmVFjhCZcVlnAu/6KfxmImQQoHExj5LTT6tHd1IGIv82/l6A6x/rf1ziGaKhk=
  • tasks:
    • pexec:
      • all:
        • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
        • sudo dnf -y module reset container-tools
        • sudo dnf -y module install container-tools:rhel8 --allowerasing --nobest
        • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
    • cephadm:
      • roleless: False
    • cephadm.shell:
      • mon.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • mon.a:
        • ceph fs dump
        • ceph osd dump
    • fs.ready:
      • timeout: 300
    • ceph-fuse:
    • check-counter:
    • exec:
      • mon.a:
        • ceph mgr module enable snap_schedule
        • ceph config set mgr mgr/snap_schedule/allow_m_granularity true
        • ceph config set mgr mgr/snap_schedule/dump_on_update true
        • ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1m
        • ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6m3h
        • ceph fs snap-schedule status --fs=cephfs --path=/
        • ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
        • date +%s > START_TIME
    • full_sequential_finally:
      • exec:
        • mon.a:
          • date +%s > END_TIME
          • START_TIME=$(cat START_TIME); END_TIME=$(cat END_TIME); DIFF_TIME=$((600-(END_TIME-START_TIME))); if [ "$DIFF_TIME" -gt 0 ]; then sleep "$DIFF_TIME"; fi
          • ceph fs snap-schedule status --fs=cephfs --path=/
          • ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
    • background_exec:
      • mon.a:
        • while sleep 13; do ceph tell mds.cephfs:0 flush journal; done
    • workunit:
      • clients:
        • all:
          • fs/misc
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-11-04 04:32:11
  • started: 2023-11-04 04:49:54
  • updated: 2023-11-04 04:55:51
  • status_class: danger
  • runtime: 0:05:57