Description: fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/kernel_untar_build}}

Log: http://qa-proxy.ceph.com/teuthology/rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi/7365799/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=7e927b8743ea4fddaa3c83221b8b3c2b

Failure Reason:

['<https://docs.pagure.org/copr.copr/user_documentation.html#what-i-can-build-in-copr>,', 'Bugzilla. In case of problems, contact the owner of this repository.', "Curl error (7): Couldn't connect to server for https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64 [Failed to connect to copr.fedorainfracloud.org port 443: Network is unreachable]", 'Enabling a Copr repository. Please note that this repository is not part', 'Please do not file bug reports about these packages in Fedora', 'The Fedora Project does not exercise any power over the contents of', 'and packages are not held to any quality or security level.', 'of the main distribution, and quality may vary.', 'this repository beyond the rules outlined in the Copr FAQ at']
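
These lines are the repository notices dnf printed while enabling the ceph/python3-asyncssh Copr repository (shown sorted rather than in message order); the operative error is the curl one: the node could not reach copr.fedorainfracloud.org on port 443 ("Network is unreachable"), so package installation failed and the job was marked dead. As a quick check (hypothetical, not part of this job; the URL is copied verbatim from the error above), the endpoint can be probed with curl, whose exit code 7 is the same "couldn't connect" condition reported here:

    # Hypothetical reachability check, not run by this job:
    curl -sSI --connect-timeout 10 \
      'https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64' \
      || echo "connect failed (curl exit $?)"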

  • log_href: http://qa-proxy.ceph.com/teuthology/rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi/7365799/teuthology.log
  • archive_path: /home/teuthworker/archive/rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi/7365799
  • description: fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/kernel_untar_build}}
  • duration: 0:14:48
  • email:
  • failure_reason: ['<https://docs.pagure.org/copr.copr/user_documentation.html#what-i-can-build-in-copr>,', 'Bugzilla. In case of problems, contact the owner of this repository.', "Curl error (7): Couldn't connect to server for https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64 [Failed to connect to copr.fedorainfracloud.org port 443: Network is unreachable]", 'Enabling a Copr repository. Please note that this repository is not part', 'Please do not file bug reports about these packages in Fedora', 'The Fedora Project does not exercise any power over the contents of', 'and packages are not held to any quality or security level.', 'of the main distribution, and quality may vary.', 'this repository beyond the rules outlined in the Copr FAQ at']
  • flavor:
  • job_id: 7365799
  • kernel:
    • client:
      • branch: testing
    • flavor: default
    • kdb: True
    • sha1: a7fb1265323db972dd333f71b9a53e9479f62e37
  • last_in_suite: False
  • machine_type: smithi
  • name: rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.6
  • overrides:
    • admin_socket:
      • branch: wip-rishabh-2023Aug1-b4
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 5
        • session_timeout: 300
        • standby_replay: True
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • global:
          • ms die on skipped message: False
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds bal fragment size max: 10000
          • mds bal merge size: 5
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • cephsqlite lock renewal timeout: 900000
          • debug cephsqlite: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
          • osd shutdown pgref assert: True
          • osd_max_omap_entries_per_request: 10
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • slow metadata IO
        • SLOW_OPS
        • slow request
      • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
      • subvols:
        • create: 2
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • ceph-fuse:
      • client.0:
        • mount_subvol_num: 0
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
          • mds.root_rsnaps
          • mds_server.req_mksnap_latency.avgcount
          • mds_server.req_rmsnap_latency.avgcount
          • mds.dir_split
    • install:
      • ceph:
        • flavor: default
        • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
    • kclient:
      • client.0:
        • mount_subvol_num: 1
      • mntopts:
        • ms_mode=crc
        • wsync
      • syntax: v1
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-rishabh-2023Aug1-b4
      • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
  • owner: scheduled_rishabh@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'mds.g', 'mds.j', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'mds.h', 'mds.k', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'mds.i', 'mds.l', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=7e927b8743ea4fddaa3c83221b8b3c2b
  • status: dead
  • success: False
  • branch: wip-rishabh-2023Aug1-b4
  • seed:
  • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
  • subset:
  • suite:
  • suite_branch: wip-rishabh-2023Aug1-b4
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
  • targets:
    • smithi007.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLVvN3+LmzPZ4ixiwyCkS3KZLZ9R6MOPm+kPtaJbl8fExpzbk8TudXCcJWHBL7DGPWFvye2jT6sei9eiX0EUQYs=
    • smithi111.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCpUxo5ay/ucdkX9rleNMm8Ofp24XRXUtJPOK3Ji3XBpTiUgmmwneaY6316+3CG+k4K0p3IebanuyTvZApjm02/dNCvu8yD3rjEpWVgIuqYH8O3lEdN9DXcCX+7xHJE0m18+ZPsJQVsNrNJxY+5zqyNnBc8jpZq2LStsWwPOKPWhauANRkOBFSGDoLppWZbLx/yJs+R0qoNLLK9LpKQTUnqsapXvbD27OpPahzj/qbNxFPPyY6XrrldZ6qEO+EjJOujFLH3gzv0kJu10DCuzxrpz6ston4RPi1O3GgcHnF4Mqr/W9VwZ94UTMh+k4dIFRVqJEKrcwRHa5kFsvH/WhVx0Ygr/MiV/Q86mmV7JT+/MpzIRTtWyei5L9IBeZIE9cwzj+mOk6SD/t1roIVEGVV/uF2AyFDVBPr6F8oZ822OGpUyyaDsWTitPNdTX4TC3zdSrLc9tyDa1P74eH/ZLeTSf+C6pM/cepOhQaa6XzpDiPSuJUm9HOQxUlEAvcqa7Ac=
    • smithi133.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCpUxo5ay/ucdkX9rleNMm8Ofp24XRXUtJPOK3Ji3XBpTiUgmmwneaY6316+3CG+k4K0p3IebanuyTvZApjm02/dNCvu8yD3rjEpWVgIuqYH8O3lEdN9DXcCX+7xHJE0m18+ZPsJQVsNrNJxY+5zqyNnBc8jpZq2LStsWwPOKPWhauANRkOBFSGDoLppWZbLx/yJs+R0qoNLLK9LpKQTUnqsapXvbD27OpPahzj/qbNxFPPyY6XrrldZ6qEO+EjJOujFLH3gzv0kJu10DCuzxrpz6ston4RPi1O3GgcHnF4Mqr/W9VwZ94UTMh+k4dIFRVqJEKrcwRHa5kFsvH/WhVx0Ygr/MiV/Q86mmV7JT+/MpzIRTtWyei5L9IBeZIE9cwzj+mOk6SD/t1roIVEGVV/uF2AyFDVBPr6F8oZ822OGpUyyaDsWTitPNdTX4TC3zdSrLc9tyDa1P74eH/ZLeTSf+C6pM/cepOhQaa6XzpDiPSuJUm9HOQxUlEAvcqa7Ac=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • flavor: default
      • kdb: True
      • sha1: a7fb1265323db972dd333f71b9a53e9479f62e37
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • pexec:
      • all:
        • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
        • sudo dnf -y module reset container-tools
        • sudo dnf -y module install container-tools:rhel8 --allowerasing --nobest
        • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
    • cephadm:
      • roleless: False
    • cephadm.shell:
      • mon.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • mon.a:
        • ceph fs dump
        • ceph osd dump
    • fs.ready:
      • timeout: 300
    • kclient:
    • check-counter:
      • fwd_scrub:
        • scrub_timeout: 900
        • sleep_between_iterations: 1
      • exec:
        • mon.a:
          • ceph mgr module enable snap_schedule
          • ceph config set mgr mgr/snap_schedule/allow_m_granularity true
          • ceph config set mgr mgr/snap_schedule/dump_on_update true
          • ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1M
          • ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6M3h
          • ceph fs snap-schedule status --fs=cephfs --path=/
          • ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
          • date +%s > START_TIME
      • full_sequential_finally:
        • exec:
          • mon.a:
            • date +%s > END_TIME
            • START_TIME=$(cat START_TIME); END_TIME=$(cat END_TIME); DIFF_TIME=$((600-(END_TIME-START_TIME))); if [ "$DIFF_TIME" -gt 0 ]; then sleep "$DIFF_TIME"; fi
            • ceph fs snap-schedule status --fs=cephfs --path=/
            • ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
      • background_exec:
        • mon.a:
          • while sleep 13; do ceph tell mds.cephfs:0 flush journal; done
      • workunit:
        • clients:
          • all:
            • kernel_untar_build.sh
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-08-10 20:17:50
  • started: 2023-08-11 12:28:54
  • updated: 2023-08-11 12:55:09
  • status_class: danger
  • runtime: 0:26:15
  • wait_time: 0:11:27
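
As a small consistency note (an observation from the fields above, not something stated on this page), wait_time plus duration matches runtime for this job:

    # Observation only: runtime = wait_time + duration
    echo $(( 11*60 + 27 + 14*60 + 48 ))   # wait_time (0:11:27) + duration (0:14:48) = 1575 s
    echo $(( 26*60 + 15 ))                # runtime  (0:26:15)                       = 1575 s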