Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/cache-pool-snaps-readproxy}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2023-06-26_18:56:05-rados-wip-yuri7-testing-2023-06-23-1022-distro-default-smithi/7317004/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=b62bc1ff3dea45f6845b0002f1fc01ac

Failure Reason:

Ansible failed on smithi067.front.sepia.ceph.com while enabling a Copr repository:

  command:     dnf -y copr enable ceph/python3-asyncssh
  return code: 1
  started:     2023-06-26 22:25:08
  ended:       2023-06-26 22:27:19 (delta 0:02:11)
  stderr (standard Copr enablement notice elided):
    Error: Failed to connect to https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64: Network is unreachable
  ansible warning: "Consider using the dnf module rather than running 'dnf'. If you need to use command because dnf is insufficient you can add 'warn: false' to this command task or set 'command_warnings=False' in ansible.cfg to get rid of this message."
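
This is an infrastructure failure (the test node could not reach copr.fedorainfracloud.org), not a Ceph regression. A minimal sketch of a more resilient variant of the failing step, assuming a plain shell retry is acceptable at this point in the playbook; the attempt count and backoff are illustrative, not from the original task:

  # Hypothetical hardening of the failing step: retry 'dnf copr enable'
  # to ride out transient network failures before giving up.
  # 3 attempts / 30 s sleep are illustrative values, not from the original task.
  for attempt in 1 2 3; do
      if sudo dnf -y copr enable ceph/python3-asyncssh; then
          break
      fi
      echo "copr enable failed (attempt ${attempt}), retrying in 30s" >&2
      sleep 30
  done

The Ansible warning in the output separately suggests moving away from a raw command task; for this specific step, the community.general collection's copr module could be an option if it is available on the control node.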

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2023-06-26_18:56:05-rados-wip-yuri7-testing-2023-06-23-1022-distro-default-smithi/7317004/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2023-06-26_18:56:05-rados-wip-yuri7-testing-2023-06-23-1022-distro-default-smithi/7317004
  • description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/cache-pool-snaps-readproxy}
  • duration: 621 seconds (about 10.4 minutes)
  • email: yweinste@redhat.com
  • failure_reason: dnf -y copr enable ceph/python3-asyncssh on smithi067 exited 1 ("Network is unreachable" fetching https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64); full detail under Failure Reason above
  • flavor:
  • job_id: 7317004
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2023-06-26_18:56:05-rados-wip-yuri7-testing-2023-06-23-1022-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: wip-yuri7-testing-2023-06-23-1022
    • ceph:
      • conf:
        • global:
          • mon client directed command retry: 5
          • mon election default strategy: 3
          • ms bind msgr1: False
          • ms bind msgr2: True
          • ms inject delay max: 1
          • ms inject delay probability: 0.005
          • ms inject delay type: osd
          • ms inject internal delays: 0.002
          • ms inject socket failures: 2500
          • ms type: async
          • osd_max_pg_log_entries: 2
          • osd_min_pg_log_entries: 1
          • osd_object_clean_region_max_num_intervals: 1000
          • osd_pg_log_trim_min: 0
          • osd_pool_default_min_size: 2
          • osd_pool_default_size: 3
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon min osdmap epochs: 50
          • mon osdmap full prune interval: 2
          • mon osdmap full prune min: 15
          • mon osdmap full prune txsize: 2
          • mon scrub interval: 300
          • paxos service trim min: 10
        • osd:
          • bluefs allocator: hybrid
          • bluestore allocator: hybrid
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • bluestore zero block detection: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd backoff on peering: True
          • osd blocked scrub grace period: 3600
          • osd debug reject backfill probability: 0.3
          • osd debug verify cached snaps: True
          • osd debug verify missing on start: True
          • osd delete sleep: 1
          • osd failsafe full ratio: 0.95
          • osd max backfills: 3
          • osd max markdown count: 1000
          • osd mclock override recovery settings: True
          • osd mclock profile: high_recovery_ops
          • osd objectstore: bluestore
          • osd op queue: debug_random
          • osd op queue cut off: debug_random
          • osd scrub max interval: 120
          • osd scrub min interval: 60
          • osd shutdown pgref assert: True
          • osd snap trim sleep: 2
          • osd_max_pg_log_entries: 3000
          • osd_min_pg_log_entries: 3000
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(OSD_SLOW_PING_TIME
        • but it is still running
        • objects unfound and apparently lost
        • overall HEALTH_
        • \(OSDMAP_FLAGS\)
        • \(OSD_
        • \(PG_
        • \(POOL_
        • \(CACHE_POOL_
        • \(SMALLER_PGP_NUM\)
        • \(OBJECT_
        • \(SLOW_OPS\)
        • \(REQUEST_SLOW\)
        • \(TOO_FEW_PGS\)
        • slow request
        • timeout on replica
        • late reservation from
        • must scrub before tier agent can activate
      • sha1: e4919e7f3fd451cf8205569752e8c3a1018b941c
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: e4919e7f3fd451cf8205569752e8c3a1018b941c
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
      • min_in: 4
    • workunit:
      • branch: wip-yuri7-testing-2023-06-23-1022
      • sha1: e4919e7f3fd451cf8205569752e8c3a1018b941c
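
Note: the global injection overrides above are ordinary Ceph config options, written with spaces in place of underscores. A sketch of applying a few of them by hand to a running test cluster, assuming the standard ceph config CLI; the values mirror the overrides:

  # Hedged sketch: the msgr delay-injection knobs from the overrides,
  # set at runtime via the cluster config database.
  sudo ceph config set global ms_inject_delay_type osd
  sudo ceph config set global ms_inject_delay_max 1
  sudo ceph config set global ms_inject_delay_probability 0.005
  sudo ceph config set global ms_inject_socket_failures 2500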
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a']
    • ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'node-exporter.b']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=b62bc1ff3dea45f6845b0002f1fc01ac
  • status: dead
  • success: False
  • branch: wip-yuri7-testing-2023-06-23-1022
  • seed:
  • sha1: e4919e7f3fd451cf8205569752e8c3a1018b941c
  • subset:
  • suite:
  • suite_branch: wip-yuri7-testing-2023-06-23-1022
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: e4919e7f3fd451cf8205569752e8c3a1018b941c
  • targets:
    • smithi038.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDmNS6huyddurar/GcwTDLJ8lvhdoFnESD5cuxmtnJBv6DeoPttovo2yj2xwy3SWaEUcfbIB//mvILsUxQgCgwtn/r+pO5notnrMoL40FmWtt61gujR5BssbCafpYJQeGG1qupE/eXCre+yORh7GuPYJk6uJFY6RK8yupEVdeOLC5UPc2vUZH1F+oacCUwW9cpjBScNzk+qRrjhbvM7SePxsxjjgFsoxad+qF20JxZmOvZfajdNvBAQ8CwlrSTqZbXMwwTf2ByoUYTl7JWU3Ne6NOfBO1CvhPQdQcEJOfHBXRzdrAtj088gB1xTvi+XFG5zcyK2kw7TzpDI84SbN/WI/RwFusDlskSSRQ2bUTuGVE2dbZVhy6Td0b7DRpEysjGgCYBKxEi+/jP2mHXxaQ+YO/sWdN8DKUH/oTvhdWlzGJXad5IGd8nquZQfjY5hdeYZ1D70sdhGV41B75sA8YrTWpPsMmABmh4ekJCa+I7Fx8mpoxEGhBql3bZR8gmLhD0=
    • smithi067.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDIcZhsPRvCyIdvGZTOHqhDrJbafo/rPVibbAwzvevN4P4qNVKH9+UFnx3FI0ZAVlIzdw0S/jQP4D97LySjQlXlV41rwIIXmbTg/s32fvK1t907fvkB7CXG3VKKuqr2osBXrK2l7P80j0Fp+5fiLyOzOv3anvYIAYgmMgqdj/SVTcmXh7+OWRfs0PJqyDt7RDTCOq8wqEBbgRT6AZ4fKct3a2eOywVhFeOskXWZyrbVRIW91+3fHRRj90/cZ9MSDnu5E3e0SmbkMGxqIoj6rotaLXKLLpkpdkEvpXd7XGAKrkiPB1n6qnnbP5MPHkphOKa8oNqseSfMjnQY7a9M6J7JDt45q0wVJtyuxjfEF2QnKDTbGWR5USWei5G6tlm7H/Hoo7RULleUpy8N9LoA3ZkYx5Nyl+k7SbixwiwxrGOx7CUtb1jRMz7d1ELURp2pGsoyR3hOx87NqJDoHukHd960XbDD6+KMqUQzwAIUdiF1eM7c+nf5aSsR+HiWOWsMTd8=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
      • conf:
        • osd:
          • debug monc: 20
    • thrashosds:
      • bluestore_new_sharding: random
      • chance_bluestore_reshard: 1
      • chance_pgnum_grow: 1
      • chance_pgnum_shrink: 1
      • chance_pgpnum_fix: 1
      • timeout: 1200
    • exec:
      • client.0:
        • sudo ceph osd pool create base 4
        • sudo ceph osd pool application enable base rados
        • sudo ceph osd pool create cache 4
        • sudo ceph osd tier add base cache
        • sudo ceph osd tier cache-mode cache readproxy
        • sudo ceph osd tier set-overlay base cache
        • sudo ceph osd pool set cache hit_set_type bloom
        • sudo ceph osd pool set cache hit_set_count 8
        • sudo ceph osd pool set cache hit_set_period 3600
        • sudo ceph osd pool set cache target_max_objects 250
    • rados:
      • clients:
        • client.0
      • objects: 500
      • op_weights:
        • cache_evict: 50
        • cache_flush: 50
        • cache_try_flush: 50
        • copy_from: 50
        • delete: 50
        • read: 100
        • rollback: 50
        • snap_create: 50
        • snap_remove: 50
        • write: 100
      • ops: 4000
      • pool_snaps: True
      • pools:
        • base
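
Note: the exec step above stacks a 'cache' pool in readproxy mode on top of 'base' before the rados snap workload runs. A quick sanity check of that wiring, as a sketch using standard ceph CLI queries (exact output formatting varies by release):

  # Hedged sketch: confirm the cache tier built by the exec step.
  sudo ceph osd pool ls detail                      # 'cache' should show tier_of base, cache_mode readproxy
  sudo ceph osd pool get cache hit_set_type         # expect: bloom
  sudo ceph osd pool get cache hit_set_count        # expect: 8
  sudo ceph osd pool get cache target_max_objects   # expect: 250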
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-06-26 19:20:03
  • started:
  • updated: 2023-06-26 22:30:42
  • status_class: danger
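
Once the network issue clears, a job like this is normally rescheduled with teuthology-suite. A sketch assuming the usual CLI flags (verify against your teuthology checkout; the priority value is illustrative):

  # Hedged sketch: re-run this rados job against the same branch.
  teuthology-suite \
      --suite rados \
      --ceph wip-yuri7-testing-2023-06-23-1022 \
      --machine-type smithi \
      --filter 'cache-pool-snaps-readproxy' \
      --email yweinste@redhat.com \
      --priority 100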