Description: fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}

Log: http://qa-proxy.ceph.com/teuthology/rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi/7365797/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=d5ddf7d9dfe44bbf831b3162240adf4a

Failure Reason:

Enabling a Copr repository. Please note that this repository is not part
of the main distribution, and quality may vary. The Fedora Project does
not exercise any power over the contents of this repository beyond the
rules outlined in the Copr FAQ at
<https://docs.pagure.org/copr.copr/user_documentation.html#what-i-can-build-in-copr>,
and packages are not held to any quality or security level. Please do not
file bug reports about these packages in Fedora Bugzilla. In case of
problems, contact the owner of this repository.

Error: Failed to connect to https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64: Network is unreachable
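
The job went dead 0:08:26 in, while setting up packages for the install: the node could not fetch the dnf repo file for the ceph/python3-asyncssh Copr repository ("Network is unreachable"), so this is an infrastructure/network failure rather than a CephFS upgrade regression. A minimal probe for checking whether the Copr endpoint is reachable again from a test node; the URL is copied from the error above, the rest is a generic curl connectivity check:

    #!/bin/sh
    # Probe the Copr repo endpoint that the install task failed to fetch.
    REPO_URL='https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64'

    # -f: treat HTTP errors as failure; -sS: quiet, but still print errors;
    # --max-time 30: bail out instead of hanging on a black-holed route.
    if curl -fsS --max-time 30 "$REPO_URL" -o /dev/null; then
        echo "Copr repo reachable; the failure was likely transient."
    else
        echo "Copr repo still unreachable from this node." >&2
    fi

If the probe succeeds, rescheduling the dead job should suffice (see the rerun sketch at the end of this report).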

  • log_href: http://qa-proxy.ceph.com/teuthology/rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi/7365797/teuthology.log
  • archive_path: /home/teuthworker/archive/rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi/7365797
  • description: fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
  • duration: 0:08:26
  • email:
  • failure_reason: Error: Failed to connect to https://copr.fedorainfracloud.org/coprs/ceph/python3-asyncssh/repo/epel-8/dnf.repo?arch=x86_64: Network is unreachable (accompanied by the standard Copr repository notice; see the reconstructed Failure Reason above)
  • flavor:
  • job_id: 7365797
  • kernel:
    • flavor: default
    • kdb: True
    • sha1: a7fb1265323db972dd333f71b9a53e9479f62e37
  • last_in_suite: False
  • machine_type: smithi
  • name: rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: wip-rishabh-2023Aug1-b4
    • ceph:
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • global:
          • mon pg warn min per osd: 0
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: bitmap
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
      • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
    • kclient:
      • syntax: v1
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-rishabh-2023Aug1-b4
      • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
  • owner: scheduled_rishabh@teuthology
  • pid:
  • roles:
    • ['host.a', 'client.0', 'osd.0', 'osd.1', 'osd.2']
    • ['host.b', 'client.1', 'osd.3', 'osd.4', 'osd.5']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=d5ddf7d9dfe44bbf831b3162240adf4a
  • status: dead
  • success: False
  • branch: wip-rishabh-2023Aug1-b4
  • seed:
  • sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
  • subset:
  • suite:
  • suite_branch: wip-rishabh-2023Aug1-b4
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 92684807f016ce045ac66e37fdde6f8c47857bcc
  • targets:
    • smithi098.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDiAHdKlnTu+IuZSYeKKfwJkzEDcVv0q1cVQdZz5GWNyvbOoa1T3eKCnnEUo1qFo6q1VgDUBojMrQ+PuFHkvNlY/UVJQnHS1kG8GgbRe7wW4/3xAg2L1SDAiF0OsjPK1gPUsgHcBhTtO1SvnI4oc3PJm4GT74cIaYHGrY4ttJJfQFaePpjUGwR/Vo19cczZIUXLtpaHafzXCwlXBcIAJe0tFg4i/L7E/dwJWCPuddzLriHBM6732gMTba7biZH9TPgUnipetdHYXf+/jfBIlzVJ5WkI6JN++4VKmAC7RZetlcqTEBfBaQrz45xgy13Iuj/IWT3Z3y/1MSd4GnEojfEwi5AKCn2dY05A95bDmRncQDFNdQsOWBhsNAiOuHKKNzFsJpq+Duz8kup5fdwl0snwXto5mzX9aaQ3XTmNjGcRQwO2X36rRcipvKNFMutCzxj/1FooPiEht6sKzVoSlOWyr4JQm3NFrzBemV28ArwpNeDI27GxC88OzcNcrDBSPqk=
    • smithi114.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDlkj159XukVoCZiCC9ixbkfon6ubsYDqUJdgDod+Cl33qVjNQXJ4TdLQH0aeyJMLv6k1w9bWD2QUY7iBd2MAMC8sgcHrF8sgfOQ6IBq8T6kDV/pxWTk9lXpY+6aUpTGVkeUeOhksjMATYXgx4Lp3VvHaqJz3jBg88QugIuOqcZ58B5d1lKpZNkiDhDXoNkNn0lxHoND8pr51wmTSoqXNp5AQzyiHI+MwtdNtq/OZXk6tecIL7E06q9DX3Gh5kBJVgyg1q6yE7bTiq4mDY0t+ttJQUXBrvJQxJn4kDpC7CjvFt8XMCdGB3xCnW9edpz4rqBRR7mhVTTYHQyg6B6OkJkkWWJ2U4nUT8MZRD6qTrX0dzj+op9ukEZ59FipA3nvNBLDsTWmuTscS+RuqLo4c9/6ZJGNv1mIKwqyBizUdkkiqqAasuaCdCH/ciE3ffWo9C2Re6lmJW4BOviyP43l5tF8lwR2E3XXqxCzWRjRbcBWkQG5qnWBfbJHH5DT4vmeVk=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: default
      • kdb: True
      • sha1: a7fb1265323db972dd333f71b9a53e9479f62e37
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • pexec:
      • all:
        • sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup
        • sudo dnf -y module reset container-tools
        • sudo dnf -y module install container-tools --allowerasing --nobest
        • sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf
    • pexec:
      • clients:
        • sudo modprobe -r ceph
        • sudo modprobe ceph disable_send_metrics=on
    • install:
      • exclude_packages:
        • ceph-volume
      • tag: v16.2.4
    • print: **** done install task...
    • cephadm:
      • avoid_pacific_features: True
      • cephadm_branch: v16.2.4
      • cephadm_git_url: https://github.com/ceph/ceph
      • image: quay.io/ceph/ceph:v16.2.4
      • roleless: True
    • print: **** done starting v16.2.4
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • host.a:
        • ceph fs volume create cephfs --placement=4
        • ceph fs dump
    • cephadm.shell:
      • host.a:
        • ceph fs set cephfs max_mds 2
    • cephadm.shell:
      • host.a:
        • ceph fs set cephfs allow_standby_replay true
    • cephadm.shell:
      • host.a:
        • ceph fs set cephfs inline_data false
    • cephadm.shell:
      • host.a:
        • ceph fs dump
        • ceph --format=json fs dump | jq -e ".filesystems | length == 1"
        • while ! ceph --format=json mds versions | jq -e ". | add == 4"; do sleep 1; done
    • fs.pre_upgrade_save:
    • kclient:
    • print: **** done client
    • parallel:
      • upgrade-tasks
      • workload-tasks
    • cephadm.shell:
      • host.a:
        • ceph fs dump
    • fs.post_upgrade_checks:
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-08-10 20:17:49
  • started: 2023-08-11 12:28:54
  • updated: 2023-08-11 12:48:11
  • status_class: danger
  • runtime: 0:19:17
  • wait_time: 0:10:51
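
A note on the readiness gate in the tasks list above (while ! ceph --format=json mds versions | jq -e ". | add == 4"; do sleep 1; done): the volume is created with --placement=4, and with max_mds 2 plus allow_standby_replay true that yields 2 active ranks and 2 standby-replay daemons, so the loop holds the run until all four MDS daemons report a version. Equivalent commands for inspecting the same state by hand (standard ceph CLI, fs name as in this job):

    # Per-version MDS daemon counts; the gate proceeds once these sum to 4.
    ceph --format=json mds versions | jq .

    # Human-readable view of active ranks and standby-replay daemons.
    ceph fs status cephfs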
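
Because the run died on a package-mirror network error rather than a test assertion, the usual remedy is to reschedule the dead jobs once connectivity is restored. A sketch using teuthology-suite's rerun support; the flag names are my assumption from current teuthology and should be checked against teuthology-suite --help:

    # Reschedule only the jobs that went dead in this run.
    teuthology-suite \
        --rerun rishabh-2023-08-10_20:16:46-fs-wip-rishabh-2023Aug1-b4-testing-default-smithi \
        --rerun-statuses dead \
        --priority 100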