Description: fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-nautilus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2023-08-15_01:22:18-fs-wip-yuri8-testing-2023-08-11-0834-pacific-distro-default-smithi/7367887/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2023-08-15_01:22:18-fs-wip-yuri8-testing-2023-08-11-0834-pacific-distro-default-smithi/7367887/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2023-08-15_01:22:18-fs-wip-yuri8-testing-2023-08-11-0834-pacific-distro-default-smithi/7367887
  • description: fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-nautilus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}
  • duration: 0:27:07
  • email: yweinste@redhat.com
  • failure_reason:
  • flavor: default
  • job_id: 7367887
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2023-08-15_01:22:18-fs-wip-yuri8-testing-2023-08-11-0834-pacific-distro-default-smithi
  • nuke_on_error: False
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: wip-yuri8-testing-2023-08-11-0834-pacific
    • ceph:
      • cephfs:
        • max_mds: 1
      • conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • global:
          • bluestore warn on legacy statfs: False
          • bluestore warn on no per pool omap: False
          • mon pg warn min per osd: 0
        • mds:
          • debug mds: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
          • mon warn on osd down out interval zero: False
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: bitmap
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • scrub mismatch
        • ScrubResult
        • wrongly marked
        • \(POOL_APP_NOT_ENABLED\)
        • \(SLOW_OPS\)
        • overall HEALTH_
        • \(MON_MSGR2_NOT_ENABLED\)
        • slow request
        • missing required features
      • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
    • nuke-on-error: False
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: wip-yuri8-testing-2023-08-11-0834-pacific
      • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
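
The osd overrides in this section are what the bluestore-bitmap facet of the job description contributes: they pin the BlueStore allocator to bitmap and force an fsck on every mount. As a minimal sketch (assuming shell access to the host carrying osd.0 and the default admin-socket path), the effective values can be read back from the running daemon; Ceph accepts option names with spaces or underscores interchangeably:

    # query the running OSD's effective configuration over its admin socket
    ceph daemon osd.0 config get bluestore_allocator      # expect "bitmap"
    ceph daemon osd.0 config get bluestore_fsck_on_mount  # expect "true"
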
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.b', 'mon.c', 'mgr.x', 'mgr.y', 'mds.a', 'mds.b', 'mds.c', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['client.0']
    • ['client.1']
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-yuri8-testing-2023-08-11-0834-pacific
  • seed:
  • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
  • subset:
  • suite:
  • suite_branch: wip-yuri8-testing-2023-08-11-0834-pacific
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
  • targets:
    • smithi043.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDEf/mmjVBX8fYEDygiqXr97oh0vOh45ONkvTPMG7AcWgDnkFnXVozhFp7yxDwec1NUpWzzbmVeHt9RgSCrsNUSRWa3VQ2MHlFU2qwqm3m5aYJUwGEdo3U7jv8IUNrcQsEEl1QVuEXk6FC0pExrJc8w21/wqBGLaDIy/9GfjnPX13SkJMEZ59x1u68LHaQmZRRG4ufhri51mbVHfpKo+Tgxli5u+ZVilkrlBPazEke9Aja5siAlAAQXWeNG4XYFP/0Bi5tQ2z/GWql1c41T5tpXv0bk8IK7FiYdjSCENUhqedeCRBGtNpl/s6u7ja4TirkHWwgJP2uy/Yzp3pBv6j5wmgXYevR9Oww5cQLx3w0tYqMBpuvxUVx+BnODFKSl6+GxtQOcE4WneGB8Pcl3xyKemhovMv0N55SkHBoN9CZd02SNAZKZLp3Q15FF6v9+TNRlAvSxega1L944MFACb6Rpajm9yCRbZBAv+pPqBeaMASBacxs4yqpwR6QpSjRg5ns=
    • smithi121.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC66PgcB80chFKYHSTqkYI8yVMXrVbdKfkgya5GwIi7BkxGTy0w5g30y9UZettEjElhlSPdM3lu8DLTU0FMmba9EuLEykkB1DDBpIrXR+P51PF/X9TdLnjyBmurfH13PFP50R/ly4QykW4VGR/yexojKXW53zHNAvuR0Itm4UG2WCm4BXng1kysI+ZyIQtiW18RAlZkBv/JZWAhCuCpngpzL1h1cix/HPo79Q0/2XMmrafcNgbIOfgUNKb+leNcQOiGhiAjTUOgKnQ/qZ9aqfB5mvKfLiwE6VO6xajIf0HkJ+i4EVlaEXK8BUTZWUGcso4WkCiiJPMWWpMP+W0sXWmbwONZ5OyMZomJeAAS+TcO1dDWHq5Gv4kBJ3+i5FzALOcgtUKzf3zLMswvdayu7YPhLxh1V4My7RhuGDk5BqnVYjxpVnY193T6oYozDVw6QIEgOadtEkjMNvBo7Lnbghhxg30YR3nf+hGPXxgx2Doc60jS//jiRkObkmGsw+fuzME=
    • smithi186.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC0b0CVVpjX5b+j1QfOJldhteIrZtvlGr+h2UiXGkHs9OJ7jGniGwnKIusNzNjuclNhRnJhX1SqjCIRIZLsWdWtGvawUtY9uLmHmokxb/DwBRjQP3d66qndIMnq7U9evwmKA2PHXku7KM36lJ/5IBTfmmJTS6haM/8LdRc3Ca2bwWSzZ5+0R4vLUYmv64rJjBgfhE9z8LN8lTenWwPPYHrS3VBui9FO7RJ+y/3+j35bkMXlVdmbI33ERVFXBxXsAIvGbamCv0J/xdG5dSNcS3Ll7GUm5vIPwo16ZaBMC5mVEiB66Jnnj/t2ilDrskkEDlu/gWLtkMHCBq+Nj+1WBVUPZ3miAkNYMQMQiQMAiN8+CLWX24Qq2Rn4SXHNlYOeKgmvA8f9TzV72KkDPXaawnQMf2LrjLHki1/K0jZkRlSwWVZPClMCin7oY4K3DEvWarZ9QeWAmNrfpLrFB8GpP+CmwgWNBNq0NpaYsS70GTiQUQMC90gmXJq6YRCASfojlMc=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • branch: nautilus
      • exclude_packages:
        • librados3
        • ceph-mgr-dashboard
        • ceph-mgr-diskprediction-local
        • ceph-mgr-rook
        • ceph-mgr-cephadm
        • cephadm
        • ceph-immutable-object-cache
        • python3-rados
        • python3-rgw
        • python3-rbd
        • python3-cephfs
      • extra_packages:
        • librados2
      • flavor: default
      • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
    • print: **** done installing nautilus
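
The install task above pins everything to the nautilus branch; exclude_packages drops packages that are unavailable or unwanted on that release (cephadm and the python3 bindings, for example), while librados2 is pulled in explicitly. A hedged spot-check that the pinned release actually landed on a node (CentOS 8.stream, hence rpm):

    # on any freshly installed node
    ceph --version        # should report a v14.2.x (nautilus) build
    rpm -q ceph-common    # package version should match that build
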
    • ceph:
      • conf:
        • global:
          • mon warn on pool no app: False
          • ms bind msgr2: False
          • bluestore warn on legacy statfs: False
          • bluestore warn on no per pool omap: False
          • mon pg warn min per osd: 0
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mds:
          • debug mds: 20
          • debug ms: 1
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon op complaint time: 120
          • mon warn on osd down out interval zero: False
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: bitmap
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
          • osd op complaint time: 180
      • log-ignorelist:
        • overall HEALTH_
        • \(FS_
        • \(MDS_
        • \(OSD_
        • \(MON_DOWN\)
        • \(CACHE_POOL_
        • \(POOL_
        • \(MGR_DOWN\)
        • \(PG_
        • \(SMALLER_PGP_NUM\)
        • Monitor daemon marked osd
        • Behind on trimming
        • Manager daemon
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • scrub mismatch
        • ScrubResult
        • wrongly marked
        • \(POOL_APP_NOT_ENABLED\)
        • \(SLOW_OPS\)
        • overall HEALTH_
        • \(MON_MSGR2_NOT_ENABLED\)
        • slow request
        • missing required features
      • cephfs:
        • max_mds: 1
      • flavor: default
      • fs: xfs
      • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
      • cluster: ceph
    • exec:
      • osd.0:
        • ceph osd set-require-min-compat-client nautilus
    • print: **** done ceph
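
set-require-min-compat-client records a floor in the OSDMap below which client sessions are refused. A quick check of what the exec step just set:

    # the floor is carried in the OSDMap itself
    ceph osd dump | grep require_min_compat_client   # expect "nautilus"
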
    • ceph-fuse:
    • print: **** done nautilus client
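
A bare ceph-fuse task mounts every client role, so both client.0 and client.1 get nautilus userspace (FUSE) mounts here. Roughly what it does per client, as a hypothetical manual equivalent (the mountpoint path is an assumption, not the task's actual layout):

    sudo mkdir -p /mnt/client.0
    sudo ceph-fuse -n client.0 /mnt/client.0
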
    • print: **** done fsstress
    • mds_pre_upgrade:
    • print: **** done mds pre-upgrade sequence
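
mds_pre_upgrade is the standard CephFS pre-upgrade step: shrink the file system to a single active MDS rank, because upgrading with multiple active ranks is not supported. The documented manual equivalent (file-system name "cephfs" assumed):

    # reduce to one active rank and wait for the extra ranks to stop
    ceph fs set cephfs max_mds 1
    ceph status                        # wait until a single MDS is active
    ceph fs get cephfs | grep max_mds
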
    • install.upgrade:
      • mon.a:
      • mon.b:
    • print: **** done install.upgrade both hosts
    • ceph.restart:
      • daemons:
        • mon.*
        • mgr.*
      • mon-health-to-clog: False
      • wait-for-healthy: False
    • ceph.healthy:
    • ceph.restart:
      • daemons:
        • osd.*
      • wait-for-healthy: False
      • wait-for-osds-up: True
    • ceph.stop:
      • mds.*
    • ceph.restart:
      • daemons:
        • mds.*
      • wait-for-healthy: False
      • wait-for-osds-up: True
    • exec:
      • mon.a:
        • ceph versions
        • ceph osd dump -f json-pretty
        • ceph osd require-osd-release pacific
        • for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
    • ceph.healthy:
    • print: **** done ceph.restart
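
The restarts follow the usual order (mons and mgrs, then OSDs, then MDS daemons), after which the exec block raises the OSD release floor to pacific and switches the PG autoscaler off on every pool. Verifying both, reusing the job's own pool loop:

    ceph osd dump | grep require_osd_release    # expect "pacific"
    for f in `ceph osd pool ls` ; do ceph osd pool get $f pg_autoscale_mode ; done
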
    • install.upgrade:
      • client.0:
    • print: **** done install.upgrade on client.0
    • ceph-fuse:
      • client.0:
        • mounted: False
      • client.1:
        • skip: True
    • ceph-fuse:
      • client.0:
      • client.1:
        • skip: True
    • print: **** done remount client
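
Only client.0 is upgraded; the two ceph-fuse blocks then perform the remount: the first (mounted: False) unmounts client.0 while client.1 is skipped so its nautilus session stays up, and the second mounts client.0 again with the upgraded packages. A hedged manual equivalent (mountpoint path assumed as before):

    # on client.0: drop the old FUSE mount, remount with the new binary
    sudo fusermount -u /mnt/client.0
    sudo ceph-fuse -n client.0 /mnt/client.0
    ceph-fuse --version    # should now report a pacific build
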
    • exec:
      • mon.a:
        • ceph fs dump --format=json-pretty
        • ceph fs required_client_features cephfs add metric_collect
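
This is the compat gate at the heart of 4-compat_client: metric_collect is a client feature bit that the nautilus client does not advertise, so adding it to required_client_features makes the MDS reject and evict any session that lacks it. The flag just set can be read back:

    # the mdsmap portion of the dump lists the required client features
    ceph fs dump | grep required_client_features   # should include metric_collect
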
    • sleep:
      • duration: 5
    • fs.clients_evicted:
      • clients:
        • client.0: False
        • client.1: True
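
After a short grace period, fs.clients_evicted asserts the expected split: client.0 (upgraded, advertises metric_collect) keeps its session, while client.1 (still on nautilus) is evicted. A hedged way to confirm the same by hand is to list the sessions the active MDS still holds (substitute whichever MDS is active):

    # only client.0's session should remain
    ceph tell mds.a session ls
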
    • workunit:
      • clients:
        • client.0:
          • suites/fsstress.sh
      • branch: wip-yuri8-testing-2023-08-11-0834-pacific
      • sha1: 602c756589be2a5f0615079ebe6308eccb10f42c
    • print: **** done fsstress
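
The closing fsstress workunit exercises the upgraded client.0 mount against the upgraded cluster. Workunits run with their working directory inside the CephFS mount; a hypothetical manual equivalent (checkout location and mountpoint are assumptions):

    cd /mnt/client.0
    sh /path/to/ceph/qa/workunits/suites/fsstress.sh
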
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-08-15 01:25:18
  • started: 2023-08-15 05:11:56
  • updated: 2023-08-15 05:51:18
  • status_class: success
  • runtime: 0:39:22
  • wait_time: 0:12:15
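
A consistency note on the timing fields: runtime (0:39:22) is the wall-clock span from started (05:11:56) to updated (05:51:18), and wait_time (0:12:15) is simply runtime minus the 0:27:07 duration.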