Description: fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mgr mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down pg_health} tasks/cfuse_workunit_suites_pjd}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-09-01_20:24:02-fs-main-distro-default-smithi/7885228/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-09-01_20:24:02-fs-main-distro-default-smithi/7885228/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-09-01_20:24:02-fs-main-distro-default-smithi/7885228
  • description: fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mgr mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down pg_health} tasks/cfuse_workunit_suites_pjd}
  • duration: 0:15:17
  • email: ceph-qa@ceph.com
  • failure_reason:
  • flavor: default
  • job_id: 7885228
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-09-01_20:24:02-fs-main-distro-default-smithi
  • nuke_on_error:
  • os_type: ubuntu
  • os_version: 22.04
  • overrides:
    • admin_socket:
      • branch: main
    • ceph:
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
      • cluster-conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 2
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mon:
          • mon op complaint time: 120
          • mon warn on pool no app: False
        • osd:
          • osd op complaint time: 180
      • conf:
        • client:
          • client use faked inos: True
          • fuse set user groups: True
        • mgr:
          • client mount timeout: 30
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • fs.*is degraded
        • filesystem is degraded
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • filesystem is offline
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • insufficient standby MDS daemons available
        • MDS_UP_LESS_THAN_MAX
        • online, but wants
        • filesystem is online with fewer MDS than max_mds
        • POOL_APP_NOT_ENABLED
        • do not have an application enabled
        • overall HEALTH_
        • Replacing daemon
        • deprecated feature inline_data
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
      • sha1: 83fc625b564ccf3927977ee1f86e2936b1b884e2
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 83fc625b564ccf3927977ee1f86e2936b1b884e2
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: main
      • sha1: 42c1c3d42aaa0b6c9bcbccb51c8b397fb980aa9f
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
  • sentry_event:
  • status: pass
  • success: True
  • branch: main
  • seed: 655
  • sha1: 83fc625b564ccf3927977ee1f86e2936b1b884e2
  • subset: 141/512
  • suite: fs
  • suite_branch: main
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph_42c1c3d42aaa0b6c9bcbccb51c8b397fb980aa9f/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph.git
  • suite_sha1: 42c1c3d42aaa0b6c9bcbccb51c8b397fb980aa9f
  • targets:
    • smithi096.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBxdiDpbbHwStNZalQJR7A89EH3rd4KHUWfWeDof9fXa5DIplrVTaSuOIeMYUZfdYcgKFrUskIONdAtiU0lE3y8=
    • smithi191.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB4hTzuDsAl1VE/0iG0anobQUSpF2RPmNJmO8dYyFoiP2nw8XL7cchG+RRoxatuBI5dhbTe12U3kKhaklfyNmic=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
        • rpm:
          • python3-cephfs
          • cephfs-shell
          • cephfs-top
          • cephfs-mirror
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • network-manager
          • iproute2
          • util-linux
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
          • postgresql
          • postgresql-client
          • postgresql-common
          • postgresql-contrib
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • NetworkManager
          • iproute
          • util-linux
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
          • postgresql
          • postgresql-server
          • postgresql-contrib
      • flavor: default
      • sha1: 83fc625b564ccf3927977ee1f86e2936b1b884e2
    • ceph:
      • create_rbd_pool: False
      • cephfs:
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
      • cluster-conf:
        • client:
          • client mount timeout: 600
          • debug client: 20
          • debug ms: 1
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mds:
          • debug mds: 20
          • debug mds balancer: 20
          • debug ms: 2
          • mds debug frag: True
          • mds debug scatterstat: True
          • mds op complaint time: 180
          • mds verify scatter: True
          • osd op complaint time: 180
          • rados mon op timeout: 900
          • rados osd op timeout: 900
        • mon:
          • mon op complaint time: 120
          • mon warn on pool no app: False
        • osd:
          • osd op complaint time: 180
      • conf:
        • client:
          • client use faked inos: True
          • fuse set user groups: True
        • mgr:
          • client mount timeout: 30
          • debug client: 20
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
        • osd:
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 20
          • debug bluestore: 20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • FS_DEGRADED
        • fs.*is degraded
        • filesystem is degraded
        • FS_INLINE_DATA_DEPRECATED
        • FS_WITH_FAILED_MDS
        • MDS_ALL_DOWN
        • filesystem is offline
        • MDS_DAMAGE
        • MDS_DEGRADED
        • MDS_FAILED
        • MDS_INSUFFICIENT_STANDBY
        • insufficient standby MDS daemons available
        • MDS_UP_LESS_THAN_MAX
        • online, but wants
        • filesystem is online with fewer MDS than max_mds
        • POOL_APP_NOT_ENABLED
        • do not have an application enabled
        • overall HEALTH_
        • Replacing daemon
        • deprecated feature inline_data
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • PG_AVAILABILITY
        • PG_DEGRADED
        • Reduced data availability
        • Degraded data redundancy
      • sha1: 83fc625b564ccf3927977ee1f86e2936b1b884e2
      • cluster: ceph
    • ceph-fuse:
    • workunit:
      • clients:
        • all:
          • suites/pjd.sh
      • timeout: 6h
      • branch: main
      • sha1: 42c1c3d42aaa0b6c9bcbccb51c8b397fb980aa9f
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 700
  • user: teuthology
  • queue:
  • posted: 2024-09-01 20:28:34
  • started: 2024-09-02 16:04:48
  • updated: 2024-09-02 16:31:30
  • status_class: success
  • runtime: 0:26:42
  • wait_time: 0:11:25
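
Notes on the configuration above. The objectstore-ec/bluestore-ec-root fragment together with the cephfs ec_profile (k=2, m=2, crush-failure-domain=osd) means the filesystem is created with an erasure-coded default data pool on BlueStore OSDs. A minimal manual sketch of that setup; the profile and pool names here are hypothetical (the ceph task creates its own):

    ceph osd erasure-code-profile set cephfs_ec k=2 m=2 crush-failure-domain=osd
    ceph osd pool create cephfs_metadata 16
    ceph osd pool create cephfs_data 16 erasure cephfs_ec
    ceph osd pool set cephfs_data allow_ec_overwrites true   # EC overwrites require BlueStore OSDs
    ceph fs new cephfs cephfs_metadata cephfs_data --force   # --force is needed when the default data pool is EC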
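
The 32bits and faked-ino fragments drive the two client options under overrides → ceph → conf → client. The job injects them as teuthology conf overrides; a rough sketch of setting the same options through the config database instead (an equivalent but not identical mechanism):

    ceph config set client client_use_faked_inos true    # hand out small, 32-bit-safe inode numbers to the client
    ceph config set client fuse_set_user_groups true     # have ceph-fuse resolve the caller's supplementary groups for permission checks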
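
The final ceph-fuse and workunit tasks mount the filesystem with FUSE on client.0 and run the pjd POSIX-compliance workunit (qa/workunits/suites/pjd.sh in ceph.git) with a 6h timeout. A rough manual equivalent, assuming a /mnt/cephfs mount point and a checkout of ceph.git at the suite sha1 (paths are placeholders):

    sudo ceph-fuse --id 0 /mnt/cephfs                                # mount CephFS as client.0
    cd /mnt/cephfs
    sudo timeout 6h bash /path/to/ceph/qa/workunits/suites/pjd.sh    # run the pjd tests inside the mount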