Description: rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-stupid supported-random-distro$/{centos_8} workloads/rbd-mirror-journal-bootstrap-workunit}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2024-06-11_22:16:02-rbd-reef-distro-default-smithi/7750916/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=90ef52e0876140b391e95f7eda3f395d

Failure Reason:

Command failed on smithi067 with status 1: 'sudo yum install -y kernel'
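
The job failed during node provisioning, before any Ceph task ran: the kernel task's 'sudo yum install -y kernel' exited non-zero on smithi067. The run date (2024-06-11) falls just after CentOS Stream 8 went EOL (2024-05-31) and its packages were dropped from the regular mirrors, which was a common cause of yum failures on centos_8 test nodes around this time. A minimal sketch for checking this by hand on the target (the SSH user and the vault repo rewrite are assumptions, not taken from this log):

    # hypothetical manual check on the failed target
    ssh ubuntu@smithi067.front.sepia.ceph.com
    sudo yum install -y kernel; echo "exit status: $?"
    # assumed workaround if the mirrors 404: repoint the repos at vault.centos.org
    sudo sed -i 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/CentOS-*.repo
    sudo sed -i 's|^#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|' /etc/yum.repos.d/CentOS-*.repo
    sudo yum clean all && sudo yum install -y kernel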

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2024-06-11_22:16:02-rbd-reef-distro-default-smithi/7750916/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2024-06-11_22:16:02-rbd-reef-distro-default-smithi/7750916
  • description: rbd/mirror/{base/install clients/{mirror-extra mirror} cluster/{2-node openstack} conf/{disable-pool-app} msgr-failures/few objectstore/bluestore-stupid supported-random-distro$/{centos_8} workloads/rbd-mirror-journal-bootstrap-workunit}
  • duration:
  • email: ceph-qa@ceph.com
  • failure_reason: Command failed on smithi067 with status 1: 'sudo yum install -y kernel'
  • flavor:
  • job_id: 7750916
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2024-06-11_22:16:02-rbd-reef-distro-default-smithi
  • nuke_on_error:
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: reef
    • ceph:
      • conf:
        • client:
          • debug rbd: 20
          • debug rbd_mirror: 15
          • log to stderr: False
          • rbd default features: 125
        • client.mirror.0:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • client.mirror.1:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • client.mirror.2:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • client.mirror.3:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • client.mirror.4:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • client.mirror.5:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • client.mirror.6:
          • admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
          • pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
        • global:
          • mon client directed command retry: 5
          • mon warn on pool no app: False
          • ms inject socket failures: 5000
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore allocator: stupid
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug ms: 1
          • debug osd: 20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(OSD_SLOW_PING_TIME
      • sha1: f1abf795f4fc15bf2f30ac90c811a53ab21d5d65
    • ceph-deploy:
      • bluestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • bdev async discard: True
          • bdev enable discard: True
          • bluestore block size: 96636764160
          • bluestore fsck on mount: True
          • debug bluefs: 1/20
          • debug bluestore: 1/20
          • debug rocksdb: 4/10
          • mon osd backfillfull_ratio: 0.85
          • mon osd full ratio: 0.9
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • osd objectstore: bluestore
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: f1abf795f4fc15bf2f30ac90c811a53ab21d5d65
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: reef
      • sha1: 55a56d9f0fe9010ad225380bd1eea252c51d834d
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['cluster1.mon.a', 'cluster1.mgr.x', 'cluster2.mgr.x', 'cluster1.osd.0', 'cluster1.osd.1', 'cluster1.osd.2', 'cluster1.client.0', 'cluster2.client.0']
    • ['cluster2.mon.a', 'cluster2.osd.0', 'cluster2.osd.1', 'cluster2.osd.2', 'cluster1.client.mirror', 'cluster1.client.mirror.0', 'cluster1.client.mirror.1', 'cluster1.client.mirror.2', 'cluster1.client.mirror.3', 'cluster1.client.mirror.4', 'cluster1.client.mirror.5', 'cluster1.client.mirror.6', 'cluster2.client.mirror', 'cluster2.client.mirror.0', 'cluster2.client.mirror.1', 'cluster2.client.mirror.2', 'cluster2.client.mirror.3', 'cluster2.client.mirror.4', 'cluster2.client.mirror.5', 'cluster2.client.mirror.6']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=90ef52e0876140b391e95f7eda3f395d
  • status: fail
  • success: False
  • branch: reef
  • seed: 1368
  • sha1: f1abf795f4fc15bf2f30ac90c811a53ab21d5d65
  • subset: 120/128
  • suite: rbd
  • suite_branch: reef
  • suite_path: /home/teuthworker/src/git.ceph.com_ceph_55a56d9f0fe9010ad225380bd1eea252c51d834d/qa
  • suite_relpath: qa
  • suite_repo: https://git.ceph.com/ceph.git
  • suite_sha1: 55a56d9f0fe9010ad225380bd1eea252c51d834d
  • targets:
    • smithi044.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOSy13JdULYcsSTsXJ+0E7CenK3XDiVzVgUAuDSQ3tQVHbdGAePIws+9Tjy+bAISrExTBMd+nzqWPAiTN3R42Jg=
    • smithi067.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFQOqt+iY/Vu1RmJcxI3QsDw9zcOyl32w6lQJlDt19xIsP/NG+6Txk0dZlNW1ppzr28TMlvX/QWd2qxUKuLkpFs=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_packages:
        • rbd-mirror
    • ceph:
      • cluster: cluster1
    • ceph:
      • cluster: cluster2
    • exec:
      • cluster1.client.mirror.0:
        • sudo ceph --cluster cluster1 auth caps client.mirror.4 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster1 auth caps client.mirror.5 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster1 auth caps client.mirror.6 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.4 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.5 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.6 mon 'profile rbd-mirror' osd 'profile rbd'
    • exec:
      • cluster1.client.mirror.0:
        • sudo ceph --cluster cluster1 auth caps client.mirror mon 'profile rbd-mirror-peer' osd 'profile rbd'
        • sudo ceph --cluster cluster1 auth caps client.mirror.0 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster1 auth caps client.mirror.1 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster1 auth caps client.mirror.2 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster1 auth caps client.mirror.3 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror mon 'profile rbd-mirror-peer' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.0 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.1 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.2 mon 'profile rbd-mirror' osd 'profile rbd'
        • sudo ceph --cluster cluster2 auth caps client.mirror.3 mon 'profile rbd-mirror' osd 'profile rbd'
    • workunit:
      • clients:
        • cluster1.client.mirror:
          • rbd/rbd_mirror_bootstrap.sh
      • env:
        • CEPH_ARGS:
        • MIRROR_IMAGE_MODE: journal
        • MIRROR_POOL_MODE: pool
        • RBD_MIRROR_INSTANCES: 1
        • RBD_MIRROR_USE_EXISTING_CLUSTER: 1
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 930
  • user: teuthology
  • queue:
  • posted: 2024-06-11 22:17:08
  • started:
  • updated: 2024-06-30 13:32:04
  • status_class: danger
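
For reference, the workload this job never reached is the rbd/rbd_mirror_bootstrap.sh workunit, driven by the env block in the workunit task above. A minimal sketch of invoking it by hand, assuming a local ceph source checkout and two already-running test clusters named cluster1 and cluster2 (the checkout path is an assumption):

    # hypothetical manual invocation of the bootstrap workunit
    cd ceph/qa/workunits
    RBD_MIRROR_USE_EXISTING_CLUSTER=1 \
    RBD_MIRROR_INSTANCES=1 \
    MIRROR_POOL_MODE=pool \
    MIRROR_IMAGE_MODE=journal \
    CEPH_ARGS='' \
    bash rbd/rbd_mirror_bootstrap.sh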