Description: nvmeof/thrash/{base/install centos_latest clusters/3-gateways-1-initiator conf/{disable-pool-app} gateway-initiator-setup/3-subsys-60-namespace thrashers/nvmeof_thrash workloads/fio}

Log: http://qa-proxy.ceph.com/teuthology/sjust-2024-08-25_23:01:12-nvmeof-wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25-distro-default-smithi/7872853/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=245e6e499eb143d894cdefa48160c338

Failure Reason:

Command failed on smithi016 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c57179276fc0818758d8def8a0709be23969dbeb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 548e827c-6338-11ef-bcd2-c7b262605968 -- ceph orch apply nvmeof mypool --placement '3;smithi016=nvmeof.a;smithi082=nvmeof.b;smithi117=nvmeof.c'"
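
Exit status 22 corresponds to EINVAL, i.e. the mgr orchestrator rejected the arguments of `ceph orch apply nvmeof mypool --placement ...` rather than hitting a transport or daemon failure. Below is a minimal reproduction sketch, assuming the pool and hosts from the failing placement string; the explicit service-spec form is only an illustration, and the exact nvmeof spec fields (for example a required group) vary by Ceph release:

# Hedged sketch -- run inside "cephadm shell" on smithi016 against the same cluster.
# The spec mirrors the failing one-liner; field names follow the documented nvmeof
# service spec and may need adjusting for this particular ceph-ci build.
cat > /tmp/nvmeof.yaml <<'EOF'
service_type: nvmeof
service_id: mypool
placement:
  hosts:
    - smithi016
    - smithi082
    - smithi117
spec:
  pool: mypool
EOF
ceph orch apply -i /tmp/nvmeof.yaml

# Check what the orchestrator actually scheduled (similar to the commands the
# cephadm.shell task runs earlier in the job).
ceph orch ls nvmeof
ceph orch ps | grep nvmeof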

  • log_href: http://qa-proxy.ceph.com/teuthology/sjust-2024-08-25_23:01:12-nvmeof-wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25-distro-default-smithi/7872853/teuthology.log
  • archive_path: /home/teuthworker/archive/sjust-2024-08-25_23:01:12-nvmeof-wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25-distro-default-smithi/7872853
  • description: nvmeof/thrash/{base/install centos_latest clusters/3-gateways-1-initiator conf/{disable-pool-app} gateway-initiator-setup/3-subsys-60-namespace thrashers/nvmeof_thrash workloads/fio}
  • duration: 0:12:42
  • email:
  • failure_reason: Command failed on smithi016 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:c57179276fc0818758d8def8a0709be23969dbeb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 548e827c-6338-11ef-bcd2-c7b262605968 -- ceph orch apply nvmeof mypool --placement '3;smithi016=nvmeof.a;smithi082=nvmeof.b;smithi117=nvmeof.c'"
  • flavor: default
  • job_id: 7872853
  • kernel:
    • kdb: 1
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: sjust-2024-08-25_23:01:12-nvmeof-wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25-distro-default-smithi
  • nuke_on_error:
  • os_type: centos
  • os_version: 9.stream
  • overrides:
    • admin_socket:
      • branch: wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25
    • ceph:
      • conf:
        • global:
          • mon warn on pool no app: False
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • CEPHADM_FAILED_DAEMON
        • is in error state
        • failed cephadm daemon
      • sha1: c57179276fc0818758d8def8a0709be23969dbeb
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
    • install:
      • ceph:
        • flavor: default
        • sha1: c57179276fc0818758d8def8a0709be23969dbeb
    • workunit:
      • branch: wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25
      • sha1: c57179276fc0818758d8def8a0709be23969dbeb
  • owner: scheduled_sjust@teuthology
  • pid:
  • roles:
    • ['host.a', 'mon.a', 'mgr.x', 'osd.0', 'osd.1', 'client.0', 'ceph.nvmeof.nvmeof.a']
    • ['host.b', 'mon.b', 'osd.2', 'osd.3', 'osd.4', 'client.1', 'ceph.nvmeof.nvmeof.b']
    • ['host.c', 'mon.c', 'osd.5', 'osd.6', 'osd.7', 'client.2', 'ceph.nvmeof.nvmeof.c']
    • ['client.3']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=245e6e499eb143d894cdefa48160c338
  • status: fail
  • success: False
  • branch: wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25
  • seed: 6538
  • sha1: c57179276fc0818758d8def8a0709be23969dbeb
  • subset:
  • suite: nvmeof
  • suite_branch: wip-sjust-nvmeof-testing-nvmeof-reenable-with-59385-2024-08-25
  • suite_path: /home/teuthworker/src/github.com_ceph_ceph-c_c57179276fc0818758d8def8a0709be23969dbeb/qa
  • suite_relpath: qa
  • suite_repo: https://github.com/ceph/ceph-ci.git
  • suite_sha1: c57179276fc0818758d8def8a0709be23969dbeb
  • targets:
    • smithi016.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBI2rPE8yGADc6SWvm7L/1C3P54jfTT5l235sUdfLR1KigDByhzClawGptbsSGA+bHuAg3p7jREDAs0eMOmkVrWE=
    • smithi082.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKwly+aIcloMNawIyQuBLJSLY6DbJD5zqFN5nsWJnc/vR7qkP2Leocd8wmNPV5FgLjj66ZdtOVoPrxVLLPNwAc4=
    • smithi117.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFjsV31TQy+WAdz4kFk7RrsFxFYbQaRtUZpCyMVPAdc+Y8qDxu9IrqvfPkpI5CaU1abRZWdJSK1+s23lB7hS0FY=
    • smithi130.front.sepia.ceph.com: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCgyE/C1dk4jhFDZ0kbsqUR6T66rRsqMzKO3sh3C/ToKFW25CPoi7hfxvv+m1MQociWCg5ngogMhI8iPGB/BE9E=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: 1
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_packages:
        • nvme-cli
      • flavor: default
      • sha1: c57179276fc0818758d8def8a0709be23969dbeb
    • cephadm:
      • watchdog_setup:
      • conf:
        • global:
          • mon warn on pool no app: False
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon down mkfs grace: 300
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • CEPHADM_FAILED_DAEMON
        • is in error state
        • failed cephadm daemon
      • sha1: c57179276fc0818758d8def8a0709be23969dbeb
      • cluster: ceph
      • cephadm_mode: root
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch host ls
        • ceph orch device ls
        • ceph osd lspools
    • nvmeof:
      • client: client.0
      • gateway_config:
        • cli_image: quay.io/ceph/nvmeof-cli:1.2
        • namespaces_count: 20
        • subsystems_count: 3
      • gw_image: quay.io/ceph/nvmeof:1.2
      • rbd:
        • image_name_prefix: myimage
        • pool_name: mypool
    • cephadm.wait_for_service:
      • service: nvmeof.mypool
    • workunit:
      • clients:
        • client.3:
          • rbd/nvmeof_setup_subsystem.sh
          • rbd/nvmeof_basic_tests.sh
      • env:
        • RBD_IMAGE_PREFIX: myimage
        • RBD_POOL: mypool
      • no_coverage_and_limits: True
    • nvmeof.thrash:
      • checker_host: client.3
    • workunit:
      • clients:
        • client.3:
          • rbd/nvmeof_fio_test.sh --rbd_iostat
      • env:
        • IOSTAT_INTERVAL: 10
        • RBD_POOL: mypool
        • RUNTIME: 600
      • no_coverage_and_limits: True
      • timeout: 30m
  • teuthology_branch: main
  • verbose: False
  • pcp_grafana_url:
  • priority: 51
  • user: sjust
  • queue:
  • posted: 2024-08-25 23:01:31
  • started: 2024-08-25 23:05:57
  • updated: 2024-08-25 23:27:47
  • status_class: danger
  • runtime: 0:21:50
  • wait_time: 0:09:08
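
For context on the layout being exercised: the nvmeof task in the job above deploys gateways nvmeof.a/b/c from quay.io/ceph/nvmeof:1.2 and configures 3 subsystems with 20 namespaces each (the "3-subsys-60-namespace" facet), backed by RBD images with prefix myimage in pool mypool, while client.3 on smithi130 acts as the initiator (nvme-cli is installed via extra_packages). The following is a rough sketch of the initiator-side flow that the rbd/nvmeof_setup_subsystem.sh and rbd/nvmeof_basic_tests.sh workunits automate; the address is a placeholder, and the real scripts resolve listeners and NQNs through the nvmeof-cli container:

# Hedged initiator sketch for client.3. 192.0.2.10 is a placeholder for one of
# the gateway host addresses; 8009 is the standard NVMe/TCP discovery port.
GW_ADDR=192.0.2.10
sudo nvme discover -t tcp -a "$GW_ADDR" -s 8009      # list subsystems exported by the gateway
sudo nvme connect-all -t tcp -a "$GW_ADDR" -s 8009   # connect to everything found via discovery
sudo nvme list                                       # namespaces should map to mypool/myimage* RBD images
sudo nvme disconnect-all                             # clean up when done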
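
The final workunit is the actual I/O load: rbd/nvmeof_fio_test.sh with RBD_POOL=mypool, RUNTIME=600 and IOSTAT_INTERVAL=10 under a 30m timeout. Given the 0:12:42 duration and the failure in the `ceph orch apply nvmeof` step, this stage was presumably never reached in this run. A sketch of invoking the same workunit by hand, assuming a standard ceph checkout with the qa/workunits layout:

# Hedged manual invocation of the fio workunit on the initiator node.
# The environment variables mirror the env block of the workunit task above.
git clone --depth 1 https://github.com/ceph/ceph.git
cd ceph/qa/workunits
RBD_POOL=mypool RUNTIME=600 IOSTAT_INTERVAL=10 \
    ./rbd/nvmeof_fio_test.sh --rbd_iostat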