Description: rados:cephadm/with-work/{distro/rhel_8.0.yaml fixed-2.yaml mode/root.yaml msgr/async-v2only.yaml start.yaml tasks/rados_api_tests.yaml}

Log: http://qa-proxy.ceph.com/teuthology/sage-2020-03-25_20:16:22-rados:cephadm-wip-sage-testing-2020-03-25-1347-distro-basic-smithi/4889763/teuthology.log

Sentry event: http://sentry.ceph.com/sepia/teuthology/?q=5d31f446d6114fd69d0fc22e5ff0165b

Failure Reason:

Command failed on smithi059 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph-ci/ceph:7652fa0992fd1ad5a27faa33e67a4a6608cdb918 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2cd12c2a-6ed8-11ea-924a-001a4aab830c -- ceph orch daemon add osd smithi059:vg_nvme/lv_1'
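
Exit status 22 corresponds to EINVAL, so the 'ceph orch daemon add osd' call was most likely rejected as invalid (e.g. a device spec the orchestrator could not use for vg_nvme/lv_1) rather than timing out. As a starting point for triage, the failing command can be re-run by hand on the target node. The sketch below only reflows the command quoted above (image, fsid, and host:vg/lv spec are taken verbatim from this job); the follow-up 'ceph orch device ls' check is a suggested next step, not something the job ran.

    # Reproduction sketch: re-run the failing orchestrator call on smithi059.
    # All values (image, fsid, device spec) are copied from the failure reason.
    sudo /home/ubuntu/cephtest/cephadm \
      --image quay.io/ceph-ci/ceph:7652fa0992fd1ad5a27faa33e67a4a6608cdb918 \
      shell -c /etc/ceph/ceph.conf \
            -k /etc/ceph/ceph.client.admin.keyring \
            --fsid 2cd12c2a-6ed8-11ea-924a-001a4aab830c \
      -- ceph orch daemon add osd smithi059:vg_nvme/lv_1

    # Suggested follow-up (an assumption, not part of this job's record): list
    # the storage devices the orchestrator can see, to confirm the LV is usable.
    sudo /home/ubuntu/cephtest/cephadm \
      --image quay.io/ceph-ci/ceph:7652fa0992fd1ad5a27faa33e67a4a6608cdb918 \
      shell -c /etc/ceph/ceph.conf \
            -k /etc/ceph/ceph.client.admin.keyring \
            --fsid 2cd12c2a-6ed8-11ea-924a-001a4aab830c \
      -- ceph orch device ls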

  • log_href: http://qa-proxy.ceph.com/teuthology/sage-2020-03-25_20:16:22-rados:cephadm-wip-sage-testing-2020-03-25-1347-distro-basic-smithi/4889763/teuthology.log
  • archive_path: /home/teuthworker/archive/sage-2020-03-25_20:16:22-rados:cephadm-wip-sage-testing-2020-03-25-1347-distro-basic-smithi/4889763
  • description: rados:cephadm/with-work/{distro/rhel_8.0.yaml fixed-2.yaml mode/root.yaml msgr/async-v2only.yaml start.yaml tasks/rados_api_tests.yaml}
  • duration: 0:16:44
  • email:
  • failure_reason: Command failed on smithi059 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph-ci/ceph:7652fa0992fd1ad5a27faa33e67a4a6608cdb918 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2cd12c2a-6ed8-11ea-924a-001a4aab830c -- ceph orch daemon add osd smithi059:vg_nvme/lv_1'
  • flavor: basic
  • job_id: 4889763
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: sage-2020-03-25_20:16:22-rados:cephadm-wip-sage-testing-2020-03-25-1347-distro-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.0
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • sha1: 7652fa0992fd1ad5a27faa33e67a4a6608cdb918
      • branch: wip-sage-testing-2020-03-25-1347
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • reached quota
        • but it is still running
        • overall HEALTH_
        • \(POOL_FULL\)
        • \(SMALLER_PGP_NUM\)
        • \(CACHE_POOL_NO_HIT_SET\)
        • \(CACHE_POOL_NEAR_FULL\)
        • \(POOL_APP_NOT_ENABLED\)
        • \(PG_AVAILABILITY\)
        • \(PG_DEGRADED\)
      • conf:
        • client:
          • debug ms: 1
        • global:
          • ms bind msgr1: False
          • ms type: async
          • ms bind msgr2: True
        • osd:
          • debug osd: 25
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 20
          • osd shutdown pgref assert: True
        • mon:
          • mon warn on pool no app: False
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
      • sha1: 7652fa0992fd1ad5a27faa33e67a4a6608cdb918
    • cephadm:
      • cephadm_mode: root
    • install:
      • ceph:
        • sha1: 7652fa0992fd1ad5a27faa33e67a4a6608cdb918
    • admin_socket:
      • branch: wip-sage-testing-2020-03-25-1347
  • owner: scheduled_sage@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a']
    • ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b']
  • sentry_event: http://sentry.ceph.com/sepia/teuthology/?q=5d31f446d6114fd69d0fc22e5ff0165b
  • status: fail
  • success: False
  • branch: wip-sage-testing-2020-03-25-1347
  • seed:
  • sha1: 7652fa0992fd1ad5a27faa33e67a4a6608cdb918
  • subset:
  • suite:
  • suite_branch: wip-sage-testing-2020-03-25-1347
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 7652fa0992fd1ad5a27faa33e67a4a6608cdb918
  • targets:
    • smithi059.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCb+0oSVVRnh+qqM/P0uXu+QoMiKCfKM3wroWCIszLNuTIbSK5S3Y+96nR6lsyiWARvP1QiR9uAXglcbKCggHiONA/fcZT3dD3ZmTr/zp+TSzKDFGqXV3jmiY0WBtwg0oJMZKHkKgMA3NcjTbjW1+8CON8wzXSiCnuVsGaFuvXT2HvATrffd4bUewrnSaM7yX6xUwlTN5JEyt1jA518baov99Vn12i4DXKkOBkMBMepybcxY+iAwPuCP+0SrNyQFhmIje0PWhwLaGRuHoChfvcFr2mvVyefZK14V8m6S1hi8VYQEt5Ctf6GMEIr7a1Zjv1MtakoU1xPzsCcdhtk+FvC2NraAPq8VIGnUQPohTsbtF4OXp/jA2p7Qd3l6Zz+Q+AUuZhCUiYv+Np1Ftcme9NnOqh9Y4kOd+IMEeCJ4yZho09Iied6tSgT8xV5trBhSbNHyTf6CQUpYdLXbjjj8fZp25S54pTAa9yH5vh3AhiQKxhkB2ESfIj5K3qka2EvRG8=
    • smithi044.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC/EDx6ejHKseqaGivXLGBktkEew+HMIEu+C/oFhVdQvM2+bRO1k3wjVhGL89DfqRiyXyamLGNhIZhyoVpAjnuH1k+ARAdytzoZlVh7jV1/0nO0QigQ0uUpCMEN1GlETYhHC4CewY2zTDO4glZ3GcXMTl9UdcitYseZYGCPOjAy1MwW1k6eslhFe+2BGOGzn2yoBv1ZroEZ0m/JrjttTQGTLfAkaTHsDcuVc9Q3JL+Vr7QVHVQJCFGzTsg2OEFmIhAVKp7bPbMuTwzteGVp14UlO0Iuax3h6Eat3xyt5hqTzn/vcPs7fnEj16PSAGLjK31l/fV+HM514onsd3GLbGWNsAVUuVg6u5TZ4TTldSKq5YoAmVyrr+TVdQMXU08S1uK3s+UGbKp5K0gJRrI/s6vlXIPX2v4gmxoSL1vXcun9Ez6PxdBN92YJuvGbfrV2hp0UGSr7MsStv5ujd+5Xvp418svez4HMV90Pdk8ab/VIXZGNYVnzGZp3L5iqgczt66M=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 2
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
      • cluster: ceph
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • reached quota
        • but it is still running
        • overall HEALTH_
        • \(POOL_FULL\)
        • \(SMALLER_PGP_NUM\)
        • \(CACHE_POOL_NO_HIT_SET\)
        • \(CACHE_POOL_NEAR_FULL\)
        • \(POOL_APP_NOT_ENABLED\)
        • \(PG_AVAILABILITY\)
        • \(PG_DEGRADED\)
      • conf:
        • global:
          • ms bind msgr1: False
          • ms type: async
          • ms bind msgr2: True
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • debug ms: 1
        • osd:
          • debug osd: 25
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 20
          • osd shutdown pgref assert: True
        • mon:
          • mon warn on pool no app: False
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
      • cephadm_mode: root
      • sha1: 7652fa0992fd1ad5a27faa33e67a4a6608cdb918
    • workunit:
      • clients:
        • client.0:
          • rados/test.sh
          • rados/test_pool_quota.sh
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2020-03-25 20:16:34
  • started: 2020-03-25 20:18:43
  • updated: 2020-03-25 20:44:43
  • status_class: danger
  • runtime: 0:26:00
  • wait_time: 0:09:16
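
Note that the job failed while the cephadm task was still deploying OSDs, so the rados_api_tests workunit listed under tasks never actually ran. For reference, the same scripts can be exercised by hand against a running cluster; a minimal sketch, assuming a ceph.git checkout and a cluster reachable via the default ceph.conf and client.admin keyring (the environment details are assumptions, not part of this record):

    # Minimal sketch: run the job's workunit scripts from a ceph.git checkout.
    # Paths follow the qa/workunits layout; cluster setup is assumed.
    cd ceph/qa/workunits
    ./rados/test.sh             # rados API tests
    ./rados/test_pool_quota.sh  # pool quota tests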