Ceph Branch
Suite Branch
Teuthology Branch
Machine
OS
Description
Failure Reason
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}}
Error reimaging machines: Failed to power on smithi115
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} thrashers/sync workloads/rados_mon_osdmap_prune}
{'smithi203.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}, 'smithi012.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-stupid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/classic start}
Command failed on smithi008 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/master}
Command failed on smithi115 with status 1: 'sudo kubeadm init --node-name smithi115 --token abcdef.rb088tgj6wrvheaq --pod-network-cidr 10.251.144.0/21'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-services/mirror 3-final}
Command failed on smithi140 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root}
Command failed on smithi090 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton/{all/pg-autoscaler-progress-off mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final}
Command failed on smithi038 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/dashboard/{centos_8.stream_container_tools clusters/{2-node-mgr} debug/mgr mon_election/classic random-objectstore$/{bluestore-comp-zstd} supported-random-distro$/{ubuntu_latest} tasks/dashboard}
Command failed on smithi106 with status 1: 'TESTDIR=/home/ubuntu/cephtest bash -s'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/upgrade/nautilus-x-singleton/{0-cluster/{openstack start} 1-install/nautilus 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{rbd-cls rbd-import-export readwrite snaps-few-objects} 5-workload/{radosbench rbd_api} 6-finish-upgrade 7-pacific 8-workload/{rbd-python snaps-many-objects} bluestore-bitmap mon_election/classic thrashosds-health ubuntu_18.04}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/rbd_iscsi/{base/install cluster/{fixed-3 openstack} pool/datapool supported-random-distro$/{rhel_8} workloads/ceph_iscsi}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/fastclose objectstore/bluestore-bitmap rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/classic start}
Error reimaging machines: Failed to power on smithi083
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs-ingress 3-final}
Error reimaging machines: Failed to power on smithi039
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/small-objects-balanced}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/cephadm/smoke-singlehost/{0-distro$/{ubuntu_20.04} 1-start 2-services/basic 3-final}
Command failed on smithi137 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/singleton/{all/random-eio mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton/{all/rebuild-mondb mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/test_rbd_api}
Command failed on smithi039 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final}
Command failed on smithi115 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi170 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=25247086556727088a4e5f94004449b27369ea05 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} thrashers/many workloads/rados_mon_workunits}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/ceph-post-file mon_election/classic rados supported-random-distro$/{centos_8}}
Command failed (workunit test post-file.sh) on smithi139 with status 255: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=25247086556727088a4e5f94004449b27369ea05 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/post-file.sh'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton/{all/resolve_stuck_peering mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/singleton-nomsgr/{all/export-after-evict mon_election/connectivity rados supported-random-distro$/{rhel_8}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/singleton/{all/test-crash mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/luminous-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/radosbench}
Command failed on smithi038 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/fastclose objectstore/bluestore-stupid rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=3-m=1}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
Command failed on smithi055 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/connectivity start}
Command failed on smithi146 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-small-objects-overwrites}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/singleton-nomsgr/{all/health-warnings mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root}
Command failed on smithi134 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-lz4 rados tasks/rados_cls_all validater/lockdep}
Error reimaging machines: Failed to power on smithi106
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/connectivity objectstore/bluestore-hybrid supported-random-distro$/{centos_8} tasks/failover}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/one workloads/snaps-few-objects}
{'smithi026.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}, 'smithi006.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/write_fadvise_dontneed}
{'smithi196.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}, 'smithi149.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} tasks/rados_striper}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/none msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
Error reimaging machines: Failed to power on smithi137
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/1.6.2}
Command failed on smithi033 with status 1: 'sudo kubeadm init --node-name smithi033 --token abcdef.2104j6mljg4vsd28 --pod-network-cidr 10.249.0.0/21'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/mimic-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/fastclose rados thrashers/mapgap thrashosds-health workloads/snaps-few-objects}
Command failed on smithi016 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
 
rados/cephadm/dashboard/{0-distro/ignorelist_health task/test_e2e}
Failed to fetch package version from https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=25247086556727088a4e5f94004449b27369ea05
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{default} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} thrashers/mapgap thrashosds-health workloads/cache-agent-big}
Error reimaging machines: Failed to power on smithi183
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
{'smithi156.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}, 'smithi136.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}}
Stale jobs detected, aborting.
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/singleton/{all/thrash-rados/{thrash-rados thrashosds-health} mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}}
Stale jobs detected, aborting.
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/connectivity start}
Command failed on smithi006 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/smoke-singlehost/{0-distro$/{ubuntu_18.04} 1-start 2-services/rgw 3-final}
Command failed on smithi103 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/standalone/{supported-random-distro$/{centos_8} workloads/mgr}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-snappy rados tasks/mon_recovery validater/valgrind}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic objectstore/bluestore-low-osd-mem-target supported-random-distro$/{ubuntu_latest} tasks/insights}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} thrashers/sync-many workloads/pool-create-delete}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-comp-snappy rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/mimic backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/test_rbd_api}
Command failed on smithi089 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rm-zap-add}
Command failed on smithi088 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-zlib rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
Error reimaging machines: Failed to power on smithi164
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm}
Error reimaging machines: Failed to power on smithi134
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/osd-delay rados thrashers/none thrashosds-health workloads/cache-snaps}
Error reimaging machines: SSH connection to smithi164 was lost: "sudo sed -i -e 's/smithi164/smithi164/g' /etc/hosts"
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v2only root}
Stale jobs detected, aborting.
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/with-work/{0-distro/rhel_8.4_container_tools_3.0 fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests}
SSH connection to smithi061 was lost: 'sudo DEBIAN_FRONTEND=noninteractive apt-get -y install linux-image-generic'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}}
Stale jobs detected, aborting.
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/fastclose rados thrashers/pggrow thrashosds-health workloads/radosbench}
Stale jobs detected, aborting.
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/classic start}
Stale jobs detected, aborting.
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async root}
Command failed on smithi136 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/ec-small-objects-fast-read}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/singleton/{all/divergent_priors2 mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{ubuntu_18.04} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/rbd_cls}
timed out
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/upgrade/nautilus-x-singleton/{0-cluster/{openstack start} 1-install/nautilus 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{rbd-cls rbd-import-export readwrite snaps-few-objects} 5-workload/{radosbench rbd_api} 6-finish-upgrade 7-pacific 8-workload/{rbd-python snaps-many-objects} bluestore-bitmap mon_election/connectivity thrashosds-health ubuntu_18.04}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/rbd_iscsi/{base/install cluster/{fixed-3 openstack} pool/datapool supported-random-distro$/{rhel_8} workloads/ceph_iscsi}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/classic start}
Command failed on smithi134 with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/calico rook/master}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/with-work/{0-distro/ubuntu_18.04 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-hybrid rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/standalone/{supported-random-distro$/{rhel_8} workloads/mon}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/task/cancel (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi044 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=25247086556727088a4e5f94004449b27369ea05 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/mapgap thrashosds-health workloads/redirect_promote_tests}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} thrashers/many workloads/rados_mon_osdmap_prune}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/ceph-post-file mon_election/connectivity rados supported-random-distro$/{centos_8}}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/task/cancel (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python}
{'smithi080.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}, 'smithi047.front.sepia.ceph.com': {'changed': False, 'msg': 'All items completed', 'results': [{'_ansible_item_label': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, '_ansible_no_log': False, 'ansible_loop_var': 'item', 'changed': False, 'err': " /dev/vg_nvme: already exists in filesystem\n Run `vgcreate --help' for more information.\n", 'failed': True, 'invocation': {'module_args': {'force': False, 'pesize': '4', 'pv_options': '', 'pvresize': False, 'pvs': ['/dev/nvme0n1'], 'state': 'present', 'vg': 'vg_nvme', 'vg_options': ''}}, 'item': {'key': 'vg_nvme', 'value': {'pvs': '/dev/nvme0n1'}}, 'msg': "Creating volume group 'vg_nvme' failed", 'rc': 3}]}}
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/osd-delay objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/redirect_set_object}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/singleton/{all/max-pg-per-osd.from-mon mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/repave-all}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{ubuntu_18.04} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/cache-snaps}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/client-keyring 3-final}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/smoke/{0-nvme-loop distro/ubuntu_18.04 fixed-2 mon_election/connectivity start}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/singleton/{all/max-pg-per-osd.from-replica mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest}}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/one workloads/rados_mon_workunits}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/cephadm/smoke-roleless/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-services/mirror 3-final}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
 
rados/cephadm/dashboard/{0-distro/ignorelist_health task/test_e2e}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/snaps-few-objects-balanced}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-nvme-loop distro/centos_8.stream_container_tools fixed-2 mon_election/connectivity start}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
rhel 8.4
rados/objectstore/{backends/objectcacher-stress supported-random-distro$/{rhel_8}}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/cephadm/smoke-singlehost/{0-distro$/{ubuntu_20.04} 1-start 2-services/rgw 3-final}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-stupid rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rm-zap-flag}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/fastclose objectstore/filestore-xfs rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 20.04
rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/scrub}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
wip-yuri3-testing-2022-12-14-0855-pacific
wip-yuri3-testing-2022-12-14-0855-pacific
main
smithi
ubuntu 18.04
rados/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/3-node k8s/1.21 net/calico rook/1.6.2}
Error reimaging machines: HTTPConnectionPool(host='fog.front.sepia.ceph.com', port=80): Max retries exceeded with url: /fog/host (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))
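The single most common failure reason above is the cephadm fetch step, "Command failed ... with status 2: 'git archive --remote=https://git.ceph.com/ceph-ci.git 25247086556727088a4e5f94004449b27369ea05 src/cephadm/cephadm | tar -xO src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm'". That same command can be rerun by hand outside teuthology; the sketch below is a minimal reproduction under stated assumptions (a shell with git installed and network access to git.ceph.com; the output path /tmp/cephadm is arbitrary), not part of the run above:

# Re-run the cephadm fetch that failed in many of the jobs above.
# A non-zero exit from `git archive` here often indicates the sha1 is
# no longer reachable on the remote (e.g. the ceph-ci branch was
# deleted or garbage-collected), rather than a problem on the node.
sha1=25247086556727088a4e5f94004449b27369ea05
git archive --remote=https://git.ceph.com/ceph-ci.git "$sha1" src/cephadm/cephadm \
  | tar -xO src/cephadm/cephadm > /tmp/cephadm \
  && echo "fetched cephadm for $sha1"

Because this step runs before any Ceph daemons are started, a failure here aborts the job immediately, which is consistent with the many jobs above that report only this command and no further activity.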