Each entry below lists, in order: Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Description, and Failure Reason (blank for jobs that did not record one). The ID, Status, and Nodes columns of the source results table are not included in this paste.
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/rgw 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/rbd_cls}
Command failed on smithi049 with status 5: 'sudo systemctl stop ceph-1055bc9a-fb8c-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-stupid openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/fio_4K_rand_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/mgr-nfs-upgrade/{0-centos_8.2_kubic_stable 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke/{distro/ubuntu_20.04 fixed-2 mon_election/classic start}
Command failed on smithi163 with status 5: 'sudo systemctl stop ceph-a255544e-fb8b-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/basic 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root}
Command failed on smithi083 with status 5: 'sudo systemctl stop ceph-88645ee0-fb8b-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/connectivity task/test_orch_cli}
Command failed on smithi146 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 98905508-fb8b-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/client-keyring 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/dashboard/{centos_8.2_kubic_stable debug/mgr mon_election/classic random-objectstore$/{filestore-xfs} tasks/e2e}
Command failed on smithi114 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f7e0a894-fb8d-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/master}
Command failed on smithi155 with status 1: 'sudo kubeadm init --node-name smithi155 --token abcdef.p1lajateofu62q44 --pod-network-cidr 10.252.208.0/21'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/iscsi 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/fio_4K_rand_rw}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/with-work/{0-distro/centos_8.3_kubic_stable fixed-2 mode/packaged mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests}
Command failed on smithi167 with status 5: 'sudo systemctl stop ceph-47fb64d0-fb8f-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke/{distro/centos_8.3_kubic_stable fixed-2 mon_election/connectivity start}
Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-b38f4a8c-fb8e-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-singlehost/{0-distro$/{centos_8.3_kubic_stable} 1-start 2-services/rgw 3-final}
Command failed on smithi071 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 64aff9f2-fb8e-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root}
Command failed on smithi117 with status 5: 'sudo systemctl stop ceph-34db479a-fb8e-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-start-upgrade 4-wait mon_election/classic}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/mirror 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-bitmap openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/fio_4M_rand_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/octopus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/osd-delay rados thrashers/none thrashosds-health workloads/snaps-few-objects}
Command failed on smithi111 with status 5: 'sudo systemctl stop ceph-8e323a6e-fb8f-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/nfs-ingress-rgw 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/classic task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi090 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7546c41ab524b652a8ef9ff4bc8783b116a2b3fb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/nfs-ingress 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/mgr-nfs-upgrade/{0-centos_8.2_kubic_stable 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke/{distro/rhel_8.3_kubic_stable fixed-2 mon_election/classic start}
Command failed on smithi152 with status 5: 'sudo systemctl stop ceph-17d9c070-fb90-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root}
Command failed on smithi175 with status 5: 'sudo systemctl stop ceph-a94a7f00-fb8f-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/nfs-ingress2 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-comp openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/fio_4M_rand_rw}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/with-work/{0-distro/rhel_8.3_kubic_stable fixed-2 mode/root mon_election/classic msgr/async start tasks/rados_python}
Command failed on smithi187 with status 5: 'sudo systemctl stop ceph-53f04466-fb91-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/defaut 3-start-upgrade 4-wait mon_election/connectivity}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/nfs 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/nfs2 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke/{distro/ubuntu_20.04 fixed-2 mon_election/connectivity start}
Command failed on smithi089 with status 5: 'sudo systemctl stop ceph-eef7297c-fb8f-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root}
Command failed on smithi204 with status 5: 'sudo systemctl stop ceph-0b7cb80a-fb90-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-low-osd-mem-target openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/fio_4M_rand_write}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/pacific backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/fastclose rados thrashers/pggrow thrashosds-health workloads/test_rbd_api}
Command failed on smithi049 with status 5: 'sudo systemctl stop ceph-26521160-fb91-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/classic task/test_orch_cli}
Command failed on smithi197 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00cf41fc-fb90-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/rgw-ingress 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/rgw 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/with-work/{0-distro/rhel_8.3_kubic_stable fixed-2 mode/packaged mon_election/connectivity msgr/async-v1only start tasks/rados_api_tests}
Command failed on smithi178 with status 5: 'sudo systemctl stop ceph-f566e692-fb91-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/1.6.2}
Command failed on smithi155 with status 1: 'sudo kubeadm init --node-name smithi155 --token abcdef.ofxgw1jenjubqtus --pod-network-cidr 10.252.208.0/21'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/upgrade/parallel/{0-distro$/{ubuntu_20.04} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/mgr-nfs-upgrade/{0-centos_8.2_kubic_stable 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/orchestrator_cli/{0-random-distro$/{centos_8.3_kubic_stable} 2-node-mgr orchestrator_cli}
Test failure: test_device_ls (tasks.mgr.test_orchestrator_cli.TestOrchestratorCli)
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke/{distro/centos_8.3_kubic_stable fixed-2 mon_election/classic start}
Command failed on smithi163 with status 5: 'sudo systemctl stop ceph-1294918e-fb91-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-stupid openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/radosbench_4K_rand_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/basic 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-singlehost/{0-distro$/{rhel_8.3_kubic_stable} 1-start 2-services/basic 3-final}
Command failed on smithi071 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 33fa783e-fb91-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root}
Command failed on smithi136 with status 5: 'sudo systemctl stop ceph-ffeb6508-fb90-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/upgrade/{1-start-distro/1-start-centos_8.3-octopus 2-repo_digest/defaut 3-start-upgrade 4-wait mon_election/classic}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/client-keyring 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/iscsi 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/radosbench_4K_seq_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/connectivity task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi091 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7546c41ab524b652a8ef9ff4bc8783b116a2b3fb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/pacific backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/few rados thrashers/careful thrashosds-health workloads/cache-snaps}
Command failed on smithi090 with status 5: 'sudo systemctl stop ceph-ef0796e2-fb92-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke/{distro/rhel_8.3_kubic_stable fixed-2 mon_election/connectivity start}
Command failed on smithi165 with status 5: 'sudo systemctl stop ceph-8062d6de-fb92-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root}
Command failed on smithi161 with status 5: 'sudo systemctl stop ceph-1ca98a48-fb92-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/mirror 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/classic msgr/async-v2only start tasks/rados_python}
Command failed on smithi105 with status 5: 'sudo systemctl stop ceph-7450379c-fb92-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/nfs-ingress-rgw 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-start-upgrade 4-wait mon_election/connectivity}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-bitmap openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/radosbench_4M_rand_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/nfs-ingress 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/mgr-nfs-upgrade/{0-centos_8.2_kubic_stable 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke/{distro/ubuntu_20.04 fixed-2 mon_election/classic start}
Command failed on smithi043 with status 5: 'sudo systemctl stop ceph-94b604b2-fb92-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root}
Command failed on smithi132 with status 5: 'sudo systemctl stop ceph-aa59faf8-fb92-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/nfs-ingress2 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/connectivity task/test_orch_cli}
Command failed on smithi058 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 99939292-fb92-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-comp openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/radosbench_4M_seq_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/nfs 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/dashboard/{centos_8.2_kubic_stable debug/mgr mon_election/connectivity random-objectstore$/{bluestore-bitmap} tasks/e2e}
Command failed on smithi016 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ce100d48-fb92-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/calico rook/master}
Command failed on smithi103 with status 1: 'sudo kubeadm init --node-name smithi103 --token abcdef.p9zk528sum13w7t2 --pod-network-cidr 10.251.48.0/21'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/osd-delay rados thrashers/default thrashosds-health workloads/radosbench}
Command failed on smithi135 with status 5: 'sudo systemctl stop ceph-2376eca6-fb94-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/nfs2 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/with-work/{0-distro/centos_8.3_kubic_stable fixed-2 mode/packaged mon_election/connectivity msgr/async start tasks/rados_api_tests}
Command failed on smithi164 with status 5: 'sudo systemctl stop ceph-56ccd3ae-fb94-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke/{distro/centos_8.3_kubic_stable fixed-2 mon_election/connectivity start}
Command failed on smithi120 with status 5: 'sudo systemctl stop ceph-f08fdd34-fb93-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-singlehost/{0-distro$/{ubuntu_20.04} 1-start 2-services/rgw 3-final}
Command failed on smithi104 with status 127: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3173ce9c-fb93-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v1only root}
Command failed on smithi193 with status 5: 'sudo systemctl stop ceph-6b7e44f0-fb93-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/defaut 3-start-upgrade 4-wait mon_election/classic}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-low-osd-mem-target openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/radosbench_4M_write}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/rgw-ingress 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/rgw 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/classic task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi064 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7546c41ab524b652a8ef9ff4bc8783b116a2b3fb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/mgr-nfs-upgrade/{0-centos_8.2_kubic_stable 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke/{distro/rhel_8.3_kubic_stable fixed-2 mon_election/classic start}
Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-5bb076a0-fb94-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/basic 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-stupid openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/radosbench_omap_write}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v2only root}
Command failed on smithi204 with status 5: 'sudo systemctl stop ceph-23476116-fb94-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/client-keyring 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v2only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_latest} mon_election/classic msgr-failures/fastclose rados thrashers/mapgap thrashosds-health workloads/rbd_cls}
Command failed on smithi022 with status 5: 'sudo systemctl stop ceph-527bf996-fb95-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/with-work/{0-distro/centos_8.3_kubic_stable fixed-2 mode/root mon_election/classic msgr/async-v1only start tasks/rados_python}
Command failed on smithi184 with status 5: 'sudo systemctl stop ceph-3f38b81a-fb95-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/upgrade/{1-start-distro/1-start-centos_8.3-octopus 2-repo_digest/repo_digest 3-start-upgrade 4-wait mon_election/connectivity}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/iscsi 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke/{distro/ubuntu_20.04 fixed-2 mon_election/connectivity start}
Command failed on smithi109 with status 5: 'sudo systemctl stop ceph-be218ad6-fb94-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/sample_fio}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async root}
Command failed on smithi163 with status 5: 'sudo systemctl stop ceph-fe4d0586-fb94-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/mirror 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/classic task/test_orch_cli}
Command failed on smithi136 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid efbe388c-fb94-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/nfs-ingress-rgw 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/nfs-ingress 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/3-node k8s/1.21 net/calico rook/1.6.2}
Command failed on smithi016 with status 1: 'sudo kubeadm init --node-name smithi016 --token abcdef.i9a7msf1jn50kvy9 --pod-network-cidr 10.248.120.0/21'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/upgrade/parallel/{0-distro$/{centos_8.3_kubic_stable} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/with-work/{0-distro/rhel_8.3_kubic_stable fixed-2 mode/packaged mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests}
Command failed on smithi187 with status 5: 'sudo systemctl stop ceph-08b0ed06-fb97-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}
Command failed (workunit test cls/test_cls_rgw.sh) on smithi131 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7546c41ab524b652a8ef9ff4bc8783b116a2b3fb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_rgw.sh'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/mgr-nfs-upgrade/{0-centos_8.2_kubic_stable 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-bitmap openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/sample_radosbench}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.3_kubic_stable} 2-node-mgr orchestrator_cli}
Test failure: test_device_ls (tasks.mgr.test_orchestrator_cli.TestOrchestratorCli)
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-hybrid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke/{distro/centos_8.3_kubic_stable fixed-2 mon_election/classic start}
Command failed on smithi135 with status 5: 'sudo systemctl stop ceph-6974b60e-fb9c-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_latest} mon_election/connectivity msgr-failures/few rados thrashers/morepggrow thrashosds-health workloads/snaps-few-objects}
Command failed on smithi071 with status 5: 'sudo systemctl stop ceph-d1a557c4-fb9c-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-singlehost/{0-distro$/{ubuntu_20.04} 1-start 2-services/basic 3-final}
Command failed on smithi204 with status 127: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:7546c41ab524b652a8ef9ff4bc8783b116a2b3fb shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e8555768-fb9b-11eb-8c24-001a4aab830c -- ceph-volume lvm zap /dev/vg_nvme/lv_4'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v1only root}
Command failed on smithi037 with status 5: 'sudo systemctl stop ceph-2d33b03c-fb9c-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/defaut 3-start-upgrade 4-wait mon_election/classic}
hit max job timeout
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/nfs-ingress2 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-comp openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/fio_4K_rand_read}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.3
rados/cephadm/smoke-roleless/{0-distro/centos_8.3_kubic_stable 1-start 2-services/nfs 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/workunits/{0-distro/centos_8.2_kubic_stable mon_election/connectivity task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi137 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7546c41ab524b652a8ef9ff4bc8783b116a2b3fb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke-roleless/{0-distro/rhel_8.3_kubic_stable 1-start 2-services/nfs2 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
rhel 8.3
rados/cephadm/smoke/{distro/rhel_8.3_kubic_stable fixed-2 mon_election/connectivity start}
Command failed on smithi109 with status 5: 'sudo systemctl stop ceph-7cebc0c8-fb9d-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
centos 8.2
rados/cephadm/thrash/{0-distro/centos_8.2_kubic_stable 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root}
Command failed on smithi089 with status 5: 'sudo systemctl stop ceph-3459195a-fb9d-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/smoke-roleless/{0-distro/ubuntu_20.04 1-start 2-services/rgw-ingress 3-final}
reached maximum tries (180) after waiting for 180 seconds
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/classic msgr/async start tasks/rados_python}
Command failed on smithi049 with status 5: 'sudo systemctl stop ceph-cbccda10-fb9d-11eb-8c24-001a4aab830c@mon.b'
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-low-osd-mem-target openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/fio_4K_rand_rw}
wip-yuri3-testing-2021-08-09-1006
wip-yuri3-testing-2021-08-09-1006
master
smithi
ubuntu 20.04
rados/cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-start-upgrade 4-wait mon_election/connectivity}
hit max job timeout