Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
fail 6998016 2022-08-29 14:57:14 2022-08-29 14:58:30 2022-08-29 15:13:38 0:15:08 0:04:38 0:10:30 smithi main ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/host rook/master} 1
Failure Reason:

Command failed on smithi087 with status 1: 'kubectl apply -f https://docs.projectcalico.org/manifests/tigera-operator.yaml'

fail 6998017 2022-08-29 14:57:15 2022-08-29 14:58:31 2022-08-29 15:18:01 0:19:30 0:09:17 0:10:13 smithi main rados/cephadm/workunits/{agent/on mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi136 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=925d6d50c6abf38f110c774968b0ed462c9e5c17 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 6998018 2022-08-29 14:57:16 2022-08-29 14:58:31 2022-08-29 15:09:13 0:10:42 0:05:00 0:05:42 smithi main centos 8.stream rados/singleton-nomsgr/{all/librados_hello_world mon_election/classic rados supported-random-distro$/{centos_8}} 1
Failure Reason:

Command failed on smithi131 with status 1: 'sudo yum -y install ceph-test'

fail 6998019 2022-08-29 14:57:17 2022-08-29 14:58:31 2022-08-29 15:14:09 0:15:38 0:04:43 0:10:55 smithi main ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/calico rook/1.7.2} 1
Failure Reason:

Command failed on smithi093 with status 1: 'kubectl apply -f https://docs.projectcalico.org/manifests/tigera-operator.yaml'

fail 6998020 2022-08-29 14:57:18 2022-08-29 14:58:31 2022-08-29 15:11:12 0:12:41 0:05:35 0:07:06 smithi main centos 8.stream rados/upgrade/parallel/{0-random-distro$/{centos_8.stream_container_tools_crun} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}} 2
Failure Reason:

Command failed on smithi170 with status 1: 'sudo yum -y install ceph-test'

fail 6998021 2022-08-29 14:57:19 2022-08-29 14:58:32 2022-08-29 15:09:21 0:10:49 0:04:45 0:06:04 smithi main centos 8.stream rados/valgrind-leaks/{1-start 2-inject-leak/mon centos_latest} 1
Failure Reason:

Command failed on smithi120 with status 1: 'sudo yum -y install ceph-test'

fail 6998022 2022-08-29 14:57:20 2022-08-29 14:58:32 2022-08-29 15:14:18 0:15:46 0:05:51 0:09:55 smithi main rados/cephadm/workunits/{agent/off mon_election/classic task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi094 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=925d6d50c6abf38f110c774968b0ed462c9e5c17 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

fail 6998023 2022-08-29 14:57:21 2022-08-29 14:58:32 2022-08-29 15:16:02 0:17:30 0:06:07 0:11:23 smithi main ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/flannel rook/master} 3
Failure Reason:

Command failed on smithi018 with status 1: 'kubectl create -f rook/deploy/examples/crds.yaml -f rook/deploy/examples/common.yaml -f operator.yaml'

fail 6998024 2022-08-29 14:57:22 2022-08-29 14:58:33 2022-08-29 15:18:58 0:20:25 0:14:40 0:05:45 smithi main rhel 8.6 rados/singleton-nomsgr/{all/librados_hello_world mon_election/connectivity rados supported-random-distro$/{rhel_8}} 1
Failure Reason:

Command failed on smithi043 with status 1: 'sudo yum -y install ceph-test'

fail 6998025 2022-08-29 14:57:23 2022-08-29 14:58:33 2022-08-29 15:14:29 0:15:56 0:04:38 0:11:18 smithi main ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/host rook/1.7.2} 1
Failure Reason:

Command failed on smithi178 with status 1: 'kubectl apply -f https://docs.projectcalico.org/manifests/tigera-operator.yaml'

fail 6998026 2022-08-29 14:57:24 2022-08-29 14:58:33 2022-08-29 15:19:41 0:21:08 0:15:01 0:06:07 smithi main rhel 8.6 rados/upgrade/parallel/{0-random-distro$/{rhel_8.6_container_tools_3.0} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}} 2
Failure Reason:

Command failed on smithi146 with status 1: 'sudo yum -y install ceph-test'

fail 6998027 2022-08-29 14:57:25 2022-08-29 14:58:34 2022-08-29 15:18:00 0:19:26 0:12:56 0:06:30 smithi main rhel 8.6 rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8} tasks/readwrite} 2
Failure Reason:

Command failed on smithi160 with status 1: 'sudo yum -y install ceph-radosgw ceph-test ceph ceph-base cephadm ceph-immutable-object-cache ceph-mgr ceph-mgr-dashboard ceph-mgr-diskprediction-local ceph-mgr-rook ceph-mgr-cephadm ceph-fuse ceph-volume librados-devel libcephfs2 libcephfs-devel librados2 librbd1 python3-rados python3-rgw python3-cephfs python3-rbd rbd-fuse rbd-mirror rbd-nbd sqlite-devel sqlite-devel sqlite-devel sqlite-devel'

fail 6998028 2022-08-29 14:57:27 2022-08-29 14:58:34 2022-08-29 15:17:41 0:19:07 0:08:50 0:10:17 smithi main rados/cephadm/workunits/{agent/off mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi032 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=925d6d50c6abf38f110c774968b0ed462c9e5c17 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

pass 6998029 2022-08-29 14:57:28 2022-08-29 14:58:35 2022-08-29 15:41:58 0:43:23 0:33:17 0:10:06 smithi main rados/cephadm/workunits/{agent/off mon_election/connectivity task/test_nfs} 1
fail 6998030 2022-08-29 14:57:29 2022-08-29 14:58:35 2022-08-29 15:09:31 0:10:56 0:05:11 0:05:45 smithi main centos 8.stream rados/valgrind-leaks/{1-start 2-inject-leak/osd centos_latest} 1
Failure Reason:

Command failed on smithi107 with status 1: 'sudo yum -y install ceph-test'