Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 6797019 2022-04-19 14:17:14 2022-04-19 14:46:41 1197 smithi master centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi066 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1a8eb52e-bfed-11ec-8c38-001a4aab830c -e sha1=2194423fe78ea1121b8a233f4f8fc979c1532a43 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

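Note on the failure above: this is cephadm's upgrade-convergence check, which recurs in several jobs below. It opens a cephadm shell pinned to the bootstrap image and asserts via jq that all daemons report a single Ceph version. A minimal unescaped sketch (image, fsid, and paths copied from the failure above):

    sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell \
        -c /etc/ceph/ceph.conf \
        -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 1a8eb52e-bfed-11ec-8c38-001a4aab830c \
        -- bash -c 'ceph versions | jq -e ".overall | length == 1"'
    # jq -e exits non-zero when the expression is false, i.e. when daemons
    # still report more than one version, so the job fails while the
    # upgrade has not yet converged.
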
pass 6797021 2022-04-19 14:17:15 2022-04-19 14:19:49 2022-04-19 14:40:12 0:20:23 0:15:02 0:05:21 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm} 1
fail 6797022 2022-04-19 14:17:16 2022-04-19 14:19:49 2022-04-19 14:30:03 0:10:14 0:05:07 0:05:07 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi053 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2194423fe78ea1121b8a233f4f8fc979c1532a43 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

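All four test_cephadm_repos jobs in this run (6797022, 6797033, 6797040, 6797045) fail in this same workunit, which points at the script rather than the harness. The workunit runner clones the ceph repo at CEPH_REF into clone.client.0 and runs the script under a 3h timeout; a hedged sketch of reproducing the run by hand (the clone URL is an assumption; the ref is from the failure above):

    CEPH_REF=2194423fe78ea1121b8a233f4f8fc979c1532a43
    git clone https://github.com/ceph/ceph.git clone.client.0   # assumed upstream repo
    git -C clone.client.0 checkout "$CEPH_REF"
    # run the failing workunit directly, under the same timeout teuthology applies
    timeout 3h clone.client.0/qa/workunits/cephadm/test_repos.sh
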
pass 6797025 2022-04-19 14:17:17 2022-04-19 14:55:14 1537 smithi master centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{pg-warn whitelist_health whitelist_wrongly_marked_down} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
pass 6797026 2022-04-19 14:17:18 2022-04-19 14:22:01 2022-04-19 14:48:22 0:26:21 0:15:42 0:10:39 smithi master ubuntu 18.04 orch/cephadm/smoke-roleless/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-services/rgw 3-final} 2
pass 6797029 2022-04-19 14:17:19 2022-04-19 14:45:06 835 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm} 1
fail 6797030 2022-04-19 14:17:20 2022-04-19 14:23:42 2022-04-19 14:52:48 0:29:06 0:19:31 0:09:35 smithi master centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi087 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f486e9f4-bfed-11ec-8c38-001a4aab830c -e sha1=2194423fe78ea1121b8a233f4f8fc979c1532a43 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

fail 6797033 2022-04-19 14:17:21 2022-04-19 14:26:44 2022-04-19 14:38:14 0:11:30 0:05:10 0:06:20 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi103 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2194423fe78ea1121b8a233f4f8fc979c1532a43 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

pass 6797034 2022-04-19 14:17:22 2022-04-19 14:26:44 2022-04-19 14:52:10 0:25:26 0:15:23 0:10:03 smithi master ubuntu 20.04 orch/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
fail 6797036 2022-04-19 14:17:24 2022-04-19 14:27:14 2022-04-19 14:42:33 0:15:19 0:05:31 0:09:48 smithi master ubuntu 20.04 orch/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/calico rook/master} 1
Failure Reason:

[Errno 2] Cannot find file on the remote 'ubuntu@smithi145.front.sepia.ceph.com': 'rook/cluster/examples/kubernetes/ceph/operator.yaml'

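The rook smoke job looks for an operator manifest at a fixed path in the rook checkout. A hedged explanation: rook upstream moved its example manifests from cluster/examples/kubernetes/ceph/ to deploy/examples/ around Rook v1.8, so a rook/master checkout likely no longer carries operator.yaml at the path the suite requests. A quick check on the remote (old path first, then the relocated one):

    ls rook/cluster/examples/kubernetes/ceph/operator.yaml 2>/dev/null \
        || ls rook/deploy/examples/operator.yaml
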
fail 6797038 2022-04-19 14:17:25 2022-04-19 14:27:16 2022-04-19 14:54:53 0:27:37 0:19:16 0:08:21 smithi master centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi052 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3bba4528-bfee-11ec-8c38-001a4aab830c -e sha1=2194423fe78ea1121b8a233f4f8fc979c1532a43 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

fail 6797039 2022-04-19 14:17:26 2022-04-19 14:27:56 2022-04-19 14:48:26 0:20:30 0:14:14 0:06:16 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm} 1
Failure Reason:

SELinux denials found on ubuntu@smithi035.front.sepia.ceph.com: ['type=AVC msg=audit(1650379534.748:7669): avc: denied { ioctl } for pid=51084 comm="iptables" path="/var/lib/containers/storage/overlay/af87d315cfa7b4d47305ee2c5184f36359e6d117730d475e7ee15e31f3b727ae/merged" dev="overlay" ino=3408017 scontext=system_u:system_r:iptables_t:s0 tcontext=system_u:object_r:container_file_t:s0:c1022,c1023 tclass=dir permissive=1']

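teuthology flags any AVC denial found in the audit log as a failure, even in permissive mode (note permissive=1 in the record above). To inspect a denial like this on the affected host, assuming the standard audit tooling (ausearch, audit2why) is installed:

    # show recent AVC denials from the audit log
    sudo ausearch -m AVC -ts recent
    # explain why access was denied and what policy change would allow it
    sudo ausearch -m AVC -ts recent | audit2why
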
fail 6797040 2022-04-19 14:17:27 2022-04-19 14:27:56 2022-04-19 14:40:44 0:12:48 0:04:59 0:07:49 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi094 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2194423fe78ea1121b8a233f4f8fc979c1532a43 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

pass 6797041 2022-04-19 14:17:28 2022-04-19 14:29:47 2022-04-19 15:07:17 0:37:30 0:29:56 0:07:34 smithi master centos 8.stream orch/cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{pg-warn whitelist_health whitelist_wrongly_marked_down} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
fail 6797042 2022-04-19 14:17:29 2022-04-19 14:30:07 2022-04-19 14:54:32 0:24:25 0:13:31 0:10:54 smithi master ubuntu 18.04 orch/cephadm/osds/{0-distro/ubuntu_18.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi136 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:2194423fe78ea1121b8a233f4f8fc979c1532a43 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f7e7a6b4-bfee-11ec-8c38-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

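The bash -c payload in this failure is the suite's rm-zap-add OSD replacement loop. The same script with the shell escaping unwound for readability (content identical to the failure reason above; only comments added):

    set -e
    set -x
    ceph orch ps
    ceph orch device ls
    # resolve the device id, host, and device path backing osd.1
    DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}')
    HOST=$(ceph orch device ls | grep $DEVID | awk '{print $1}')
    DEV=$(ceph orch device ls | grep $DEVID | awk '{print $2}')
    echo "host $HOST, dev $DEV, devid $DEVID"
    # remove the OSD and wait for the removal to finish draining
    ceph orch osd rm 1
    while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
    # wipe the device, re-add it as a fresh OSD, and wait for it to come up
    ceph orch device zap $HOST $DEV --force
    ceph orch daemon add osd $HOST:$DEV
    while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done

The exit status of 22 suggests one of the orch steps failed; set -e aborts the script at the first failing command.
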
pass 6797043 2022-04-19 14:17:30 2022-04-19 14:31:08 2022-04-19 14:52:01 0:20:53 0:14:00 0:06:53 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/classic task/test_cephadm} 1
fail 6797044 2022-04-19 14:17:31 2022-04-19 14:31:08 2022-04-19 14:59:30 0:28:22 0:20:02 0:08:20 smithi master centos 8.stream orch/cephadm/mgr-nfs-upgrade/{0-distro/centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

Command failed on smithi027 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v15 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e588859c-bfee-11ec-8c38-001a4aab830c -e sha1=2194423fe78ea1121b8a233f4f8fc979c1532a43 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

fail 6797045 2022-04-19 14:17:32 2022-04-19 14:31:39 2022-04-19 14:42:54 0:11:15 0:04:55 0:06:20 smithi master centos 8.stream orch/cephadm/workunits/{0-distro/centos_8.stream_container_tools mon_election/connectivity task/test_cephadm_repos} 1
Failure Reason:

Command failed (workunit test cephadm/test_repos.sh) on smithi131 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2194423fe78ea1121b8a233f4f8fc979c1532a43 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh'

dead 2022-04-19 14:17:14 2022-04-19 14:17:14