Fields: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} tasks/rados_api_tests}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/filestore-xfs rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/calico rook/1.7.2}
Failure Reason: Command failed on smithi003 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description: rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/off mon_election/classic task/test_cephadm}
Failure Reason: SELinux denials found on ubuntu@smithi038.front.sepia.ceph.com: ['type=AVC msg=audit(1670031864.762:19257): avc: denied { ioctl } for pid=112869 comm="iptables" path="/var/lib/containers/storage/overlay/5614d62792e30c52265d4a41b5c7de2e1abf512d120a7039e36268e1faef9dce/merged" dev="overlay" ino=3540579 scontext=system_u:system_r:iptables_t:s0 tcontext=system_u:object_r:container_file_t:s0:c1022,c1023 tclass=dir permissive=1', 'type=AVC msg=audit(1670031864.809:19259): avc: denied { ioctl } for pid=112887 comm="iptables" path="/var/lib/containers/storage/overlay/5614d62792e30c52265d4a41b5c7de2e1abf512d120a7039e36268e1faef9dce/merged" dev="overlay" ino=3540579 scontext=system_u:system_r:iptables_t:s0 tcontext=system_u:object_r:container_file_t:s0:c1022,c1023 tclass=dir permissive=1']

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_cephadm_repos}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/perf/{ceph mon_election/connectivity objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/radosbench_4K_seq_read}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest} thrashers/many workloads/rados_mon_workunits}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/singleton-nomsgr/{all/ceph-post-file mon_election/classic rados supported-random-distro$/{centos_8}}
Failure Reason: Command failed (workunit test post-file.sh) on smithi189 with status 255: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=0a30a5d92d67eb7011204915623f977744e8b400 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/post-file.sh'

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/singleton/{all/resolve_stuck_peering mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest}}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-comp-lz4 rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/snaps-few-objects-balanced}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/singleton-nomsgr/{all/crushdiff mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} tasks/rados_stress_watch}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/singleton/{all/test-crash mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{centos_8}}
Failure Reason: Command failed (workunit test rados/test_crash.sh) on smithi132 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=0a30a5d92d67eb7011204915623f977744e8b400 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_crash.sh'

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/fastclose objectstore/bluestore-stupid rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=3-m=1}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/perf/{ceph mon_election/classic objectstore/bluestore-bitmap openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/radosbench_4M_rand_read}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/multimon/{clusters/6 mon_election/connectivity msgr-failures/many msgr/async-v2only no_pools objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_with_skews}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/on mon_election/connectivity task/test_nfs}
Failure Reason: Test failure: test_cluster_set_reset_user_config (tasks.cephfs.test_nfs.TestNFS)

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/flannel rook/master}
Failure Reason: 'check osd count' reached maximum tries (90) after waiting for 900 seconds

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/on mon_election/connectivity task/test_cephadm}
Failure Reason: SELinux denials found on ubuntu@smithi134.front.sepia.ceph.com: ['type=AVC msg=audit(1670032300.207:19364): avc: denied { ioctl } for pid=123661 comm="iptables" path="/var/lib/containers/storage/overlay/d5661490182e92735098c4a6dcb6f2a3753ff3e7005fe77f75e10cc646c614ee/merged" dev="overlay" ino=3412491 scontext=system_u:system_r:iptables_t:s0 tcontext=system_u:object_r:container_file_t:s0:c1022,c1023 tclass=dir permissive=1']

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/host rook/1.7.2}
Failure Reason: Command failed on smithi093 with status 1: 'kubectl create -f rook/cluster/examples/kubernetes/ceph/crds.yaml -f rook/cluster/examples/kubernetes/ceph/common.yaml -f operator.yaml'

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/perf/{ceph mon_election/connectivity objectstore/bluestore-bitmap openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/sample_radosbench}

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description: rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}}
Failure Reason: Command failed on smithi002 with status 1: 'TESTDIR=/home/ubuntu/cephtest bash -s'

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description: rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/off mon_election/classic task/test_nfs}
Failure Reason: Test failure: test_cluster_set_reset_user_config (tasks.cephfs.test_nfs.TestNFS)

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/singleton-nomsgr/{all/ceph-post-file mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
Failure Reason: Command failed (workunit test post-file.sh) on smithi055 with status 255: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=0a30a5d92d67eb7011204915623f977744e8b400 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/post-file.sh'

Ceph Branch: wip-yuri6-testing-2022-11-23-1348
Suite Branch: wip-yuri6-testing-2022-11-23-1348
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description: rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/calico rook/master}
Failure Reason: 'check osd count' reached maximum tries (90) after waiting for 900 seconds