All jobs in this run share the same branch and machine settings:

Ceph Branch: wip-osd-ec-partial-reads-eio
Suite Branch: wip-osd-ec-partial-reads-eio
Teuthology Branch: main
Machine: smithi

Per-job results (OS, Description, Failure Reason):

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag}
Failure Reason: "2024-06-03T20:34:54.678080+0000 mon.smithi045 (mon.0) 118 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados tasks/rados_cls_all validater/valgrind}
Failure Reason: Command failed (workunit test cls/test_cls_2pc_queue.sh) on smithi136 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7d208bbce2efc11fb0dcecbb271cb2051d1daa58 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_2pc_queue.sh'

OS: ubuntu 22.04
Description: rados/cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}
Failure Reason: "2024-06-03T20:14:24.902037+0000 mon.a (mon.0) 102 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: centos 9.stream
Description: rados/standalone/{supported-random-distro$/{centos_latest} workloads/scrub}
Failure Reason: Command failed (workunit test scrub/osd-scrub-repair.sh) on smithi151 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7d208bbce2efc11fb0dcecbb271cb2051d1daa58 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-repair.sh'

OS: ubuntu 22.04
Description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-wait}
Failure Reason: "2024-06-03T20:20:33.935519+0000 mon.smithi082 (mon.0) 116 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: ubuntu 22.04
Description: rados/singleton-bluestore/{all/cephtool mon_election/connectivity msgr-failures/none msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi079 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7d208bbce2efc11fb0dcecbb271cb2051d1daa58 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/reef backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/cache-snaps}
Failure Reason: Command failed on smithi032 with status 1: 'sudo yum install -y kernel'

OS: centos 9.stream
Description: rados/upgrade/parallel/{0-random-distro$/{centos_9.stream_runc} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Failure Reason: Command failed on smithi097 with status 1: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --max-attr-len 20000 --op read 100 --op write 0 --op delete 50 --op snap_create 50 --op snap_remove 50 --op rollback 50 --op setattr 25 --op rmattr 25 --op copy_from 50 --op append 100 --pool unique_pool_0'

OS: centos 9.stream
Description: rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/off fixed-2 mon_election/classic start}
Failure Reason: "2024-06-03T20:44:15.973980+0000 mon.a (mon.0) 113 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/radosbench}
Failure Reason: Command failed on smithi044 with status 1: 'sudo yum install -y kernel'

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}
Failure Reason: "2024-06-03T21:41:19.606509+0000 mon.a (mon.0) 301 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: ubuntu 22.04
Description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-add}
Failure Reason: "2024-06-03T20:20:40.007994+0000 mon.smithi064 (mon.0) 110 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: centos 9.stream
Description: rados/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
Failure Reason: Error reimaging machines: Failed to power on smithi160

OS: centos 9.stream
Description: rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-4 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_latest} thrashers/pggrow thrashosds-health workloads/ec-small-objects-overwrites}
Failure Reason: hit max job timeout

OS: ubuntu 22.04
Description: rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/erasure-code}

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-flag}
Failure Reason: "2024-06-03T20:47:26.914286+0000 mon.smithi059 (mon.0) 121 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: ubuntu 22.04
Description: rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/none msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi177 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7d208bbce2efc11fb0dcecbb271cb2051d1daa58 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-wait}
Failure Reason: Error reimaging machines: Failed to power on smithi032

OS: ubuntu 22.04
Description: rados/cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/off fixed-2 mon_election/classic start}
Failure Reason: "2024-06-03T20:24:32.426514+0000 mon.a (mon.0) 101 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log

OS: centos 9.stream
Description: rados/valgrind-leaks/{1-start 2-inject-leak/none centos_latest}
Failure Reason: reached maximum tries (51) after waiting for 300 seconds

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-snappy rados tasks/mon_recovery validater/valgrind}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun

OS: centos 9.stream
Description: rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
Failure Reason: Command failed on smithi117 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:7d208bbce2efc11fb0dcecbb271cb2051d1daa58 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 851fe72a-21eb-11ef-bc9b-c7b262605968 -- ceph-volume lvm zap /dev/nvme4n1'

OS: ubuntu 22.04
Description: rados/thrash-erasure-code-crush-4-nodes/{arch/x86_64 ceph mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-bitmap rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=jerasure-k=8-m=6-crush}
Failure Reason: Error reimaging machines: Failed to power on smithi002

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/pacific backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/cache-snaps}
Failure Reason: Command failed on smithi105 with status 1: 'sudo yum install -y kernel'

OS: centos 9.stream
Description: rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi029 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7d208bbce2efc11fb0dcecbb271cb2051d1daa58 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/quincy backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/radosbench}
Failure Reason: Command failed on smithi016 with status 1: 'sudo yum install -y kernel'

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados tasks/rados_api_tests validater/valgrind}
Failure Reason: Command failed (workunit test rados/test.sh) on smithi070 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7d208bbce2efc11fb0dcecbb271cb2051d1daa58 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 ALLOW_TIMEOUTS=1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-add}
Failure Reason: "2024-06-03T20:53:27.973761+0000 mon.smithi115 (mon.0) 121 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log