All jobs in this run share the same branch and machine fields:

Ceph Branch: wip-yuri6-testing-2024-04-02-1310
Suite Branch: wip-yuri6-testing-2024-04-02-1310
Teuthology Branch: main
Machine: smithi

OS: centos 9.stream
Description: rados/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_cephadm}
Failure Reason: Command failed (workunit test cephadm/test_cephadm.sh) on smithi062 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
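
Several of the failures below have this same shape: the workunit wrapper creates a scratch directory, exports a fixed environment, and runs one qa script under a 3h timeout. When triaging, it can help to rerun just the failing script by hand. A minimal sketch, assuming shell access to the test node and that the qa clone from the failed run is still in place (every path, variable, and the CEPH_REF sha below are copied from the failure reason above):

    # Re-run the failing workunit with the same environment the teuthology
    # wrapper used; all values are taken from the failure reason above.
    cd /home/ubuntu/cephtest/mnt.0/client.0/tmp
    export CEPH_CLI_TEST_DUP_COMMAND=1
    export CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101
    export TESTDIR=/home/ubuntu/cephtest
    export CEPH_ARGS="--cluster ceph"
    export CEPH_ID=0
    export PATH=$PATH:/usr/sbin
    export CEPH_BASE=/home/ubuntu/cephtest/clone.client.0
    export CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0
    export CEPH_MNT=/home/ubuntu/cephtest/mnt.0
    # adjust-ulimits and ceph-coverage are dropped so errors surface directly.
    timeout 3h "$CEPH_ROOT/qa/workunits/cephadm/test_cephadm.sh"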

OS: ubuntu 22.04
Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-5} backoff/peering_and_degraded ceph clusters/{fixed-4 openstack} crc-failures/bad_map_crc_failure d-balancer/on mon_election/classic msgr-failures/osd-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/radosbench}

OS: centos 9.stream
Description: rados/singleton-bluestore/{all/cephtool mon_election/connectivity msgr-failures/none msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi110 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/reef backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/cache-snaps}
Failure Reason: Command failed on smithi131 with status 1: 'sudo yum -y install ceph-radosgw'
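
Both 1-install/reef thrash-old-clients jobs on centos 8.stream die at package installation rather than in the test itself. A quick way to separate a repository problem from a test problem is to ask yum what it can see before rerunning the failing command; a minimal sketch, assuming shell access to the failed node:

    # Is the package resolvable at all, or is a repo missing/disabled?
    sudo yum repolist                 # list the repos the node has enabled
    sudo yum info ceph-radosgw        # does any enabled repo provide it?
    sudo yum -y install ceph-radosgw  # rerun the failing command for the full error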

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag}
Failure Reason: "2024-04-12T00:33:43.950154+0000 mon.smithi008 (mon.0) 791 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: ubuntu 22.04
Description: rados/singleton-nomsgr/{all/admin_socket_output mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
Failure Reason: hit max job timeout

OS: centos 9.stream
Description: rados/upgrade/parallel/{0-random-distro$/{centos_9.stream} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Failure Reason: "2024-04-12T00:46:31.636012+0000 mon.a (mon.0) 319 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: centos 9.stream
Description: rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/off fixed-2 mon_election/classic start}
Failure Reason: "2024-04-12T00:47:07.554580+0000 mon.a (mon.0) 692 : cluster [WRN] Health check failed: 2 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log

OS: ubuntu 22.04
Description: rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/many msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} tasks/rados_cls_all}

OS: centos 9.stream
Description: rados/thrash-erasure-code-crush-4-nodes/{arch/x86_64 ceph mon_election/classic msgr-failures/fastclose objectstore/bluestore-comp-zstd rados recovery-overrides/{default} supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=8-m=6-crush}

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/radosbench}
Failure Reason: "2024-04-12T01:10:00.000098+0000 mon.a (mon.0) 1265 : cluster [WRN] Health detail: HEALTH_WARN noscrub flag(s) set" in cluster log

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun
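
All five valgrind failures in this run report the same Leak_StillReachable signature with inlined, unsymbolized frames. To get readable stacks for a still-reachable leak outside teuthology, running the flagged daemon under valgrind directly with leak kinds widened can help; a minimal sketch, where the binary and its arguments are placeholders for whichever daemon was flagged:

    # Reproduce a still-reachable leak report with deeper, symbolized stacks;
    # './ceph-mon -f --id a' is a placeholder invocation.
    valgrind --leak-check=full --show-leak-kinds=reachable \
             --num-callers=40 ./ceph-mon -f --id a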

OS: ubuntu 22.04
Description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-wait}
Failure Reason: "2024-04-12T00:58:17.734661+0000 mon.smithi083 (mon.0) 563 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log

OS: ubuntu 22.04
Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-5} backoff/normal ceph clusters/{fixed-4 openstack} crc-failures/bad_map_crc_failure d-balancer/upmap-read mon_election/classic msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/snaps-few-objects-redelete}
Failure Reason: hit max job timeout

OS: ubuntu 22.04
Description: rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/none msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi113 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
Failure Reason: "2024-04-12T01:03:41.739559+0000 mon.smithi115 (mon.0) 792 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/rbd_cls}

OS: centos 9.stream
Description: rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-bitmap} tasks/e2e}
Failure Reason: Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi008 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_dashboard_e2e.sh'

OS: centos 9.stream
Description: rados/cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain}
Failure Reason: "2024-04-12T01:04:49.009522+0000 mon.a (mon.0) 381 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log

OS: centos 9.stream
Description: rados/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
Failure Reason: "2024-04-12T01:05:13.158113+0000 mon.a (mon.0) 447 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log

OS: centos 9.stream
Description: rados/thrash-erasure-code-crush-4-nodes/{arch/x86_64 ceph mon_election/classic msgr-failures/osd-delay objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_latest} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=8-m=6-crush}

OS: centos 9.stream
Description: rados/valgrind-leaks/{1-start 2-inject-leak/none centos_latest}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-snappy rados tasks/mon_recovery validater/valgrind}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun

OS: centos 9.stream
Description: rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}}
Failure Reason: "2024-04-12T01:19:31.142711+0000 mon.a (mon.0) 302 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/snaps-few-objects}
Failure Reason: Error reimaging machines: Failed to power on smithi071

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/repave-all}
Failure Reason: "2024-04-12T01:21:49.218263+0000 mon.smithi071 (mon.0) 801 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: ubuntu 22.04
Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-5} backoff/peering ceph clusters/{fixed-4 openstack} crc-failures/default d-balancer/crush-compat mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/pool-snaps-few-objects-redelete}
Failure Reason: hit max job timeout

OS: ubuntu 22.04
Description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic}
Failure Reason: "2024-04-12T01:39:58.206716+0000 mon.a (mon.0) 425 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

OS: ubuntu 22.04
Description: rados/singleton-bluestore/{all/cephtool mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi023 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: ubuntu 22.04
Description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-add}
Failure Reason: "2024-04-12T01:41:37.526431+0000 mon.smithi017 (mon.0) 757 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: centos 9.stream
Description: rados/singleton-nomsgr/{all/admin_socket_output mon_election/connectivity rados supported-random-distro$/{centos_latest}}
Failure Reason: hit max job timeout

OS: centos 9.stream
Description: rados/upgrade/parallel/{0-random-distro$/{centos_9.stream_runc} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Failure Reason: "2024-04-12T01:44:23.314876+0000 mon.a (mon.0) 254 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: centos 9.stream
Description: rados/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite}

OS: ubuntu 22.04
Description: rados/thrash-erasure-code-crush-4-nodes/{arch/x86_64 ceph mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-bitmap rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=jerasure-k=8-m=6-crush}

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun

OS: ubuntu 22.04
Description: rados/cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/off fixed-2 mon_election/classic start}
Failure Reason: "2024-04-12T01:44:50.361434+0000 mon.a (mon.0) 389 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log

OS: ubuntu 22.04
Description: rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/mon}
Failure Reason: Command failed (workunit test mon/mkfs.sh) on smithi107 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/mkfs.sh'

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/pacific backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/cache-snaps}
Failure Reason: "2024-04-12T02:00:00.000074+0000 mon.a (mon.0) 1431 : cluster [WRN] Health detail: HEALTH_WARN 1 osds down; Degraded data redundancy: 139/1684 objects degraded (8.254%), 1 pg degraded" in cluster log

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-flag}
Failure Reason: "2024-04-12T01:44:28.152290+0000 mon.smithi106 (mon.0) 787 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: centos 9.stream
Description: rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_latest}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi032 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: ubuntu 22.04
Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-5} backoff/normal ceph clusters/{fixed-4 openstack} crc-failures/default d-balancer/read mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/snaps-few-objects-redelete}
Failure Reason: hit max job timeout

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/quincy backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/radosbench}
Failure Reason: "2024-04-12T02:20:00.000114+0000 mon.a (mon.0) 1098 : cluster [WRN] Health detail: HEALTH_WARN 1 osds down; Low space hindering backfill (add storage if this doesn't resolve itself): 2 pgs backfill_toofull; Degraded data redundancy: 3592/84495 objects degraded (4.251%), 2 pgs degraded" in cluster log

OS: centos 9.stream
Description: rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-comp-zlib} tasks/e2e}
Failure Reason: Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi071 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_dashboard_e2e.sh'

OS: ubuntu 22.04
Description: rados/thrash-erasure-code-crush-4-nodes/{arch/x86_64 ceph mon_election/classic msgr-failures/few objectstore/bluestore-comp-snappy rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=jerasure-k=8-m=6-crush}

OS: centos 9.stream
Description: rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-wait}
Failure Reason: "2024-04-12T01:51:33.218319+0000 mon.smithi092 (mon.0) 796 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: centos 9.stream
Description: rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados tasks/rados_api_tests validater/valgrind}
Failure Reason: valgrind error: Leak_StillReachable operator new[](unsigned long) UnknownInlinedFun UnknownInlinedFun

OS: centos 9.stream
Description: rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
Failure Reason: "2024-04-12T01:51:12.026656+0000 mon.a (mon.0) 620 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log

OS: ubuntu 22.04
Description: rados/cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_cephadm}
Failure Reason: Command failed (workunit test cephadm/test_cephadm.sh) on smithi119 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

OS: centos 8.stream
Description: rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/reef backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/rbd_cls}
Failure Reason: Command failed on smithi110 with status 1: 'sudo yum -y install ceph-radosgw'

OS: centos 9.stream
Description: rados/standalone/{supported-random-distro$/{centos_latest} workloads/scrub}
Failure Reason: Command failed (workunit test scrub/osd-scrub-repair.sh) on smithi017 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=a5074d4516d566e9d8b6aec912f26afd099de101 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-repair.sh'

OS: ubuntu 22.04
Description: rados/cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
Failure Reason: "2024-04-12T02:12:13.753456+0000 mon.smithi123 (mon.0) 771 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log