Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/osd}
Failure Reason:    Command failed (workunit test osd/osd-bluefs-volume-ops.sh) on smithi079 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1ef864504b8875c83ee6c2c5fedc13315bebf7f5 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/osd/osd-bluefs-volume-ops.sh'
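
The standalone failures in this run share the harness pattern visible in the command above: teuthology clones the Ceph tree, exports the CEPH_* environment, and runs the script under adjust-ulimits and ceph-coverage with a 3h timeout. For local debugging, a minimal sketch, assuming a built local Ceph source tree and its qa/run-standalone.sh helper (the smithi paths above do not apply):

    # Hedged sketch: rerun a failing standalone test from a local build tree.
    cd ~/ceph/build                        # assumption: local clone, already built
    export CEPH_CLI_TEST_DUP_COMMAND=1     # same flag the harness sets above
    # run-standalone.sh finds the script under qa/standalone/ and, like the
    # harness, lets it spin up its own throwaway mon/osd daemons
    ../qa/run-standalone.sh osd-bluefs-volume-ops.sh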

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/singleton-nomsgr/{all/osd_stale_reads mon_election/classic rados supported-random-distro$/{ubuntu_latest}}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/standalone/{supported-random-distro$/{centos_latest} workloads/scrub}
Failure Reason:    Command failed (workunit test scrub/osd-scrub-test.sh) on smithi017 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1ef864504b8875c83ee6c2c5fedc13315bebf7f5 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-test.sh'

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-stupid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_latest} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
Failure Reason:    Command failed on smithi045 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd pool create unique_pool_0 16 16 erasure jerasure21profile'
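
Status 22 is EINVAL, and the failing command is the EC pool setup for the ec-rados-plugin=jerasure-k=4-m=2 workload; the same step also fails in two later jobs in this run. A hedged reproduction sketch: the pool and profile names are taken from the log, the k/m values are inferred from the workload name, and crush-failure-domain is an assumption:

    # The workload first defines the profile the pool create refers to ...
    ceph osd erasure-code-profile set jerasure21profile \
        k=4 m=2 plugin=jerasure technique=reed_sol_van crush-failure-domain=osd
    # ... then creates the pool from it; EINVAL (22) at this step usually
    # means the named profile is missing or one of its parameters is rejected.
    ceph osd pool create unique_pool_0 16 16 erasure jerasure21profile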

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/singleton-nomsgr/{all/admin_socket_output mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
Failure Reason:    hit max job timeout

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/upgrade/parallel/{0-random-distro$/{ubuntu_22.04} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Failure Reason:    "2024-08-30T19:11:18.266957+0000 mon.a (mon.0) 568 : cluster [WRN] Health check failed: Degraded data redundancy: 2/6 objects degraded (33.333%), 1 pg degraded (PG_DEGRADED)" in cluster log

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-5} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/read mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_latest} thrashers/careful thrashosds-health workloads/small-objects}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/osd-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-hybrid} tasks/e2e}
Failure Reason:    "2024-08-30T19:10:54.915191+0000 mon.a (mon.0) 648 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash-old-clients/{0-distro$/{centos_9.stream} 0-size-min-size-overrides/3-size-2-min-size 1-install/reef backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/connectivity msgr-failures/osd-delay rados thrashers/default thrashosds-health workloads/radosbench}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
Failure Reason:    Command failed on smithi005 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd pool create unique_pool_0 16 16 erasure jerasure21profile'

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/upmap-read mon_election/classic msgr-failures/fastclose msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_latest} thrashers/morepggrow thrashosds-health workloads/cache-pool-snaps}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-snappy rados tasks/mon_recovery validater/valgrind}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/singleton/{all/radostool mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest}}
Failure Reason:    Command failed (workunit test rados/test_rados_tool.sh) on smithi100 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1ef864504b8875c83ee6c2c5fedc13315bebf7f5 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_rados_tool.sh'

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/objectstore/{backends/objectcacher-stress supported-random-distro$/{centos_latest}}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/singleton-nomsgr/{all/admin_socket_output mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
Failure Reason:    hit max job timeout

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/upgrade/parallel/{0-random-distro$/{ubuntu_22.04} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Failure Reason:    "2024-08-30T19:15:55.206295+0000 mon.a (mon.0) 369 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite}
Failure Reason:    Command failed on smithi022 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:1ef864504b8875c83ee6c2c5fedc13315bebf7f5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 437e148e-6702-11ef-bcd4-c7b262605968 -- bash -c \'set -e\nset -x\nwhile true; do TOKEN=$(ceph rgw realm tokens | jq -r \'"\'"\'.[0].token\'"\'"\'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done\nTOKENS=$(ceph rgw realm tokens)\necho $TOKENS | jq --exit-status \'"\'"\'.[0].realm == "myrealm1"\'"\'"\'\necho $TOKENS | jq --exit-status \'"\'"\'.[0].token\'"\'"\'\nTOKEN_JSON=$(ceph rgw realm tokens | jq -r \'"\'"\'.[0].token\'"\'"\' | base64 --decode)\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.realm_name == "myrealm1"\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.endpoint | test("http://.+:\\\\d+")\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.access_key\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.secret\'"\'"\'\n\''
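
The heavily escaped one-liner above is the test body passed through cephadm shell to bash -c. De-quoted, it polls `ceph rgw realm tokens` until the master zone publishes an endpoint, then base64-decodes the first realm token and asserts each of its fields with jq. The logic below is verbatim from the failure reason; only the shell escaping is removed:

    set -e
    set -x
    # poll until the master zone has an endpoint and a token is published
    while true; do
        TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token')
        echo $TOKEN
        if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi
        sleep 5
    done
    TOKENS=$(ceph rgw realm tokens)
    echo $TOKENS | jq --exit-status '.[0].realm == "myrealm1"'
    echo $TOKENS | jq --exit-status '.[0].token'
    # decode the realm token and validate its contents
    TOKEN_JSON=$(ceph rgw realm tokens | jq -r '.[0].token' | base64 --decode)
    echo $TOKEN_JSON | jq --exit-status '.realm_name == "myrealm1"'
    echo $TOKEN_JSON | jq --exit-status '.endpoint | test("http://.+:\\d+")'
    echo $TOKEN_JSON | jq --exit-status '.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")'
    echo $TOKEN_JSON | jq --exit-status '.access_key'
    echo $TOKEN_JSON | jq --exit-status '.secret'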

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}
Failure Reason:    Command failed (workunit test cls/test_cls_lock.sh) on smithi189 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1ef864504b8875c83ee6c2c5fedc13315bebf7f5 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cls/test_cls_lock.sh'

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/singleton/{all/thrash-backfill-full mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                ubuntu 22.04
Description:       rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/few objectstore/bluestore-hybrid rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
Failure Reason:    Command failed on smithi067 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd pool create unique_pool_0 16 16 erasure jerasure21profile'

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/upmap-read mon_election/classic msgr-failures/fastclose msgr/async objectstore/bluestore-stupid rados supported-random-distro$/{centos_latest} thrashers/default thrashosds-health workloads/snaps-few-objects-balanced}

Ceph Branch:       wip-yuri8-testing-2024-08-28-1632-squid
Suite Branch:      wip-yuri8-testing-2024-08-28-1632-squid
Teuthology Branch: main
Machine:           smithi
OS:                centos 9.stream
Description:       rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-comp-zstd} tasks/e2e}
Failure Reason:    Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi033 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=1ef864504b8875c83ee6c2c5fedc13315bebf7f5 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_dashboard_e2e.sh'