Teuthology run results for the rados suite. Fields common to every job:

| Field | Value |
| --- | --- |
| Ceph Branch | wip-bharath11-testing-2024-09-18-1115-quincy |
| Suite Branch | wip-bharath11-testing-2024-09-18-1115-quincy |
| Teuthology Branch | main |
| Machine | smithi |

Per-job results follow. A blank Failure Reason means the job reported none. Labels (A) to (F) refer to the full failure commands listed after the table.

| # | OS | Description | Failure Reason |
| --- | --- | --- | --- |
| 1 | centos 9.stream | `rados/cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_cephadm}` | Command failed (workunit test cephadm/test_cephadm.sh) on smithi148 with status 125 (B) |
| 2 | centos 9.stream | `rados/upgrade/parallel/{0-random-distro$/{centos_9.stream_runc} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}` | Failed to fetch package version from https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=pacific |
| 3 | centos 9.stream | `rados/valgrind-leaks/{1-start 2-inject-leak/mon centos_latest}` | |
| 4 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/repave-all}` | Command failed on smithi083 with status 234: nvme-loop setup (A) |
| 5 | centos 9.stream | `rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/off fixed-2 mon_election/classic start}` | Command failed on smithi117 with status 234: nvme-loop setup (A) |
| 6 | centos 9.stream | `rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_cephadm_repos}` | Command failed (workunit test cephadm/test_repos.sh) on smithi052 with status 1 (C) |
| 7 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-add}` | Data could not be sent to remote host "smithi070.front.sepia.ceph.com". Make sure this host can be reached over ssh: ssh: connect to host smithi070.front.sepia.ceph.com port 22: No route to host |
| 8 | centos 9.stream | `rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}` | Failed to reconnect to smithi037 |
| 9 | centos 9.stream | `rados/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}` | Command failed on smithi016 with status 234: nvme-loop setup (A) |
| 10 | centos 9.stream | `rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_iscsi_container/{centos_9.stream test_iscsi_container}}` | Command failed (workunit test cephadm/test_iscsi_pids_limit.sh) on smithi146 with status 2 (D) |
| 11 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-wait}` | Command failed on smithi103 with status 234: nvme-loop setup (A) |
| 12 | centos 9.stream | `rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-comp-snappy} tasks/e2e}` | Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi134 with status 1 (E) |
| 13 | centos 9.stream | `rados/singleton-nomsgr/{all/lazy_omap_stats_output mon_election/classic rados supported-random-distro$/{centos_latest}}` | Command crashed: `sudo TESTDIR=/home/ubuntu/cephtest bash -c ceph_test_lazy_omap_stats` |
| 14 | centos 9.stream | `rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-snappy rados tasks/mon_recovery validater/valgrind}` | saw valgrind issues |
| 15 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate}` | Command failed on smithi123 with status 234: nvme-loop setup (A) |
| 16 | centos 9.stream | `rados/valgrind-leaks/{1-start 2-inject-leak/none centos_latest}` | saw valgrind issues |
| 17 | centos 9.stream | `rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay objectstore/bluestore-bitmap rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_latest} thrashers/minsize_recovery thrashosds-health workloads/ec-small-objects-balanced}` | |
| 18 | ubuntu 20.04 | `rados/singleton-nomsgr/{all/osd_stale_reads mon_election/classic rados supported-random-distro$/{ubuntu_latest}}` | |
| 19 | ubuntu 20.04 | `rados/thrash-old-clients/{0-distro$/{ubuntu_20.04} 0-size-min-size-overrides/2-size-2-min-size 1-install/octopus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/classic msgr-failures/fastclose rados thrashers/mapgap thrashosds-health workloads/rbd_cls}` | Command failed on smithi073 with status 1: `sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents:TestClsRbd.mirror'` |
| 20 | centos 9.stream | `rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}` | Command failed on smithi007 with status 234: nvme-loop setup (A) |
| 21 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-add}` | Command failed on smithi044 with status 234: nvme-loop setup (A) |
| 22 | ubuntu 20.04 | `rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/off mon_election/connectivity task/test_cephadm}` | Command failed (workunit test cephadm/test_cephadm.sh) on smithi076 with status 125 (B) |
| 23 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag}` | Command failed on smithi137 with status 234: nvme-loop setup (A) |
| 24 | centos 9.stream | `rados/cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/classic task/test_cephadm_repos}` | Command failed (workunit test cephadm/test_repos.sh) on smithi170 with status 1 (C) |
| 25 | ubuntu 20.04 | `rados/upgrade/parallel/{0-random-distro$/{ubuntu_20.04} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}` | Command failed on smithi033 with status 1: `sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:pacific pull` |
| 26 | centos 9.stream | `rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}` | saw valgrind issues |
| 27 | centos 9.stream | `rados/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/classic start}` | Command failed on smithi164 with status 234: nvme-loop setup (A) |
| 28 | ubuntu 20.04 | `rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/mon}` | |
| 29 | centos 9.stream | `rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/few rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_latest} thrashers/fastread thrashosds-health workloads/ec-small-objects-overwrites}` | |
| 30 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate}` | Command failed on smithi052 with status 234: nvme-loop setup (A) |
| 31 | centos 9.stream | `rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/few objectstore/bluestore-comp-zlib rados recovery-overrides/{default} supported-random-distro$/{centos_latest} thrashers/careful thrashosds-health workloads/ec-small-objects}` | |
| 32 | centos 9.stream | `rados/valgrind-leaks/{1-start 2-inject-leak/osd centos_latest}` | |
| 33 | centos 9.stream | `rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}}` | Command failed (workunit test cephadm/test_iscsi_pids_limit.sh) on smithi092 with status 2 (D) |
| 34 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/repave-all}` | Command failed on smithi084 with status 234: nvme-loop setup (A) |
| 35 | centos 9.stream | `rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-low-osd-mem-target rados tasks/rados_api_tests validater/valgrind}` | Command failed (workunit test rados/test.sh) on smithi003 with status 124 (F) |
| 36 | ubuntu 20.04 | `rados/objectstore/{backends/objectstore-bluestore-a supported-random-distro$/{ubuntu_latest}}` | Command failed on smithi071 with status 1: `sudo TESTDIR=/home/ubuntu/cephtest bash -c 'mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-bluestore 20" ceph_test_objectstore --gtest_filter=*/2:-*SyntheticMatrixC* --gtest_catch_exceptions=0'` |
| 37 | centos 9.stream | `rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-comp-zstd} tasks/e2e}` | Command failed (workunit test cephadm/test_dashboard_e2e.sh) on smithi120 with status 1 (E) |
| 38 | centos 9.stream | `rados/cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/classic start}` | Command failed on smithi019 with status 234: nvme-loop setup (A) |
| 39 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-flag}` | Command failed on smithi070 with status 234: nvme-loop setup (A) |
| 40 | centos 9.stream | `rados/cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-wait}` | Command failed on smithi107 with status 234: nvme-loop setup (A) |
| 41 | centos 9.stream | `rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/filestore-xfs rados tasks/mon_recovery validater/valgrind}` | saw valgrind issues |
| 42 | centos 9.stream | `rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_latest} tasks/rados_cls_all}` | |
| 43 | centos 9.stream | `rados/cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}` | Command failed on smithi103 with status 234: nvme-loop setup (A) |
| 44 | centos 9.stream | `rados/cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/connectivity task/test_cephadm}` | Command failed (workunit test cephadm/test_cephadm.sh) on smithi016 with status 125 (B) |

Full failure commands:

(A) nvme-loop device setup, failed with status 234 on jobs 4, 5, 9, 11, 15, 20, 21, 23, 27, 30, 34, 38, 39, 40, and 43; the command is identical on every host:

```
sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 &&
echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host &&
sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 &&
echo /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path &&
echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable &&
sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 &&
sudo nvme connect -t loop -n lv_1 -q hostnqn
```

(B) to (F) are workunit invocations. Each begins with the same environment setup:

```
mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp &&
CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7061ea6ca50908d104e9f3979dc615e118d9e153 \
TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin \
CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 \
CEPH_MNT=/home/ubuntu/cephtest/mnt.0 \
<wrapper and workunit script, per label below>
```

and ends with:

- (B) jobs 1 (smithi148), 22 (smithi076), 44 (smithi016), status 125: `adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh`
- (C) jobs 6 (smithi052), 24 (smithi170), status 1: `timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_repos.sh`
- (D) jobs 10 (smithi146), 33 (smithi092), status 2: `adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_iscsi_pids_limit.sh`
- (E) jobs 12 (smithi134), 37 (smithi120), status 1: `adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_dashboard_e2e.sh`
- (F) job 35 (smithi003), status 124: `ALLOW_TIMEOUTS=1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh`
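For re-running the (A) sequence by hand, a commented restatement follows. Paths and names are verbatim from the failing command; the added precondition check is an assumption that `/sys/kernel/config/nvmet/ports/1`, which the symlink step needs, is created earlier by the job's nvme-loop setup.

```sh
#!/usr/bin/env bash
# Commented restatement of failure command (A). Everything except the
# port check is taken verbatim from the teuthology failure above.
set -euo pipefail

SUB=/sys/kernel/config/nvmet/subsystems/lv_1
PORT=/sys/kernel/config/nvmet/ports/1

# Assumption: the symlink below can only be created if port 1 already
# exists; the job is expected to have configured it beforehand.
[ -d "$PORT" ] || { echo "nvmet port 1 missing; is nvmet loaded and the port configured?" >&2; exit 1; }

sudo mkdir -p "$SUB"                                  # create the NVMe-oF target subsystem
echo 1 | sudo tee "$SUB/attr_allow_any_host"          # accept connections from any host NQN
sudo mkdir -p "$SUB/namespaces/1"                     # add namespace 1 to the subsystem
echo /dev/vg_nvme/lv_1 | sudo tee "$SUB/namespaces/1/device_path"  # back it with the test LV
echo 1 | sudo tee "$SUB/namespaces/1/enable"          # enable the namespace
sudo ln -s "$SUB" "$PORT/subsystems/lv_1"             # expose the subsystem on port 1
sudo nvme connect -t loop -n lv_1 -q hostnqn          # attach via the loopback transport
```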
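The shared (B) to (F) environment block is teuthology's standard workunit harness. Below is a commented copy of the (B) variant; the roles ascribed to the variables and wrappers are my interpretation of teuthology conventions, not something stated in this log.

```sh
# Scratch directory for this client on the test node.
mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp &&
cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp &&
# CEPH_REF: SHA1 of the ceph commit under test.
# CEPH_CLI_TEST_DUP_COMMAND=1: ask CLI tests to issue each command twice
# as an idempotence check (interpretation).
# CEPH_BASE/CEPH_ROOT: the ceph source checkout holding qa/workunits.
CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=7061ea6ca50908d104e9f3979dc615e118d9e153 \
TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin \
CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 \
CEPH_MNT=/home/ubuntu/cephtest/mnt.0 \
adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage \
timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh
# adjust-ulimits / ceph-coverage: teuthology wrappers that raise resource
# limits and record coverage data into the archive dir (interpretation).
# timeout 3h: GNU timeout kills the script at the limit and exits 124,
# which matches job 35's status 124 from its 6h rados/test.sh run.
```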
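Job 2's failure is about package availability rather than the test itself, and job 25's failed `cephadm ... pull` of the `pacific` image tag looks like the same class of problem on the container side. A quick manual check of what shaman has ready for that ref (the URL is verbatim from the failure; `jq` is optional pretty-printing, and the reading of an empty result is an inference, since pacific predates CentOS 9 builds):

```sh
# Query shaman for ready pacific builds on centos/9/x86_64. An empty JSON
# list would mean no such packages exist, which is the likely cause here
# (inference: pacific was not built for CentOS 9).
curl -s 'https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=pacific' | jq .
```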