Teuthology run results: wip-adk-testing-2024-03-20-2326

Every job in this run shares the same values for the branch and machine
columns of the original table, so they are stated once here instead of being
repeated for each entry:

    Ceph Branch:       wip-adk-testing-2024-03-20-2326
    Suite Branch:      wip-adk-testing-2024-03-20-2326
    Teuthology Branch: main
    Machine:           smithi

The ID, Status, and Nodes columns carried no values in this extract and are
omitted. Each entry below is separated by "---" and gives the OS on a labeled
line, then the test description on the next line, then, where one was
recorded, the failure reason.
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final}
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain}
"2024-03-21T11:48:48.912463+0000 mon.a (mon.0) 530 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final}
---
OS: centos 9.stream
orch:cephadm/nfs/{cluster/{1-node} conf/{client mds mon osd} overrides/ignorelist_health supported-random-distros$/{centos_latest} tasks/nfs}
"2024-03-21T11:45:36.400980+0000 mon.a (mon.0) 480 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/classic task/test_orch_cli}
"2024-03-21T11:45:06.032949+0000 mon.a (mon.0) 458 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
---
OS: centos 9.stream
orch:cephadm/orchestrator_cli/{0-random-distro$/{centos_9.stream_runc} 2-node-mgr agent/off orchestrator_cli}
---
OS: centos 9.stream
orch:cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi}
"2024-03-21T11:45:57.419048+0000 mon.a (mon.0) 188 : cluster [WRN] Health check failed: 1/3 mons down, quorum a,c (MON_DOWN)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_basic}
"2024-03-21T11:49:26.102399+0000 mon.a (mon.0) 237 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream} 1-start 2-services/basic 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/classic start}
---
OS: centos 9.stream
orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic}
"2024-03-21T11:58:36.023423+0000 mon.a (mon.0) 893 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: centos 9.stream
orch:cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root}
"2024-03-21T11:54:47.966111+0000 mon.a (mon.0) 844 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: centos 9.stream
orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python}
"2024-03-21T12:00:00.000227+0000 mon.a (mon.0) 1223 : cluster [WRN] Health detail: HEALTH_WARN Reduced data availability: 7 pgs inactive; Degraded data redundancy: 214/597 objects degraded (35.846%), 42 pgs degraded; 2 pool(s) do not have an application enabled" in cluster log
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}}
"2024-03-21T11:51:10.158119+0000 mon.a (mon.0) 294 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final}
---
OS: ubuntu 22.04
orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-wait}
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final}
---
OS: ubuntu 22.04
orch:cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"2024-03-21T11:55:00.499152+0000 mon.a (mon.0) 435 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic}
Command failed on smithi094 with status 5: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:ba760091cd7bd2b0d23f4825ac856ba66450e988 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 93e5de92-e779-11ee-95cd-87774f69a715 -- bash -c \'set -e\nset -x\nceph orch apply node-exporter\nceph orch apply grafana\nceph orch apply alertmanager\nceph orch apply prometheus\nsleep 240\nceph orch ls\nceph orch ps\nceph orch host ls\nMON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r \'"\'"\'last | .daemon_name\'"\'"\')\nGRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nPROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nGRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" \'"\'"\'.[] | select(.hostname==$GRAFANA_HOST) | .addr\'"\'"\')\nPROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" \'"\'"\'.[] | select(.hostname==$PROM_HOST) | .addr\'"\'"\')\nALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" \'"\'"\'.[] | select(.hostname==$ALERTM_HOST) | .addr\'"\'"\')\n# check each host node-exporter metrics endpoint is responsive\nALL_HOST_IPS=$(ceph orch host ls -f json | jq -r \'"\'"\'.[] | .addr\'"\'"\')\nfor ip in $ALL_HOST_IPS; do\n curl -s http://${ip}:9100/metric\ndone\n# check grafana endpoints are responsive and database health is okay\ncurl -k -s https://${GRAFANA_IP}:3000/api/health\ncurl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e \'"\'"\'.database == "ok"\'"\'"\'\n# stop mon daemon in order to trigger an alert\nceph orch daemon stop $MON_DAEMON\nsleep 120\n# check prometheus endpoints are responsive and mon down alert is firing\ncurl -s http://${PROM_IP}:9095/api/v1/status/config\ncurl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e \'"\'"\'.status == "success"\'"\'"\'\ncurl -s http://${PROM_IP}:9095/api/v1/alerts\ncurl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \'"\'"\'.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"\'"\'"\'\n# check alertmanager endpoints are responsive and mon down alert is active\ncurl -s http://${ALERTM_IP}:9093/api/v1/status\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e \'"\'"\'.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"\'"\'"\'\n\''
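Note: the failure reason above embeds the entire test script in teuthology's
nested shell quoting. De-escaped and condensed for readability (this is a
rendering of the captured command, not a replacement for it), the script does
roughly the following:

    # Deploy the monitoring stack and give it time to come up.
    ceph orch apply node-exporter
    ceph orch apply grafana
    ceph orch apply alertmanager
    ceph orch apply prometheus
    sleep 240

    # Resolve each service's host IP from `ceph orch` JSON, e.g. for grafana:
    GRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -r '.[].hostname')
    GRAFANA_IP=$(ceph orch host ls -f json \
        | jq -r --arg h "$GRAFANA_HOST" '.[] | select(.hostname==$h) | .addr')

    # Probe the endpoints; `set -e` plus `jq -e` fails the job on any miss.
    curl -s http://${ip}:9100/metric          # node-exporter, for each host IP
    curl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e '.database == "ok"'

    # Stop a mon to trigger CephMonDown, then assert the alert fires.
    ceph orch daemon stop "$MON_DAEMON"
    sleep 120
    curl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \
        '.data.alerts[] | select(.labels.alertname == "CephMonDown") | .state == "firing"'

Two readability notes: the node-exporter probe uses /metric where the endpoint
is normally /metrics, though with curl -s and no -f a 404 would not by itself
fail the job; and the captured reason records only the overall exit status (5)
of the cephadm shell invocation, not which individual command failed.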
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs 3-final}
---
OS: centos 9.stream
orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/connectivity task/test_orch_cli_mon}
"2024-03-21T12:06:35.081736+0000 mon.a (mon.0) 1146 : cluster [WRN] Health check failed: no active mgr (MGR_DOWN)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_domain}
"2024-03-21T11:55:48.599019+0000 mon.a (mon.0) 236 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs2 3-final}
---
OS: ubuntu 22.04
orch:cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root}
"2024-03-21T12:44:21.333379+0000 mon.a (mon.0) 4857 : cluster [ERR] Health check failed: full ratio(s) out of order (OSD_OUT_OF_ORDER_FULL)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rotate-keys}
Config file not found: "/home/teuthworker/src/git.ceph.com_ceph-c_ba760091cd7bd2b0d23f4825ac856ba66450e988/qa/tasks/cephadm.conf".
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite}
---
OS: centos 9.stream
orch:cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nvmeof 3-final}
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"2024-03-21T12:03:49.813450+0000 mon.a (mon.0) 797 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: ubuntu 20.04
orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity}
"2024-03-21T12:07:19.760821+0000 mon.a (mon.0) 327 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_set_mon_crush_locations}
"2024-03-21T12:12:54.420263+0000 mon.a (mon.0) 479 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw 3-final}
---
OS: ubuntu 22.04
orch:cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/classic task/test_adoption}
No module named 'tasks'
---
OS: centos 9.stream
orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/repave-all}
---
OS: ubuntu 22.04
orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_basic}
"2024-03-21T12:21:29.425209+0000 mon.a (mon.0) 283 : cluster [WRN] Health check failed: Degraded data redundancy: 15/45 objects degraded (33.333%), 7 pgs degraded (PG_DEGRADED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/classic start}
"2024-03-21T12:15:16.436868+0000 mon.a (mon.0) 585 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/basic 3-final}
---
OS: centos 9.stream
orch:cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root}
No module named 'tasks'
---
OS: ubuntu 22.04
orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_api_tests}
No module named 'tasks'
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_ca_signed_key}
No module named 'tasks'
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/client-keyring 3-final}
No module named 'tasks'
---
OS: ubuntu 22.04
orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_cephadm}
Command failed (workunit test cephadm/test_cephadm.sh) on smithi176 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=ba760091cd7bd2b0d23f4825ac856ba66450e988 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/iscsi 3-final}
No module named 'tasks'
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/jaeger 3-final}
---
OS: ubuntu 22.04
orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-add}
No module named 'tasks'
---
OS: centos 9.stream
orch:cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root}
No module named 'tasks'
---
OS: centos 9.stream
orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_python}
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_cephadm_repos}
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/connectivity task/test_cephadm_timeout}
"2024-03-21T12:18:01.877814+0000 mon.a (mon.0) 202 : cluster [WRN] Health check failed: failed to probe daemons or devices (CEPHADM_REFRESH_FAILED)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_22.04} 2-node-mgr agent/on orchestrator_cli}
---
OS: centos 9.stream
orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_domain}
"2024-03-21T12:21:48.411371+0000 mon.a (mon.0) 269 : cluster [WRN] Health check failed: Degraded data redundancy: 14/42 objects degraded (33.333%), 7 pgs degraded (PG_DEGRADED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/mirror 3-final}
No module named 'tasks'
---
OS: centos 9.stream
orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream_runc} 1-start 2-services/rgw 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/classic start}
"2024-03-21T12:21:14.027525+0000 mon.a (mon.0) 426 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: centos 9.stream
orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/on mon_election/classic}
Command failed on smithi057 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e6560820-e77c-11ee-95cd-87774f69a715 -e sha1=ba760091cd7bd2b0d23f4825ac856ba66450e988 -- bash -c \'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e \'"\'"\'.up_to_date | length == 7\'"\'"\'\''
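Note: de-escaped from the nested quoting above, the assertion that failed is:

    ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'

jq -e exits 1 when the expression evaluates to false or null, so the plausible
reading is that the number of daemons reported up_to_date was not 7; the
captured reason itself records only the exit status.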
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}
"2024-03-21T12:31:00.131882+0000 mon.a (mon.0) 643 : cluster [WRN] Health check failed: 2 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_extra_daemon_features}
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final}
---
OS: centos 9.stream
orch:cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-flag}
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final}
---
OS: ubuntu 22.04
orch:cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root}
"2024-03-21T13:50:00.000123+0000 mon.a (mon.0) 1143 : cluster [WRN] Health detail: HEALTH_WARN noscrub,nodeep-scrub flag(s) set; 1 osds down; Degraded data redundancy: 122/912 objects degraded (13.377%), 24 pgs degraded" in cluster log
---
OS: centos 9.stream
orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rotate-keys}
---
OS: ubuntu 22.04
orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/classic task/test_host_drain}
"2024-03-21T13:42:03.324053+0000 mon.a (mon.0) 510 : cluster [WRN] Health check failed: Degraded data redundancy: 2/6 objects degraded (33.333%), 1 pg degraded (PG_DEGRADED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/classic task/test_orch_cli}
"2024-03-21T13:35:18.977081+0000 mon.a (mon.0) 452 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_basic}
"2024-03-21T13:35:14.774573+0000 mon.a (mon.0) 237 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress 3-final}
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_iscsi_container/{centos_9.stream test_iscsi_container}}
hit max job timeout
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final}
---
OS: centos 9.stream
orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-wait}
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}
---
OS: ubuntu 20.04
orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/connectivity}
"2024-03-21T14:02:02.128111+0000 mon.a (mon.0) 1073 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/classic start}
"2024-03-21T13:53:36.434819+0000 mon.a (mon.0) 416 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root}
"2024-03-21T14:06:24.233955+0000 mon.a (mon.0) 1105 : cluster [WRN] Health check failed: 1 pool(s) full (POOL_FULL)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests}
"2024-03-21T14:10:00.000133+0000 mon.a (mon.0) 2614 : cluster [WRN] Health detail: HEALTH_WARN 2 pool(s) do not have an application enabled" in cluster log
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_monitoring_stack_basic}
Command failed on smithi077 with status 5: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:ba760091cd7bd2b0d23f4825ac856ba66450e988 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 70cfd1b4-e789-11ee-95cd-87774f69a715 -- bash -c \'set -e\nset -x\nceph orch apply node-exporter\nceph orch apply grafana\nceph orch apply alertmanager\nceph orch apply prometheus\nsleep 240\nceph orch ls\nceph orch ps\nceph orch host ls\nMON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r \'"\'"\'last | .daemon_name\'"\'"\')\nGRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nPROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nGRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" \'"\'"\'.[] | select(.hostname==$GRAFANA_HOST) | .addr\'"\'"\')\nPROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" \'"\'"\'.[] | select(.hostname==$PROM_HOST) | .addr\'"\'"\')\nALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" \'"\'"\'.[] | select(.hostname==$ALERTM_HOST) | .addr\'"\'"\')\n# check each host node-exporter metrics endpoint is responsive\nALL_HOST_IPS=$(ceph orch host ls -f json | jq -r \'"\'"\'.[] | .addr\'"\'"\')\nfor ip in $ALL_HOST_IPS; do\n curl -s http://${ip}:9100/metric\ndone\n# check grafana endpoints are responsive and database health is okay\ncurl -k -s https://${GRAFANA_IP}:3000/api/health\ncurl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e \'"\'"\'.database == "ok"\'"\'"\'\n# stop mon daemon in order to trigger an alert\nceph orch daemon stop $MON_DAEMON\nsleep 120\n# check prometheus endpoints are responsive and mon down alert is firing\ncurl -s http://${PROM_IP}:9095/api/v1/status/config\ncurl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e \'"\'"\'.status == "success"\'"\'"\'\ncurl -s http://${PROM_IP}:9095/api/v1/alerts\ncurl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \'"\'"\'.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"\'"\'"\'\n# check alertmanager endpoints are responsive and mon down alert is active\ncurl -s http://${ALERTM_IP}:9093/api/v1/status\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e \'"\'"\'.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"\'"\'"\'\n\''
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs 3-final}
---
OS: ubuntu 22.04
orch:cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/connectivity task/test_orch_cli_mon}
"2024-03-21T14:12:16.953567+0000 mon.a (mon.0) 1205 : cluster [WRN] Health check failed: no active mgr (MGR_DOWN)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_domain}
"2024-03-21T13:53:10.443056+0000 mon.a (mon.0) 244 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs2 3-final}
---
OS: ubuntu 22.04
orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_rgw_multisite}
"2024-03-21T13:54:09.903123+0000 mon.a (mon.0) 415 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
---
OS: ubuntu 22.04
orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
---
OS: centos 9.stream
orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nvmeof 3-final}
---
OS: centos 9.stream
orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
---
OS: centos 9.stream
orch:cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root}
"2024-03-21T14:07:27.429357+0000 mon.a (mon.0) 957 : cluster [WRN] Health check failed: Low space hindering backfill (add storage if this doesn't resolve itself): 4 pgs backfill_toofull (PG_BACKFILL_FULL)" in cluster log
---
OS: centos 9.stream
orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python}
"2024-03-21T14:10:00.000187+0000 mon.a (mon.0) 1363 : cluster [WRN] Health detail: HEALTH_WARN 2 pool(s) do not have an application enabled" in cluster log
---
OS: centos 9.stream
orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_set_mon_crush_locations}