Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 7907326 2024-09-16 13:14:00 2024-09-16 13:15:29 2024-09-16 13:51:32 0:36:03 0:21:47 0:14:16 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
fail 7907327 2024-09-16 13:14:01 2024-09-16 13:18:40 2024-09-16 13:51:57 0:33:17 0:23:55 0:09:22 smithi main centos 9.stream orch:cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
Failure Reason:

"2024-09-16T13:35:11.847325+0000 mon.a (mon.0) 209 : cluster [WRN] Health check failed: 1/3 mons down, quorum a,c (MON_DOWN)" in cluster log

pass 7907328 2024-09-16 13:14:02 2024-09-16 13:19:00 2024-09-16 14:02:41 0:43:41 0:32:53 0:10:48 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
fail 7907329 2024-09-16 13:14:03 2024-09-16 13:19:00 2024-09-16 13:45:59 0:26:59 0:16:04 0:10:55 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_ctdb_res_ips} 4
Failure Reason:

SELinux denials found on ubuntu@smithi063.front.sepia.ceph.com: ['type=AVC msg=audit(1726494165.677:10846): avc: denied { nlmsg_read } for pid=61066 comm="ss" scontext=system_u:system_r:container_t:s0:c64,c509 tcontext=system_u:system_r:container_t:s0:c64,c509 tclass=netlink_tcpdiag_socket permissive=1']

pass 7907330 2024-09-16 13:14:04 2024-09-16 13:20:11 2024-09-16 13:56:09 0:35:58 0:26:34 0:09:24 smithi main ubuntu 22.04 orch:cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
fail 7907331 2024-09-16 13:14:06 2024-09-16 13:20:31 2024-09-16 13:57:05 0:36:34 0:25:42 0:10:52 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi098 with status 5: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:8293d73f8690540e843a81caec373f9cc29cf705 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3240e16a-7431-11ef-bceb-c7b262605968 -- bash -c \'set -e\nset -x\nceph orch apply node-exporter\nceph orch apply grafana\nceph orch apply alertmanager\nceph orch apply prometheus\nsleep 240\nceph orch ls\nceph orch ps\nceph orch host ls\nMON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r \'"\'"\'last | .daemon_name\'"\'"\')\nGRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nPROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nGRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" \'"\'"\'.[] | select(.hostname==$GRAFANA_HOST) | .addr\'"\'"\')\nPROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" \'"\'"\'.[] | select(.hostname==$PROM_HOST) | .addr\'"\'"\')\nALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" \'"\'"\'.[] | select(.hostname==$ALERTM_HOST) | .addr\'"\'"\')\n# check each host node-exporter metrics endpoint is responsive\nALL_HOST_IPS=$(ceph orch host ls -f json | jq -r \'"\'"\'.[] | .addr\'"\'"\')\nfor ip in $ALL_HOST_IPS; do\n curl -s http://${ip}:9100/metric\ndone\n# check grafana endpoints are responsive and database health is okay\ncurl -k -s https://${GRAFANA_IP}:3000/api/health\ncurl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e \'"\'"\'.database == "ok"\'"\'"\'\n# stop mon daemon in order to trigger an alert\nceph orch daemon stop $MON_DAEMON\nsleep 120\n# check prometheus endpoints are responsive and mon down alert is firing\ncurl -s http://${PROM_IP}:9095/api/v1/status/config\ncurl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e \'"\'"\'.status == "success"\'"\'"\'\ncurl -s http://${PROM_IP}:9095/api/v1/alerts\ncurl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \'"\'"\'.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"\'"\'"\'\n# check alertmanager endpoints are responsive and mon down alert is active\ncurl -s http://${ALERTM_IP}:9093/api/v1/status\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e \'"\'"\'.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"\'"\'"\'\n\''
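Note: for readability, the test body embedded in the command above (task/test_monitoring_stack_basic), with the nested shell quoting undone, is roughly the following; the identical script is what fails job 7907342 below. Exit status 5 is jq's generic runtime-error code, so one plausible reading, not confirmed by this excerpt, is that a jq filter received empty or non-JSON input (e.g. a curl that got no response).

    set -e
    set -x
    ceph orch apply node-exporter
    ceph orch apply grafana
    ceph orch apply alertmanager
    ceph orch apply prometheus
    sleep 240
    ceph orch ls
    ceph orch ps
    ceph orch host ls
    MON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r 'last | .daemon_name')
    GRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e '.[]' | jq -r '.hostname')
    PROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e '.[]' | jq -r '.hostname')
    ALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e '.[]' | jq -r '.hostname')
    GRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" '.[] | select(.hostname==$GRAFANA_HOST) | .addr')
    PROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" '.[] | select(.hostname==$PROM_HOST) | .addr')
    ALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" '.[] | select(.hostname==$ALERTM_HOST) | .addr')
    # check each host node-exporter metrics endpoint is responsive
    # (sic: the suite requests /metric rather than /metrics; the output is not checked)
    ALL_HOST_IPS=$(ceph orch host ls -f json | jq -r '.[] | .addr')
    for ip in $ALL_HOST_IPS; do
        curl -s http://${ip}:9100/metric
    done
    # check grafana endpoints are responsive and database health is okay
    curl -k -s https://${GRAFANA_IP}:3000/api/health
    curl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e '.database == "ok"'
    # stop mon daemon in order to trigger an alert
    ceph orch daemon stop $MON_DAEMON
    sleep 120
    # check prometheus endpoints are responsive and mon down alert is firing
    curl -s http://${PROM_IP}:9095/api/v1/status/config
    curl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e '.status == "success"'
    curl -s http://${PROM_IP}:9095/api/v1/alerts
    curl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e '.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"'
    # check alertmanager endpoints are responsive and mon down alert is active
    curl -s http://${ALERTM_IP}:9093/api/v1/status
    curl -s http://${ALERTM_IP}:9093/api/v1/alerts
    curl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e '.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"'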

fail 7907332 2024-09-16 13:14:07 2024-09-16 13:21:12 2024-09-16 14:23:22 1:02:10 0:51:52 0:10:18 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

fail 7907333 2024-09-16 13:14:08 2024-09-16 14:17:13 2592 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
Failure Reason:

Command failed on smithi029 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid acb34024-7430-11ef-bceb-c7b262605968 -e sha1=8293d73f8690540e843a81caec373f9cc29cf705 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''
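Note: the failing assertion here, with quoting undone, is the post-upgrade check below; it requires all running RGW daemons to report a single Ceph version. jq -e exits 1 when the expression is false or null, i.e. .rgw listed zero or more than one version at that point in the staggered upgrade. Jobs 7907337, 7907339, and 7907344 below fail the same .rgw check, and jobs 7907341 and 7907343 fail the analogous .overall and .mgr variants.

    # assertion: all rgw daemons have converged on one version
    ceph versions | jq -e '.rgw | length == 1'

    # illustrative shape of 'ceph versions' output mid-upgrade
    # (hypothetical values, not taken from these runs):
    # {
    #   "mon": { "ceph version 19.x (...) squid (stable)": 3 },
    #   "rgw": { "ceph version 19.x (...) squid (stable)": 1,
    #            "ceph version 18.x (...) reef (stable)": 1 },
    #   "overall": { ... }
    # }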

fail 7907334 2024-09-16 13:14:10 2024-09-16 13:23:53 2024-09-16 13:55:23 0:31:30 0:21:32 0:09:58 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_python} 2
Failure Reason:

"2024-09-16T13:52:16.966394+0000 mon.a (mon.0) 1352 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

fail 7907335 2024-09-16 13:14:11 2024-09-16 13:24:04 2024-09-16 13:48:13 0:24:09 0:12:27 0:11:42 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite} 3
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:8293d73f8690540e843a81caec373f9cc29cf705 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3f7f3732-7431-11ef-bceb-c7b262605968 -- bash -c \'set -e\nset -x\nwhile true; do TOKEN=$(ceph rgw realm tokens | jq -r \'"\'"\'.[0].token\'"\'"\'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done\nTOKENS=$(ceph rgw realm tokens)\necho $TOKENS | jq --exit-status \'"\'"\'.[0].realm == "myrealm1"\'"\'"\'\necho $TOKENS | jq --exit-status \'"\'"\'.[0].token\'"\'"\'\nTOKEN_JSON=$(ceph rgw realm tokens | jq -r \'"\'"\'.[0].token\'"\'"\' | base64 --decode)\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.realm_name == "myrealm1"\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.endpoint | test("http://.+:\\\\d+")\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.access_key\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.secret\'"\'"\'\n\''
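Note: with the nested quoting undone, the test body embedded above (task/test_rgw_multisite) is roughly the following; it polls until a realm token is published, then validates the token's base64-decoded JSON payload. Status 1 means one of the jq --exit-status checks produced false or null, though the excerpt does not show which one.

    set -e
    set -x
    # wait until the realm token is published (the master zone needs an endpoint first)
    while true; do
        TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token')
        echo $TOKEN
        if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi
        sleep 5
    done
    TOKENS=$(ceph rgw realm tokens)
    echo $TOKENS | jq --exit-status '.[0].realm == "myrealm1"'
    echo $TOKENS | jq --exit-status '.[0].token'
    # the token is base64-encoded JSON; decode it and validate its fields
    TOKEN_JSON=$(ceph rgw realm tokens | jq -r '.[0].token' | base64 --decode)
    echo $TOKEN_JSON | jq --exit-status '.realm_name == "myrealm1"'
    echo $TOKEN_JSON | jq --exit-status '.endpoint | test("http://.+:\\d+")'
    echo $TOKEN_JSON | jq --exit-status '.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")'
    echo $TOKEN_JSON | jq --exit-status '.access_key'
    echo $TOKEN_JSON | jq --exit-status '.secret'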

fail 7907336 2024-09-16 13:14:12 2024-09-16 13:24:54 2024-09-16 14:46:03 1:21:09 1:06:47 0:14:22 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

fail 7907337 2024-09-16 13:14:13 2024-09-16 13:28:05 2024-09-16 14:45:19 1:17:14 1:04:59 0:12:15 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
Failure Reason:

Command failed on smithi003 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 101aa070-7432-11ef-bceb-c7b262605968 -e sha1=8293d73f8690540e843a81caec373f9cc29cf705 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''

fail 7907338 2024-09-16 13:14:15 2024-09-16 13:29:35 2024-09-16 14:16:32 0:46:57 0:36:52 0:10:05 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

fail 7907339 2024-09-16 13:14:16 2024-09-16 13:29:36 2024-09-16 14:26:04 0:56:28 0:42:33 0:13:55 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi123 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 023ad1fa-7432-11ef-bceb-c7b262605968 -e sha1=8293d73f8690540e843a81caec373f9cc29cf705 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''

fail 7907340 2024-09-16 13:14:17 2024-09-16 13:32:47 2024-09-16 14:00:47 0:28:00 0:15:31 0:12:29 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_ctdb_res_ips} 4
Failure Reason:

"2024-09-16T13:58:40.674134+0000 mon.a (mon.0) 789 : cluster [WRN] Health check failed: 2 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log

fail 7907341 2024-09-16 13:14:18 2024-09-16 13:35:07 2024-09-16 14:06:19 0:31:12 0:21:56 0:09:16 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi162 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v18.2.1 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 815f7116-7432-11ef-bceb-c7b262605968 -e sha1=8293d73f8690540e843a81caec373f9cc29cf705 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

fail 7907342 2024-09-16 13:14:20 2024-09-16 13:35:08 2024-09-16 14:02:26 0:27:18 0:17:54 0:09:24 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi002 with status 5: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:8293d73f8690540e843a81caec373f9cc29cf705 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a2c39bca-7432-11ef-bceb-c7b262605968 -- bash -c \'set -e\nset -x\nceph orch apply node-exporter\nceph orch apply grafana\nceph orch apply alertmanager\nceph orch apply prometheus\nsleep 240\nceph orch ls\nceph orch ps\nceph orch host ls\nMON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r \'"\'"\'last | .daemon_name\'"\'"\')\nGRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nPROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nGRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" \'"\'"\'.[] | select(.hostname==$GRAFANA_HOST) | .addr\'"\'"\')\nPROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" \'"\'"\'.[] | select(.hostname==$PROM_HOST) | .addr\'"\'"\')\nALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" \'"\'"\'.[] | select(.hostname==$ALERTM_HOST) | .addr\'"\'"\')\n# check each host node-exporter metrics endpoint is responsive\nALL_HOST_IPS=$(ceph orch host ls -f json | jq -r \'"\'"\'.[] | .addr\'"\'"\')\nfor ip in $ALL_HOST_IPS; do\n curl -s http://${ip}:9100/metric\ndone\n# check grafana endpoints are responsive and database health is okay\ncurl -k -s https://${GRAFANA_IP}:3000/api/health\ncurl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e \'"\'"\'.database == "ok"\'"\'"\'\n# stop mon daemon in order to trigger an alert\nceph orch daemon stop $MON_DAEMON\nsleep 120\n# check prometheus endpoints are responsive and mon down alert is firing\ncurl -s http://${PROM_IP}:9095/api/v1/status/config\ncurl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e \'"\'"\'.status == "success"\'"\'"\'\ncurl -s http://${PROM_IP}:9095/api/v1/alerts\ncurl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \'"\'"\'.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"\'"\'"\'\n# check alertmanager endpoints are responsive and mon down alert is active\ncurl -s http://${ALERTM_IP}:9093/api/v1/status\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e \'"\'"\'.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"\'"\'"\'\n\''

fail 7907343 2024-09-16 13:14:21 2024-09-16 13:35:28 2024-09-16 14:06:19 0:30:51 0:19:12 0:11:39 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi100 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid cdcbf970-7432-11ef-bceb-c7b262605968 -e sha1=8293d73f8690540e843a81caec373f9cc29cf705 -- bash -c \'ceph versions | jq -e \'"\'"\'.mgr | length == 1\'"\'"\'\''

fail 7907344 2024-09-16 13:14:22 2024-09-16 13:37:09 2024-09-16 14:53:43 1:16:34 1:05:24 0:11:10 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi050 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 416e6b7e-7433-11ef-bceb-c7b262605968 -e sha1=8293d73f8690540e843a81caec373f9cc29cf705 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''