Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 7887547 2024-09-03 14:37:31 2024-09-03 14:39:18 2024-09-03 15:06:14 0:26:56 0:21:04 0:05:52 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
pass 7887548 2024-09-03 14:37:33 2024-09-03 14:39:18 2024-09-03 15:00:50 0:21:32 0:14:44 0:06:48 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_host_drain} 3
pass 7887549 2024-09-03 14:37:34 2024-09-03 14:39:18 2024-09-03 15:18:34 0:39:16 0:31:53 0:07:23 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7887550 2024-09-03 14:37:36 2024-09-03 14:39:19 2024-09-03 15:18:48 0:39:29 0:32:46 0:06:43 smithi main centos 9.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_9.stream 1-bootstrap/17.2.0 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
pass 7887551 2024-09-03 14:37:37 2024-09-03 14:39:39 2024-09-03 15:20:36 0:40:57 0:33:39 0:07:18 smithi main centos 9.stream orch:cephadm/nfs/{cluster/{1-node} conf/{client mds mgr mon osd} overrides/{ignore_mgr_down ignorelist_health pg_health} supported-random-distros$/{centos_latest} tasks/nfs} 1
pass 7887552 2024-09-03 14:37:38 2024-09-03 14:39:40 2024-09-03 14:59:00 0:19:20 0:13:26 0:05:54 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/classic task/test_orch_cli} 1
pass 7887553 2024-09-03 14:37:40 2024-09-03 14:39:50 2024-09-03 15:07:12 0:27:22 0:15:42 0:11:40 smithi main ubuntu 22.04 orch:cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_22.04} 2-node-mgr agent/off orchestrator_cli} 2
pass 7887554 2024-09-03 14:37:41 2024-09-03 14:41:00 2024-09-03 15:11:28 0:30:28 0:20:19 0:10:09 smithi main ubuntu 22.04 orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
pass 7887555 2024-09-03 14:37:42 2024-09-03 14:41:51 2024-09-03 15:11:03 0:29:12 0:22:56 0:06:16 smithi main centos 9.stream orch:cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} conf/{disable-pool-app} workloads/cephadm_iscsi} 3
pass 7887556 2024-09-03 14:37:44 2024-09-03 14:41:51 2024-09-03 15:05:09 0:23:18 0:15:49 0:07:29 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
pass 7887557 2024-09-03 14:37:45 2024-09-03 14:42:12 2024-09-03 15:11:23 0:29:11 0:19:24 0:09:47 smithi main ubuntu 22.04 orch:cephadm/smoke-singlehost/{0-random-distro$/{ubuntu_22.04} 1-start 2-services/basic 3-final} 1
pass 7887558 2024-09-03 14:37:46 2024-09-03 14:42:12 2024-09-03 15:01:03 0:18:51 0:09:53 0:08:58 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/classic start} 3
pass 7887559 2024-09-03 14:37:48 2024-09-03 14:44:03 2024-09-03 15:50:03 1:06:00 0:59:18 0:06:42 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
pass 7887560 2024-09-03 14:37:49 2024-09-03 14:44:53 2024-09-03 15:23:59 0:39:06 0:33:04 0:06:02 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
fail 7887561 2024-09-03 14:37:50 2024-09-03 14:44:53 2024-09-03 15:06:15 0:21:22 0:15:23 0:05:59 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_ctdb_res_ips} 4
Failure Reason:

SELinux denials found on ubuntu@smithi012.front.sepia.ceph.com: ['type=AVC msg=audit(1725375798.767:11628): avc: denied { nlmsg_read } for pid=65061 comm="ss" scontext=system_u:system_r:container_t:s0:c528,c675 tcontext=system_u:system_r:container_t:s0:c528,c675 tclass=netlink_tcpdiag_socket permissive=1', 'type=AVC msg=audit(1725375798.767:11627): avc: denied { nlmsg_read } for pid=65061 comm="ss" scontext=system_u:system_r:container_t:s0:c528,c675 tcontext=system_u:system_r:container_t:s0:c528,c675 tclass=netlink_tcpdiag_socket permissive=1']
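
Both AVC records describe the ss utility performing a netlink sock_diag read (nlmsg_read on a netlink_tcpdiag_socket) from inside a container context, logged in permissive mode. As a hedged sketch (assuming shell access to the affected smithi host; these are the standard audit/SELinux tools, not anything specific to this run), the denials can be re-read and translated with:

sudo ausearch -m AVC -ts recent -i        # re-read and interpret recent AVC records from the audit log
sudo grep denied /var/log/audit/audit.log | audit2why   # explain why each denial occurred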

pass 7887562 2024-09-03 14:37:52 2024-09-03 14:45:14 2024-09-03 15:14:38 0:29:24 0:23:10 0:06:14 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rotate-keys} 2
pass 7887563 2024-09-03 14:37:53 2024-09-03 14:45:44 2024-09-03 15:04:48 0:19:04 0:12:00 0:07:04 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_iscsi_container/{centos_9.stream test_iscsi_container}} 1
pass 7887564 2024-09-03 14:37:54 2024-09-03 14:45:45 2024-09-03 15:10:07 0:24:22 0:14:51 0:09:31 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
pass 7887565 2024-09-03 14:37:55 2024-09-03 14:49:06 2024-09-03 15:20:45 0:31:39 0:21:28 0:10:11 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} 2
pass 7887566 2024-09-03 14:37:57 2024-09-03 14:49:06 2024-09-03 15:10:32 0:21:26 0:11:45 0:09:41 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
pass 7887567 2024-09-03 14:37:58 2024-09-03 14:51:37 2024-09-03 15:21:03 0:29:26 0:19:22 0:10:04 smithi main ubuntu 22.04 orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_domain} 2
pass 7887568 2024-09-03 14:38:00 2024-09-03 14:51:37 2024-09-03 15:27:23 0:35:46 0:26:03 0:09:43 smithi main ubuntu 22.04 orch:cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
pass 7887569 2024-09-03 14:38:01 2024-09-03 14:51:38 2024-09-03 15:26:39 0:35:01 0:27:42 0:07:19 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
pass 7887570 2024-09-03 14:38:03 2024-09-03 14:51:38 2024-09-03 15:45:36 0:53:58 0:44:06 0:09:52 smithi main ubuntu 22.04 orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_api_tests} 2
fail 7887571 2024-09-03 14:38:04 2024-09-03 14:51:58 2024-09-03 15:27:43 0:35:45 0:25:15 0:10:30 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi005 with status 5: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:710747031b78948d4a373036f5b8c41269b5a399 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a65ac028-6a06-11ef-bcd6-c7b262605968 -- bash -c \'set -e\nset -x\nceph orch apply node-exporter\nceph orch apply grafana\nceph orch apply alertmanager\nceph orch apply prometheus\nsleep 240\nceph orch ls\nceph orch ps\nceph orch host ls\nMON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r \'"\'"\'last | .daemon_name\'"\'"\')\nGRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nPROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nGRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" \'"\'"\'.[] | select(.hostname==$GRAFANA_HOST) | .addr\'"\'"\')\nPROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" \'"\'"\'.[] | select(.hostname==$PROM_HOST) | .addr\'"\'"\')\nALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" \'"\'"\'.[] | select(.hostname==$ALERTM_HOST) | .addr\'"\'"\')\n# check each host node-exporter metrics endpoint is responsive\nALL_HOST_IPS=$(ceph orch host ls -f json | jq -r \'"\'"\'.[] | .addr\'"\'"\')\nfor ip in $ALL_HOST_IPS; do\n curl -s http://${ip}:9100/metric\ndone\n# check grafana endpoints are responsive and database health is okay\ncurl -k -s https://${GRAFANA_IP}:3000/api/health\ncurl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e \'"\'"\'.database == "ok"\'"\'"\'\n# stop mon daemon in order to trigger an alert\nceph orch daemon stop $MON_DAEMON\nsleep 120\n# check prometheus endpoints are responsive and mon down alert is firing\ncurl -s http://${PROM_IP}:9095/api/v1/status/config\ncurl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e \'"\'"\'.status == "success"\'"\'"\'\ncurl -s http://${PROM_IP}:9095/api/v1/alerts\ncurl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \'"\'"\'.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"\'"\'"\'\n# check alertmanager endpoints are responsive and mon down alert is active\ncurl -s http://${ALERTM_IP}:9093/api/v1/status\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e \'"\'"\'.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"\'"\'"\'\n\''
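
For readability, the script embedded in the failed command above (job 7887648 later in this run fails on the same workunit with an identical command) unescapes to roughly the following; only the nested teuthology shell quoting has been removed, the content is unchanged:

set -e
set -x
ceph orch apply node-exporter
ceph orch apply grafana
ceph orch apply alertmanager
ceph orch apply prometheus
sleep 240
ceph orch ls
ceph orch ps
ceph orch host ls
MON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r 'last | .daemon_name')
GRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e '.[]' | jq -r '.hostname')
PROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e '.[]' | jq -r '.hostname')
ALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e '.[]' | jq -r '.hostname')
GRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" '.[] | select(.hostname==$GRAFANA_HOST) | .addr')
PROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" '.[] | select(.hostname==$PROM_HOST) | .addr')
ALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" '.[] | select(.hostname==$ALERTM_HOST) | .addr')
# check each host node-exporter metrics endpoint is responsive
ALL_HOST_IPS=$(ceph orch host ls -f json | jq -r '.[] | .addr')
for ip in $ALL_HOST_IPS; do
  curl -s http://${ip}:9100/metric
done
# check grafana endpoints are responsive and database health is okay
curl -k -s https://${GRAFANA_IP}:3000/api/health
curl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e '.database == "ok"'
# stop mon daemon in order to trigger an alert
ceph orch daemon stop $MON_DAEMON
sleep 120
# check prometheus endpoints are responsive and mon down alert is firing
curl -s http://${PROM_IP}:9095/api/v1/status/config
curl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e '.status == "success"'
curl -s http://${PROM_IP}:9095/api/v1/alerts
curl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e '.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"'
# check alertmanager endpoints are responsive and mon down alert is active
curl -s http://${ALERTM_IP}:9093/api/v1/status
curl -s http://${ALERTM_IP}:9093/api/v1/alerts
curl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e '.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"'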

fail 7887572 2024-09-03 14:38:06 2024-09-03 14:51:59 2024-09-03 15:51:03 0:59:04 0:52:26 0:06:38 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

pass 7887573 2024-09-03 14:38:07 2024-09-03 14:52:19 2024-09-03 15:12:30 0:20:11 0:13:17 0:06:54 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs 3-final} 2
fail 7887574 2024-09-03 14:38:08 2024-09-03 14:53:20 2024-09-03 15:42:32 0:49:12 0:42:19 0:06:53 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
Failure Reason:

Command failed on smithi138 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 91d59bd8-6a05-11ef-bcd6-c7b262605968 -e sha1=710747031b78948d4a373036f5b8c41269b5a399 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''
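
Unescaped, the check that fails here (and likewise in the staggered-upgrade jobs 7887600, 7887628 and 7887657; job 7887643 fails the same way on '.overall' instead of '.rgw') is:

ceph versions | jq -e '.rgw | length == 1'

i.e. the test expects all RGW daemons to report a single version at this point in the upgrade; jq -e exits non-zero when the expression evaluates to false.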

pass 7887575 2024-09-03 14:38:10 2024-09-03 14:53:20 2024-09-03 15:23:52 0:30:32 0:23:02 0:07:30 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/connectivity task/test_orch_cli_mon} 5
pass 7887576 2024-09-03 14:38:11 2024-09-03 14:54:32 2024-09-03 15:13:30 0:18:58 0:12:51 0:06:07 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
pass 7887577 2024-09-03 14:38:12 2024-09-03 14:54:42 2024-09-03 15:12:47 0:18:05 0:10:13 0:07:52 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_res_basic} 2
pass 7887578 2024-09-03 14:38:14 2024-09-03 14:55:53 2024-09-03 15:23:21 0:27:28 0:20:57 0:06:31 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_python} 2
fail 7887579 2024-09-03 14:38:15 2024-09-03 14:56:23 2024-09-03 15:15:08 0:18:45 0:12:44 0:06:01 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_rgw_multisite} 3
Failure Reason:

Command failed on smithi002 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:710747031b78948d4a373036f5b8c41269b5a399 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5477cb7a-6a06-11ef-bcd6-c7b262605968 -- bash -c \'set -e\nset -x\nwhile true; do TOKEN=$(ceph rgw realm tokens | jq -r \'"\'"\'.[0].token\'"\'"\'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done\nTOKENS=$(ceph rgw realm tokens)\necho $TOKENS | jq --exit-status \'"\'"\'.[0].realm == "myrealm1"\'"\'"\'\necho $TOKENS | jq --exit-status \'"\'"\'.[0].token\'"\'"\'\nTOKEN_JSON=$(ceph rgw realm tokens | jq -r \'"\'"\'.[0].token\'"\'"\' | base64 --decode)\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.realm_name == "myrealm1"\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.endpoint | test("http://.+:\\\\d+")\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.access_key\'"\'"\'\necho $TOKEN_JSON | jq --exit-status \'"\'"\'.secret\'"\'"\'\n\''
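
With the nested quoting removed, the test_rgw_multisite check script reads approximately as follows (the same task also hits the max job timeout in job 7887654 below):

set -e
set -x
# wait until the master zone exposes an endpoint and a realm token is available
while true; do TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done
TOKENS=$(ceph rgw realm tokens)
echo $TOKENS | jq --exit-status '.[0].realm == "myrealm1"'
echo $TOKENS | jq --exit-status '.[0].token'
# decode the base64 realm token and validate its fields
TOKEN_JSON=$(ceph rgw realm tokens | jq -r '.[0].token' | base64 --decode)
echo $TOKEN_JSON | jq --exit-status '.realm_name == "myrealm1"'
echo $TOKEN_JSON | jq --exit-status '.endpoint | test("http://.+:\\d+")'
echo $TOKEN_JSON | jq --exit-status '.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")'
echo $TOKEN_JSON | jq --exit-status '.access_key'
echo $TOKEN_JSON | jq --exit-status '.secret'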

pass 7887580 2024-09-03 14:38:17 2024-09-03 14:56:24 2024-09-03 15:26:55 0:30:31 0:19:59 0:10:32 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nvmeof 3-final} 2
pass 7887581 2024-09-03 14:38:18 2024-09-03 14:57:24 2024-09-03 15:16:24 0:19:00 0:12:53 0:06:07 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
pass 7887582 2024-09-03 14:38:19 2024-09-03 14:57:24 2024-09-03 15:55:38 0:58:14 0:47:25 0:10:49 smithi main ubuntu 22.04 orch:cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
pass 7887583 2024-09-03 14:38:21 2024-09-03 14:57:45 2024-09-03 15:38:30 0:40:45 0:32:35 0:08:10 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
dead 7887584 2024-09-03 14:38:22 2024-09-03 14:59:15 2024-09-03 23:07:46 8:08:31 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

hit max job timeout

pass 7887585 2024-09-03 14:38:23 2024-09-03 14:59:36 2024-09-03 15:18:48 0:19:12 0:10:14 0:08:58 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 3
pass 7887586 2024-09-03 14:38:24 2024-09-03 15:01:06 2024-09-03 16:00:44 0:59:38 0:50:26 0:09:12 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
pass 7887587 2024-09-03 14:38:26 2024-09-03 15:01:07 2024-09-03 15:18:06 0:16:59 0:10:25 0:06:34 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_res_dom} 2
pass 7887588 2024-09-03 14:38:27 2024-09-03 15:01:17 2024-09-03 15:33:18 0:32:01 0:22:30 0:09:31 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rotate-keys} 2
pass 7887589 2024-09-03 14:38:28 2024-09-03 15:04:08 2024-09-03 15:26:15 0:22:07 0:14:56 0:07:11 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_set_mon_crush_locations} 3
pass 7887590 2024-09-03 14:38:29 2024-09-03 15:04:38 2024-09-03 15:25:42 0:21:04 0:13:51 0:07:13 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw 3-final} 2
pass 7887591 2024-09-03 14:38:31 2024-09-03 15:04:39 2024-09-03 15:26:38 0:21:59 0:12:44 0:09:15 smithi main ubuntu 22.04 orch:cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/classic task/test_adoption} 1
pass 7887592 2024-09-03 14:38:32 2024-09-03 15:04:59 2024-09-03 15:24:29 0:19:30 0:11:40 0:07:50 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/deploy-raw} 2
pass 7887593 2024-09-03 14:38:33 2024-09-03 15:05:30 2024-09-03 15:25:21 0:19:51 0:11:42 0:08:09 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_basic} 2
pass 7887594 2024-09-03 14:38:35 2024-09-03 15:06:30 2024-09-03 15:27:24 0:20:54 0:14:21 0:06:33 smithi main centos 9.stream orch:cephadm/smoke/{0-distro/centos_9.stream 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
pass 7887595 2024-09-03 14:38:36 2024-09-03 15:06:31 2024-09-03 15:36:08 0:29:37 0:19:18 0:10:19 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/basic 3-final} 2
pass 7887596 2024-09-03 14:38:37 2024-09-03 15:06:31 2024-09-03 15:48:31 0:42:00 0:33:39 0:08:21 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
pass 7887597 2024-09-03 14:38:39 2024-09-03 15:07:32 2024-09-03 15:26:25 0:18:53 0:10:13 0:08:40 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_ca_signed_key} 2
fail 7887598 2024-09-03 14:38:40 2024-09-03 15:08:12 2024-09-03 16:05:39 0:57:27 0:50:57 0:06:30 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

pass 7887599 2024-09-03 14:38:41 2024-09-03 15:08:22 2024-09-03 15:27:57 0:19:35 0:12:08 0:07:27 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
fail 7887600 2024-09-03 14:38:43 2024-09-03 15:08:53 2024-09-04 12:22:53 21:14:00 18:00:28 3:13:32 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
Failure Reason:

Command failed on smithi084 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 83e41118-6a22-11ef-bcd6-c7b262605968 -e sha1=710747031b78948d4a373036f5b8c41269b5a399 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''

pass 7887601 2024-09-03 14:38:44 2024-09-03 15:08:53 2024-09-03 15:38:02 0:29:09 0:18:37 0:10:32 smithi main ubuntu 22.04 orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_domain} 2
pass 7887602 2024-09-03 14:38:45 2024-09-03 15:10:24 2024-09-03 15:52:21 0:41:57 0:33:08 0:08:49 smithi main ubuntu 22.04 orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
pass 7887603 2024-09-03 14:38:47 2024-09-03 15:10:44 2024-09-03 15:40:39 0:29:55 0:19:59 0:09:56 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_cephadm} 1
pass 7887604 2024-09-03 14:38:48 2024-09-03 15:11:15 2024-09-03 15:30:38 0:19:23 0:12:54 0:06:29 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
pass 7887605 2024-09-03 14:38:49 2024-09-03 15:11:25 2024-09-03 15:41:19 0:29:54 0:19:59 0:09:55 smithi main ubuntu 22.04 orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/repave-all} 2
pass 7887606 2024-09-03 14:38:51 2024-09-03 15:11:46 2024-09-03 16:18:03 1:06:17 0:57:42 0:08:35 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v2only root} 2
pass 7887607 2024-09-03 14:38:52 2024-09-03 15:12:46 2024-09-03 15:44:37 0:31:51 0:22:12 0:09:39 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/jaeger 3-final} 2
pass 7887608 2024-09-03 14:38:53 2024-09-03 15:12:47 2024-09-03 15:29:55 0:17:08 0:09:39 0:07:29 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_basic} 2
pass 7887609 2024-09-03 14:38:54 2024-09-03 15:12:57 2024-09-03 15:42:37 0:29:40 0:22:27 0:07:13 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rotate-keys} 2
pass 7887610 2024-09-03 14:38:56 2024-09-03 15:13:48 2024-09-03 15:26:22 0:12:34 0:05:30 0:07:04 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_cephadm_repos} 1
fail 7887611 2024-09-03 14:38:57 2024-09-03 15:13:48 2024-09-03 15:53:37 0:39:49 0:32:23 0:07:26 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.0} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

"2024-09-03T15:40:00.000082+0000 mon.smithi007 (mon.0) 260 : cluster [WRN] Health detail: HEALTH_WARN 1 osds down" in cluster log

pass 7887612 2024-09-03 14:38:58 2024-09-03 15:14:48 2024-09-03 15:33:58 0:19:10 0:12:51 0:06:19 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream mon_election/connectivity task/test_cephadm_timeout} 1
pass 7887613 2024-09-03 14:39:00 2024-09-03 15:15:19 2024-09-03 15:40:45 0:25:26 0:15:48 0:09:38 smithi main ubuntu 22.04 orch:cephadm/orchestrator_cli/{0-random-distro$/{ubuntu_22.04} 2-node-mgr agent/on orchestrator_cli} 2
pass 7887614 2024-09-03 14:39:01 2024-09-03 15:15:19 2024-09-03 15:35:52 0:20:33 0:12:37 0:07:56 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/mirror 3-final} 2
pass 7887615 2024-09-03 14:39:02 2024-09-03 15:16:40 2024-09-03 15:35:08 0:18:28 0:11:26 0:07:02 smithi main centos 9.stream orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_9.stream} 1-start 2-services/rgw 3-final} 1
pass 7887616 2024-09-03 14:39:03 2024-09-03 15:18:20 2024-09-03 15:36:04 0:17:44 0:10:48 0:06:56 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/on fixed-2 mon_election/classic start} 3
pass 7887617 2024-09-03 14:39:05 2024-09-03 15:18:51 2024-09-03 15:56:11 0:37:20 0:30:07 0:07:13 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
fail 7887618 2024-09-03 14:39:06 2024-09-03 15:19:01 2024-09-03 15:30:51 0:11:50 0:03:39 0:08:11 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-haproxy-proto 3-final} 2
Failure Reason:

['Failed to manage policy for boolean nagios_run_sudo: [Errno 11] Resource temporarily unavailable']

fail 7887619 2024-09-03 14:39:07 2024-09-03 15:19:01 2024-09-03 15:38:49 0:19:48 0:12:24 0:07:24 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi163 with status 1: 'mkdir /home/ubuntu/cephtest/archive/audit && sudo cp /var/log/audit/audit.log /home/ubuntu/cephtest/archive/audit && sudo chown $USER /home/ubuntu/cephtest/archive/audit/audit.log && gzip /home/ubuntu/cephtest/archive/audit/audit.log'

pass 7887620 2024-09-03 14:39:09 2024-09-03 15:44:01 910 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_ctdb_res_basic} 4
pass 7887621 2024-09-03 14:39:10 2024-09-03 15:21:02 2024-09-03 15:42:12 0:21:10 0:14:00 0:07:10 smithi main centos 9.stream orch:cephadm/smoke/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
pass 7887622 2024-09-03 14:39:11 2024-09-03 15:21:23 2024-09-03 15:58:39 0:37:16 0:28:52 0:08:24 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async root} 2
pass 7887623 2024-09-03 14:39:13 2024-09-03 15:23:34 2024-09-03 16:05:24 0:41:50 0:33:41 0:08:09 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_api_tests} 2
pass 7887624 2024-09-03 14:39:14 2024-09-03 15:24:04 2024-09-03 15:42:49 0:18:45 0:12:40 0:06:05 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/on mon_election/connectivity task/test_extra_daemon_features} 2
pass 7887625 2024-09-03 14:39:15 2024-09-03 15:24:04 2024-09-03 15:55:55 0:31:51 0:21:36 0:10:15 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
fail 7887626 2024-09-03 14:39:17 2024-09-03 15:24:15 2024-09-03 16:07:15 0:43:00 0:36:53 0:06:07 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

pass 7887627 2024-09-03 14:39:18 2024-09-03 15:45:46 812 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
fail 7887628 2024-09-03 14:39:19 2024-09-03 15:24:46 2024-09-03 16:14:01 0:49:15 0:41:50 0:07:25 smithi main centos 9.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi043 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fb19e00a-6a09-11ef-bcd6-c7b262605968 -e sha1=710747031b78948d4a373036f5b8c41269b5a399 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''

pass 7887629 2024-09-03 14:39:21 2024-09-03 15:25:06 2024-09-03 16:01:42 0:36:36 0:26:37 0:09:59 smithi main ubuntu 22.04 orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_ctdb_res_dom} 4
pass 7887630 2024-09-03 14:39:22 2024-09-03 15:25:37 2024-09-03 16:07:38 0:42:01 0:32:26 0:09:35 smithi main ubuntu 22.04 orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_python} 2
pass 7887631 2024-09-03 14:39:23 2024-09-03 15:25:57 2024-09-03 16:00:46 0:34:49 0:23:33 0:11:16 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/off mon_election/classic task/test_host_drain} 3
pass 7887632 2024-09-03 14:39:25 2024-09-03 15:26:38 2024-09-03 15:45:37 0:18:59 0:13:07 0:05:52 smithi main centos 9.stream orch:cephadm/no-agent-workunits/{0-distro/centos_9.stream_runc mon_election/classic task/test_orch_cli} 1
fail 7887633 2024-09-03 14:39:26 2024-09-03 15:26:38 2024-09-03 15:39:45 0:13:07 0:05:27 0:07:40 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
Failure Reason:

Command failed on smithi006 with status 5: 'sudo systemctl stop ceph-3db55b60-6a0a-11ef-bcd6-c7b262605968@mon.smithi006'

pass 7887634 2024-09-03 14:39:28 2024-09-03 15:26:38 2024-09-03 15:47:49 0:21:11 0:15:21 0:05:50 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
pass 7887635 2024-09-03 14:39:29 2024-09-03 15:26:49 2024-09-03 16:24:47 0:57:58 0:48:57 0:09:01 smithi main ubuntu 22.04 orch:cephadm/thrash/{0-distro/ubuntu_22.04 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v1only root} 2
fail 7887636 2024-09-03 14:39:30 2024-09-03 15:27:09 2024-09-03 15:50:44 0:23:35 0:15:29 0:08:06 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_ctdb_res_ips} 4
Failure Reason:

SELinux denials found on ubuntu@smithi142.front.sepia.ceph.com: ['type=AVC msg=audit(1725378366.305:10868): avc: denied { nlmsg_read } for pid=57474 comm="ss" scontext=system_u:system_r:container_t:s0:c486,c894 tcontext=system_u:system_r:container_t:s0:c486,c894 tclass=netlink_tcpdiag_socket permissive=1']

pass 7887637 2024-09-03 14:39:32 2024-09-03 15:27:40 2024-09-03 15:57:05 0:29:25 0:22:46 0:06:39 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rotate-keys} 2
pass 7887638 2024-09-03 14:39:33 2024-09-03 15:28:00 2024-09-03 15:46:58 0:18:58 0:12:14 0:06:44 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/on mon_election/connectivity task/test_iscsi_container/{centos_9.stream test_iscsi_container}} 1
pass 7887639 2024-09-03 14:39:35 2024-09-03 15:28:00 2024-09-03 16:01:55 0:33:55 0:23:43 0:10:12 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
pass 7887640 2024-09-03 14:39:36 2024-09-03 15:28:11 2024-09-03 16:09:13 0:41:02 0:32:20 0:08:42 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/reef/{v18.2.1} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
pass 7887641 2024-09-03 14:39:38 2024-09-03 15:30:11 2024-09-03 15:50:10 0:19:59 0:12:49 0:07:10 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nfs-keepalive-only 3-final} 2
pass 7887642 2024-09-03 14:39:39 2024-09-03 15:30:52 2024-09-03 15:50:50 0:19:58 0:10:19 0:09:39 smithi main centos 9.stream orch:cephadm/smoke-small/{0-distro/centos_9.stream_runc 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 3
fail 7887643 2024-09-03 14:39:40 2024-09-03 15:33:33 2024-09-03 16:17:39 0:44:06 0:34:24 0:09:42 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-reef 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
Failure Reason:

Command failed on smithi005 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:reef shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 37a14e08-6a0c-11ef-bcd6-c7b262605968 -e sha1=710747031b78948d4a373036f5b8c41269b5a399 -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

pass 7887644 2024-09-03 14:39:42 2024-09-03 15:34:13 2024-09-03 16:05:34 0:31:21 0:19:43 0:11:38 smithi main ubuntu 22.04 orch:cephadm/osds/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
pass 7887645 2024-09-03 14:39:43 2024-09-03 15:36:04 2024-09-03 15:53:22 0:17:18 0:10:39 0:06:39 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream_runc tasks/deploy_smb_mgr_domain} 2
pass 7887646 2024-09-03 14:39:45 2024-09-03 15:36:14 2024-09-03 16:11:55 0:35:41 0:26:00 0:09:41 smithi main ubuntu 22.04 orch:cephadm/smoke/{0-distro/ubuntu_22.04 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
pass 7887647 2024-09-03 14:39:46 2024-09-03 15:36:24 2024-09-03 16:17:40 0:41:16 0:33:31 0:07:45 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream_runc fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
fail 7887648 2024-09-03 14:39:47 2024-09-03 15:36:25 2024-09-03 16:03:59 0:27:34 0:17:34 0:10:00 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream_runc agent/off mon_election/classic task/test_monitoring_stack_basic} 3
Failure Reason:

Command failed on smithi083 with status 5: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:710747031b78948d4a373036f5b8c41269b5a399 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4f7e7faa-6a0c-11ef-bcd6-c7b262605968 -- bash -c \'set -e\nset -x\nceph orch apply node-exporter\nceph orch apply grafana\nceph orch apply alertmanager\nceph orch apply prometheus\nsleep 240\nceph orch ls\nceph orch ps\nceph orch host ls\nMON_DAEMON=$(ceph orch ps --daemon-type mon -f json | jq -r \'"\'"\'last | .daemon_name\'"\'"\')\nGRAFANA_HOST=$(ceph orch ps --daemon-type grafana -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nPROM_HOST=$(ceph orch ps --daemon-type prometheus -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nALERTM_HOST=$(ceph orch ps --daemon-type alertmanager -f json | jq -e \'"\'"\'.[]\'"\'"\' | jq -r \'"\'"\'.hostname\'"\'"\')\nGRAFANA_IP=$(ceph orch host ls -f json | jq -r --arg GRAFANA_HOST "$GRAFANA_HOST" \'"\'"\'.[] | select(.hostname==$GRAFANA_HOST) | .addr\'"\'"\')\nPROM_IP=$(ceph orch host ls -f json | jq -r --arg PROM_HOST "$PROM_HOST" \'"\'"\'.[] | select(.hostname==$PROM_HOST) | .addr\'"\'"\')\nALERTM_IP=$(ceph orch host ls -f json | jq -r --arg ALERTM_HOST "$ALERTM_HOST" \'"\'"\'.[] | select(.hostname==$ALERTM_HOST) | .addr\'"\'"\')\n# check each host node-exporter metrics endpoint is responsive\nALL_HOST_IPS=$(ceph orch host ls -f json | jq -r \'"\'"\'.[] | .addr\'"\'"\')\nfor ip in $ALL_HOST_IPS; do\n curl -s http://${ip}:9100/metric\ndone\n# check grafana endpoints are responsive and database health is okay\ncurl -k -s https://${GRAFANA_IP}:3000/api/health\ncurl -k -s https://${GRAFANA_IP}:3000/api/health | jq -e \'"\'"\'.database == "ok"\'"\'"\'\n# stop mon daemon in order to trigger an alert\nceph orch daemon stop $MON_DAEMON\nsleep 120\n# check prometheus endpoints are responsive and mon down alert is firing\ncurl -s http://${PROM_IP}:9095/api/v1/status/config\ncurl -s http://${PROM_IP}:9095/api/v1/status/config | jq -e \'"\'"\'.status == "success"\'"\'"\'\ncurl -s http://${PROM_IP}:9095/api/v1/alerts\ncurl -s http://${PROM_IP}:9095/api/v1/alerts | jq -e \'"\'"\'.data | .alerts | .[] | select(.labels | .alertname == "CephMonDown") | .state == "firing"\'"\'"\'\n# check alertmanager endpoints are responsive and mon down alert is active\ncurl -s http://${ALERTM_IP}:9093/api/v1/status\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts\ncurl -s http://${ALERTM_IP}:9093/api/v1/alerts | jq -e \'"\'"\'.data | .[] | select(.labels | .alertname == "CephMonDown") | .status | .state == "active"\'"\'"\'\n\''

pass 7887649 2024-09-03 14:39:49 2024-09-03 15:38:46 2024-09-03 16:00:51 0:22:05 0:13:45 0:08:20 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/nfs 3-final} 2
pass 7887650 2024-09-03 14:39:50 2024-09-03 16:30:38 2413 smithi main ubuntu 22.04 orch:cephadm/no-agent-workunits/{0-distro/ubuntu_22.04 mon_election/connectivity task/test_orch_cli_mon} 5
pass 7887651 2024-09-03 14:39:51 2024-09-03 15:40:57 2024-09-03 16:11:10 0:30:13 0:20:44 0:09:29 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
pass 7887652 2024-09-03 14:39:53 2024-09-03 15:41:37 2024-09-03 16:09:03 0:27:26 0:18:10 0:09:16 smithi main ubuntu 22.04 orch:cephadm/smb/{0-distro/ubuntu_22.04 tasks/deploy_smb_mgr_res_basic} 2
pass 7887653 2024-09-03 14:39:54 2024-09-03 15:41:38 2024-09-03 16:25:55 0:44:17 0:33:26 0:10:51 smithi main ubuntu 22.04 orch:cephadm/with-work/{0-distro/ubuntu_22.04 fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
dead 7887654 2024-09-03 14:39:55 2024-09-03 15:44:13 2024-09-03 23:53:49 8:09:36 smithi main ubuntu 22.04 orch:cephadm/workunits/{0-distro/ubuntu_22.04 agent/on mon_election/connectivity task/test_rgw_multisite} 3
Failure Reason:

hit max job timeout

fail 7887655 2024-09-03 14:39:57 2024-09-03 15:44:14 2024-09-03 16:28:01 0:43:47 0:36:42 0:07:05 smithi main centos 9.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

pass 7887656 2024-09-03 14:39:58 2024-09-03 15:44:55 2024-09-03 16:04:53 0:19:58 0:12:10 0:07:48 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/nvmeof 3-final} 2
fail 7887657 2024-09-03 14:40:00 2024-09-03 15:45:56 2024-09-03 16:57:42 1:11:46 1:02:48 0:08:58 smithi main ubuntu 22.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_22.04-squid 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
Failure Reason:

Command failed on smithi079 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:squid shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid defa293a-6a0d-11ef-bcd6-c7b262605968 -e sha1=710747031b78948d4a373036f5b8c41269b5a399 -- bash -c \'ceph versions | jq -e \'"\'"\'.rgw | length == 1\'"\'"\'\''

pass 7887658 2024-09-03 14:40:01 2024-09-03 15:46:06 2024-09-03 16:06:14 0:20:08 0:12:58 0:07:10 smithi main centos 9.stream orch:cephadm/osds/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
pass 7887659 2024-09-03 14:40:02 2024-09-03 15:47:17 2024-09-03 16:40:40 0:53:23 0:46:45 0:06:38 smithi main centos 9.stream orch:cephadm/thrash/{0-distro/centos_9.stream_runc 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async root} 2
dead 7887660 2024-09-03 14:40:03 2024-09-03 15:47:27 2024-09-03 23:56:09 8:08:42 smithi main centos 9.stream orch:cephadm/smoke-roleless/{0-distro/centos_9.stream_runc 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
Failure Reason:

hit max job timeout

pass 7887661 2024-09-03 14:40:05 2024-09-03 15:47:27 2024-09-03 16:05:07 0:17:40 0:10:31 0:07:09 smithi main centos 9.stream orch:cephadm/smb/{0-distro/centos_9.stream tasks/deploy_smb_mgr_res_dom} 2
pass 7887662 2024-09-03 14:40:06 2024-09-03 15:48:08 2024-09-03 16:17:53 0:29:45 0:22:51 0:06:54 smithi main centos 9.stream orch:cephadm/with-work/{0-distro/centos_9.stream fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rotate-keys} 2
pass 7887663 2024-09-03 14:40:07 2024-09-03 15:48:48 2024-09-03 16:11:12 0:22:24 0:14:47 0:07:37 smithi main centos 9.stream orch:cephadm/workunits/{0-distro/centos_9.stream agent/off mon_election/classic task/test_set_mon_crush_locations} 3
pass 7887664 2024-09-03 14:40:09 2024-09-03 15:50:19 2024-09-03 16:22:25 0:32:06 0:21:48 0:10:18 smithi main ubuntu 22.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw 3-final} 2