Status | Job ID | Links | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
pass 6901246 2022-06-27 18:03:39 2022-06-28 00:18:40 2022-06-28 00:58:50 0:40:10 0:31:38 0:08:32 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
fail 6901247 2022-06-27 18:03:40 2022-06-28 00:18:41 2022-06-28 00:40:35 0:21:54 0:11:53 0:10:01 smithi main centos 8.stream orch:cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi163 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a31b489a-f679-11ec-842c-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

pass 6901248 2022-06-27 18:03:41 2022-06-28 00:18:41 2022-06-28 01:02:29 0:43:48 0:33:50 0:09:58 smithi main ubuntu 20.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic} 2
pass 6901249 2022-06-27 18:03:42 2022-06-28 00:18:41 2022-06-28 00:40:01 0:21:20 0:12:48 0:08:32 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/mirror 3-final} 2
fail 6901250 2022-06-27 18:03:44 2022-06-28 00:18:42 2022-06-28 00:38:17 0:19:35 0:09:58 0:09:37 smithi main centos 8.stream orch:cephadm/thrash/{0-distro/centos_8.stream_container_tools_crun 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi074 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f87b43ee-f679-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi074:vg_nvme/lv_4'

fail 6901251 2022-06-27 18:03:45 2022-06-28 00:18:42 2022-06-28 00:36:56 0:18:14 0:09:48 0:08:26 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/packaged mon_election/classic msgr/async-v1only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi079 with status 22: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid bfaa140a-f679-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi079:vg_nvme/lv_4'

dead 6901252 2022-06-27 18:03:46 2022-06-28 00:18:42 2022-06-28 12:34:12 12:15:30 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.4 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

hit max job timeout

dead 6901253 2022-06-27 18:03:47 2022-06-28 00:18:43 2022-06-28 00:31:39 0:12:56 0:04:10 0:08:46 smithi main rhel 8.6 orch:cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.6_container_tools_3.0} 2-node-mgr agent/off orchestrator_cli} 2
Failure Reason:

{'smithi203.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}, 'smithi003.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}}

fail 6901254 2022-06-27 18:03:48 2022-06-28 00:18:43 2022-06-28 00:36:22 0:17:39 0:07:51 0:09:48 smithi main centos 8.stream orch:cephadm/rbd_iscsi/{0-single-container-host base/install cluster/{fixed-3 openstack} workloads/cephadm_iscsi} 3
Failure Reason:

Command failed on smithi133 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a37498aa-f679-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi133:vg_nvme/lv_4'

fail 6901255 2022-06-27 18:03:50 2022-06-28 00:18:43 2022-06-28 00:35:05 0:16:22 0:07:38 0:08:44 smithi main centos 8.stream orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_8.stream_container_tools} 1-start 2-services/basic 3-final} 1
Failure Reason:

Command failed on smithi109 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 587399c8-f679-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi109:vg_nvme/lv_4'

pass 6901256 2022-06-27 18:03:51 2022-06-28 00:18:44 2022-06-28 00:34:59 0:16:15 0:04:26 0:11:49 smithi main orch:cephadm/workunits/{agent/off mon_election/connectivity task/test_cephadm_repos} 1
dead 6901257 2022-06-27 18:03:52 2022-06-28 00:18:44 2022-06-28 00:31:43 0:12:59 0:04:10 0:08:49 smithi main rhel 8.6 orch:cephadm/smoke/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

{'smithi153.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}, 'smithi119.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}}

fail 6901258 2022-06-27 18:03:53 2022-06-28 00:18:45 2022-06-28 00:54:57 0:36:12 0:28:59 0:07:13 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

reached maximum tries (300) after waiting for 300 seconds

pass 6901259 2022-06-27 18:03:54 2022-06-28 00:18:45 2022-06-28 00:39:40 0:20:55 0:11:27 0:09:28 smithi main centos 8.stream orch:cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
pass 6901260 2022-06-27 18:03:56 2022-06-28 00:18:45 2022-06-28 00:55:23 0:36:38 0:29:00 0:07:38 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
fail 6901261 2022-06-27 18:03:57 2022-06-28 00:18:46 2022-06-28 00:36:48 0:18:02 0:09:19 0:08:43 smithi main centos 8.stream orch:cephadm/workunits/{agent/on mon_election/classic task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}} 1
Failure Reason:

Command failed on smithi008 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b6ad5e98-f679-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi008:vg_nvme/lv_4'

dead 6901262 2022-06-27 18:03:58 2022-06-28 00:18:46 2022-06-28 00:31:23 0:12:37 0:04:06 0:08:31 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-user 3-final} 2
Failure Reason:

{'smithi087.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}, 'smithi112.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}}

dead 6901263 2022-06-27 18:03:59 2022-06-28 00:18:46 2022-06-28 00:36:58 0:18:12 0:03:40 0:14:32 smithi main rhel 8.6 orch:cephadm/thrash/{0-distro/rhel_8.6_container_tools_3.0 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

{'smithi145.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}, 'smithi050.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': False, 'invocation': {'module_args': {'allow_downgrade': False, 'allowerasing': False, 'autoremove': False, 'bugfix': False, 'conf_file': None, 'disable_excludes': None, 'disable_gpg_check': False, 'disable_plugin': [], 'disablerepo': [], 'download_dir': None, 'download_only': False, 'enable_plugin': [], 'enablerepo': [], 'exclude': [], 'install_repoquery': True, 'install_weak_deps': True, 'installroot': '/', 'list': None, 'lock_timeout': 30, 'name': ['http://satellite.front.sepia.ceph.com/pub/katello-ca-consumer-latest.noarch.rpm'], 'releasever': None, 'security': False, 'skip_broken': False, 'state': 'present', 'update_cache': False, 'update_only': False, 'validate_certs': False}}, 'msg': 'Failed to validate GPG signature for katello-ca-consumer-satellite.front.sepia.ceph.com-1.0-4.noarch'}}

fail 6901264 2022-06-27 18:04:01 2022-06-28 02:31:31 2022-06-28 02:57:37 0:26:06 0:15:14 0:10:52 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_3.0 fixed-2 mode/root mon_election/connectivity msgr/async-v2only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi038 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5606b45e-f68d-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi038:vg_nvme/lv_4'

fail 6901265 2022-06-27 18:04:02 2022-06-28 02:31:31 2022-06-28 03:15:10 0:43:39 0:35:29 0:08:10 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress 3-final} 2
Failure Reason:

reached maximum tries (300) after waiting for 300 seconds

fail 6901266 2022-06-27 18:04:03 2022-06-28 02:31:32 2022-06-28 02:47:06 0:15:34 0:09:27 0:06:07 smithi main rhel 8.6 orch:cephadm/smoke/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop agent/off fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi073 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 117afe2c-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi073:/dev/nvme4n1'

pass 6901267 2022-06-27 18:04:04 2022-06-28 02:31:32 2022-06-28 03:40:32 1:09:00 0:54:40 0:14:20 smithi main ubuntu 20.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/on mon_election/connectivity} 2
fail 6901268 2022-06-27 18:04:05 2022-06-28 02:31:32 2022-06-28 02:54:50 0:23:18 0:12:40 0:10:38 smithi main orch:cephadm/workunits/{agent/off mon_election/connectivity task/test_nfs} 1
Failure Reason:

Command failed on smithi092 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 8f8eb4c0-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi092:vg_nvme/lv_4'

fail 6901269 2022-06-27 18:04:07 2022-06-28 02:31:33 2022-06-28 03:06:40 0:35:07 0:26:26 0:08:41 smithi main ubuntu 20.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/nfs-ingress2 3-final} 2
Failure Reason:

Command failed on smithi022 with status 32: "sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mount -t nfs 10.0.31.22:/fake /mnt/foo -o port=2999'"

dead 6901270 2022-06-27 18:04:08 2022-06-28 02:31:33 2022-06-28 02:47:45 0:16:12 0:05:53 0:10:19 smithi main rhel 8.6 orch:cephadm/osds/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

{'smithi146.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

pass 6901271 2022-06-27 18:04:09 2022-06-28 02:31:33 2022-06-28 03:13:22 0:41:49 0:31:40 0:10:09 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
fail 6901272 2022-06-27 18:04:10 2022-06-28 02:31:34 2022-06-28 03:10:46 0:39:12 0:31:14 0:07:58 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/nfs 3-final} 2
Failure Reason:

reached maximum tries (300) after waiting for 300 seconds

fail 6901273 2022-06-27 18:04:11 2022-06-28 02:31:34 2022-06-28 02:55:14 0:23:40 0:14:27 0:09:13 smithi main rhel 8.6 orch:cephadm/thrash/{0-distro/rhel_8.6_container_tools_rhel8 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi156 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1fba39ac-f68d-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi156:vg_nvme/lv_4'

fail 6901274 2022-06-27 18:04:12 2022-06-28 02:31:34 2022-06-28 02:51:56 0:20:22 0:14:01 0:06:21 smithi main rhel 8.6 orch:cephadm/with-work/{0-distro/rhel_8.6_container_tools_rhel8 fixed-2 mode/packaged mon_election/classic msgr/async start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi037 with status 22: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6fef6088-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi037:vg_nvme/lv_4'

fail 6901275 2022-06-27 18:04:14 2022-06-28 02:31:35 2022-06-28 03:24:29 0:52:54 0:45:08 0:07:46 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/16.2.5 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

reached maximum tries (300) after waiting for 300 seconds

fail 6901276 2022-06-27 18:04:15 2022-06-28 02:31:35 2022-06-28 02:54:03 0:22:28 0:09:35 0:12:53 smithi main ubuntu 20.04 orch:cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi101 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c0146c8e-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi101:/dev/nvme4n1'

fail 6901277 2022-06-27 18:04:16 2022-06-28 02:31:36 2022-06-28 02:54:12 0:22:36 0:11:48 0:10:48 smithi main orch:cephadm/workunits/{agent/on mon_election/classic task/test_orch_cli} 1
Failure Reason:

Command failed on smithi062 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d05635aa-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi062:vg_nvme/lv_4'

fail 6901278 2022-06-27 18:04:17 2022-06-28 02:31:36 2022-06-28 03:09:11 0:37:35 0:29:39 0:07:56 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/nfs2 3-final} 2
Failure Reason:

reached maximum tries (300) after waiting for 300 seconds

fail 6901279 2022-06-27 18:04:18 2022-06-28 02:31:36 2022-06-28 02:57:44 0:26:08 0:14:49 0:11:19 smithi main rhel 8.6 orch:cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi070 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 9f9d3b34-f68c-11ec-842c-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nHOST=$(hostname -s)\nOSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk \'"\'"\'{print $1}\'"\'"\')\necho "host $HOST, osd $OSD"\nceph orch daemon stop $OSD\nwhile ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done\nceph auth export $OSD > k\nceph orch daemon rm $OSD --force\nceph orch ps --refresh\nwhile ceph orch ps | grep $OSD ; do sleep 5 ; done\nceph auth add $OSD -i k\nceph cephadm osd activate $HOST\nwhile ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done\n\''

pass 6901280 2022-06-27 18:04:19 2022-06-28 02:31:36 2022-06-28 03:03:14 0:31:38 0:22:20 0:09:18 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/rgw-ingress 3-final} 2
pass 6901281 2022-06-27 18:04:20 2022-06-28 02:31:37 2022-06-28 03:05:30 0:33:53 0:27:18 0:06:35 smithi main centos 8.stream orch:cephadm/upgrade/{1-start-distro/1-start-centos_8.stream_container-tools 2-repo_digest/defaut 3-upgrade/simple 4-wait 5-upgrade-ls agent/on mon_election/classic} 2
fail 6901282 2022-06-27 18:04:21 2022-06-28 02:31:37 2022-06-28 02:54:46 0:23:09 0:13:31 0:09:38 smithi main ubuntu 20.04 orch:cephadm/thrash/{0-distro/ubuntu_20.04 1-start 2-thrash 3-tasks/radosbench fixed-2 msgr/async-v1only root} 2
Failure Reason:

Command failed on smithi017 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c8e7f650-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi017:vg_nvme/lv_4'

fail 6901283 2022-06-27 18:04:23 2022-06-28 02:31:38 2022-06-28 02:56:31 0:24:53 0:12:43 0:12:10 smithi main ubuntu 20.04 orch:cephadm/with-work/{0-distro/ubuntu_20.04 fixed-2 mode/root mon_election/connectivity msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

Command failed on smithi103 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e8473ba0-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi103:vg_nvme/lv_4'

pass 6901284 2022-06-27 18:04:24 2022-06-28 02:31:38 2022-06-28 03:08:23 0:36:45 0:30:00 0:06:45 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
pass 6901285 2022-06-27 18:04:25 2022-06-28 02:31:38 2022-06-28 02:53:12 0:21:34 0:14:35 0:06:59 smithi main centos 8.stream orch:cephadm/orchestrator_cli/{0-random-distro$/{centos_8.stream_container_tools_crun} 2-node-mgr agent/on orchestrator_cli} 2
fail 6901286 2022-06-27 18:04:26 2022-06-28 02:31:39 2022-06-28 02:47:36 0:15:57 0:09:06 0:06:51 smithi main centos 8.stream orch:cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi047 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 1605cc6a-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi047:/dev/nvme4n1'

fail 6901287 2022-06-27 18:04:27 2022-06-28 02:31:39 2022-06-28 02:46:16 0:14:37 0:07:15 0:07:22 smithi main centos 8.stream orch:cephadm/smoke-singlehost/{0-random-distro$/{centos_8.stream_container_tools_crun} 1-start 2-services/rgw 3-final} 1
Failure Reason:

Command failed on smithi026 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ae4a25ee-f68b-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi026:vg_nvme/lv_4'

pass 6901288 2022-06-27 18:04:29 2022-06-28 02:31:39 2022-06-28 02:50:39 0:19:00 0:09:41 0:09:19 smithi main orch:cephadm/workunits/{agent/on mon_election/classic task/test_adoption} 1
pass 6901289 2022-06-27 18:04:30 2022-06-28 02:31:40 2022-06-28 02:55:05 0:23:25 0:16:06 0:07:19 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/rgw 3-final} 2
pass 6901290 2022-06-27 18:04:31 2022-06-28 02:31:40 2022-06-28 02:54:26 0:22:46 0:15:11 0:07:35 smithi main rhel 8.6 orch:cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/repave-all} 2
pass 6901291 2022-06-27 18:04:32 2022-06-28 02:31:40 2022-06-28 03:01:57 0:30:17 0:17:12 0:13:05 smithi main ubuntu 20.04 orch:cephadm/smoke-roleless/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-services/basic 3-final} 2
fail 6901292 2022-06-27 18:04:33 2022-06-28 02:31:41 2022-06-28 03:00:53 0:29:12 0:17:35 0:11:37 smithi main orch:cephadm/workunits/{agent/off mon_election/connectivity task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi154 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6815a256a07bf14ab703757b4e123bd2bc9f6e8a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

fail 6901293 2022-06-27 18:04:34 2022-06-28 02:31:41 2022-06-28 02:50:33 0:18:52 0:10:18 0:08:34 smithi main centos 8.stream orch:cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/connectivity start} 2
Failure Reason:

Command failed on smithi064 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 527d1112-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi064:/dev/nvme4n1'

pass 6901294 2022-06-27 18:04:35 2022-06-28 02:31:41 2022-06-28 02:54:19 0:22:38 0:14:01 0:08:37 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-services/client-keyring 3-final} 2
fail 6901295 2022-06-27 18:04:36 2022-06-28 02:31:42 2022-06-28 02:54:08 0:22:26 0:11:59 0:10:27 smithi main centos 8.stream orch:cephadm/thrash/{0-distro/centos_8.stream_container_tools 1-start 2-thrash 3-tasks/small-objects fixed-2 msgr/async-v2only root} 2
Failure Reason:

Command failed on smithi120 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 08e4954c-f68d-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi120:vg_nvme/lv_4'

fail 6901296 2022-06-27 18:04:37 2022-06-28 02:31:42 2022-06-28 02:52:39 0:20:57 0:12:13 0:08:44 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools fixed-2 mode/packaged mon_election/classic msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

Command failed on smithi107 with status 22: 'sudo cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c7cb84c6-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi107:vg_nvme/lv_4'

dead 6901297 2022-06-27 18:04:38 2022-06-28 02:31:42 2022-06-28 14:46:58 12:15:16 smithi main centos 8.stream orch:cephadm/mgr-nfs-upgrade/{0-centos_8.stream_container_tools 1-bootstrap/octopus 1-start 2-nfs 3-upgrade-with-workload 4-final} 2
Failure Reason:

hit max job timeout

pass 6901298 2022-06-27 18:04:39 2022-06-28 02:31:43 2022-06-28 03:09:41 0:37:58 0:29:35 0:08:23 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
pass 6901299 2022-06-27 18:04:40 2022-06-28 02:31:43 2022-06-28 02:56:49 0:25:06 0:15:49 0:09:17 smithi main centos 8.stream orch:cephadm/smoke-roleless/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-services/iscsi 3-final} 2
pass 6901300 2022-06-27 18:04:41 2022-06-28 02:31:43 2022-06-28 03:37:52 1:06:09 0:55:15 0:10:54 smithi main ubuntu 20.04 orch:cephadm/upgrade/{1-start-distro/1-start-ubuntu_20.04-15.2.9 2-repo_digest/repo_digest 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/connectivity} 2
pass 6901301 2022-06-27 18:04:43 2022-06-28 02:31:44 2022-06-28 02:46:49 0:15:05 0:05:57 0:09:08 smithi main orch:cephadm/workunits/{agent/on mon_election/classic task/test_cephadm_repos} 1
fail 6901302 2022-06-27 18:04:44 2022-06-28 02:31:44 2022-06-28 03:05:24 0:33:40 0:21:04 0:12:36 smithi main ubuntu 20.04 orch:cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi050 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a4a396fa-f68c-11ec-842c-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

pass 6901303 2022-06-27 18:04:45 2022-06-28 02:31:44 2022-06-28 02:58:03 0:26:19 0:19:32 0:06:47 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-services/mirror 3-final} 2
fail 6901304 2022-06-27 18:04:46 2022-06-28 02:31:45 2022-06-28 02:53:28 0:21:43 0:11:27 0:10:16 smithi main rhel 8.6 orch:cephadm/smoke/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/classic start} 2
Failure Reason:

Command failed on smithi008 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a186ef6c-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi008:/dev/nvme4n1'

fail 6901305 2022-06-27 18:04:47 2022-06-28 02:31:45 2022-06-28 02:53:31 0:21:46 0:12:06 0:09:40 smithi main centos 8.stream orch:cephadm/thrash/{0-distro/centos_8.stream_container_tools_crun 1-start 2-thrash 3-tasks/snaps-few-objects fixed-2 msgr/async root} 2
Failure Reason:

Command failed on smithi133 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f32d9618-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi133:vg_nvme/lv_4'

fail 6901306 2022-06-27 18:04:48 2022-06-28 02:31:45 2022-06-28 02:53:29 0:21:44 0:12:07 0:09:37 smithi main centos 8.stream orch:cephadm/with-work/{0-distro/centos_8.stream_container_tools_crun fixed-2 mode/root mon_election/connectivity msgr/async start tasks/rados_python} 2
Failure Reason:

Command failed on smithi109 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid edd0a1b0-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi109:vg_nvme/lv_4'

fail 6901307 2022-06-27 18:04:49 2022-06-28 02:31:46 2022-06-28 03:12:07 0:40:21 0:32:54 0:07:27 smithi main rhel 8.6 orch:cephadm/smoke-roleless/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-services/nfs-ingress-rgw-bucket 3-final} 2
Failure Reason:

reached maximum tries (300) after waiting for 300 seconds

pass 6901308 2022-06-27 18:04:50 2022-06-28 02:31:46 2022-06-28 03:10:49 0:39:03 0:31:22 0:07:41 smithi main centos 8.stream orch:cephadm/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-with-workload 4-verify}} 2
fail 6901309 2022-06-27 18:04:51 2022-06-28 02:31:47 2022-06-28 02:49:35 0:17:48 0:11:21 0:06:27 smithi main centos 8.stream orch:cephadm/workunits/{agent/off mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}} 1
Failure Reason:

Command failed on smithi203 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6815a256a07bf14ab703757b4e123bd2bc9f6e8a shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 58ce0486-f68c-11ec-842c-001a4aab830c -- ceph orch daemon add osd smithi203:vg_nvme/lv_4'

pass 6901310 2022-06-27 18:04:52 2022-06-28 02:31:47 2022-06-28 02:55:02 0:23:15 0:15:08 0:08:07 smithi main centos 8.stream orch:cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2