Status | Job ID | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
fail 6449746 2021-10-18 21:44:37 2021-10-18 21:49:44 2021-10-18 22:11:46 0:22:02 0:12:29 0:09:33 smithi master centos 8.2 orch:cephadm:osds/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi185 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5b17ea5c-305f-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''
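
For readability, the script embedded in that command (identical across the rm-zap-add jobs in this run, apart from fsid and hostname) unescapes to the following bash; the comments are added here for orientation and are not part of the logged command:

    set -e
    set -x
    ceph orch ps
    ceph orch device ls
    # look up the device id backing osd.1, then the host and device path it maps to
    DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}')
    HOST=$(ceph orch device ls | grep $DEVID | awk '{print $1}')
    DEV=$(ceph orch device ls | grep $DEVID | awk '{print $2}')
    echo "host $HOST, dev $DEV, devid $DEVID"
    # remove osd.1 and poll until the removal is no longer in progress
    ceph orch osd rm 1
    while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
    # zap the freed device and explicitly re-add it as an OSD
    ceph orch device zap $HOST $DEV --force
    ceph orch daemon add osd $HOST:$DEV
    # wait for osd.1 to come back up
    while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done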

fail 6449747 2021-10-18 21:44:38 2021-10-18 21:49:44 2021-10-18 22:11:44 0:22:00 0:12:50 0:09:10 smithi master centos 8.3 orch:cephadm:osds/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi060 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 58c2dd5c-305f-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''
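
The rm-zap-wait variant differs from the rm-zap-add script shown above only in its tail: after zapping the device it does not run `ceph orch daemon add osd`, and instead loops until osd.1 reports up again (presumably redeployed by the existing OSD service spec; that interpretation is an assumption, the waiting loop itself is from the log):

    # zap the freed device, then wait for osd.1 to come back up on its own
    ceph orch device zap $HOST $DEV --force
    while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done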

fail 6449748 2021-10-18 21:44:39 2021-10-18 21:51:42 2021-10-18 22:13:42 0:22:00 0:16:01 0:05:59 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi146 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 9ad4b940-305f-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449749 2021-10-18 21:44:40 2021-10-18 21:53:41 2021-10-18 22:17:42 0:24:01 0:16:50 0:07:11 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi019 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid ee45d38e-305f-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449750 2021-10-18 21:44:41 2021-10-18 21:53:45 2021-10-18 22:19:46 0:26:01 0:13:49 0:12:12 smithi master ubuntu 20.04 orch:cephadm:osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi023 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid debb7d42-305f-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449751 2021-10-18 21:44:42 2021-10-18 21:53:49 2021-10-18 22:15:50 0:22:01 0:12:40 0:09:21 smithi master centos 8.2 orch:cephadm:osds/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi142 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d7dffab6-305f-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449752 2021-10-18 21:44:43 2021-10-18 21:55:38 2021-10-18 22:17:39 0:22:01 0:13:01 0:09:00 smithi master centos 8.3 orch:cephadm:osds/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi063 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 2630aab2-3060-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449753 2021-10-18 21:44:43 2021-10-18 21:55:41 2021-10-18 22:19:42 0:24:01 0:16:28 0:07:33 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi061 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 3bb6a17a-3060-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449754 2021-10-18 21:44:44 2021-10-18 22:01:40 2021-10-18 22:23:40 0:22:00 0:16:18 0:05:42 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi089 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0243edb6-3061-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449755 2021-10-18 21:44:45 2021-10-18 22:05:41 2021-10-18 22:31:42 0:26:01 0:14:00 0:12:01 smithi master ubuntu 20.04 orch:cephadm:osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi157 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:b53c9ab78265f6dad241b4b05aa87603f7e66e27 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 86828646-3061-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''