Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 6449130 2021-10-18 19:06:01 2021-10-18 19:09:37 2021-10-18 19:33:38 0:24:01 0:12:46 0:11:15 smithi master centos 8.2 orch:cephadm:osds/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi109 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0bf73984-3049-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''
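
For readability, this is the script that the failing bash -c runs inside the cephadm shell for the rm-zap-add jobs, with the log's nested quoting unwrapped (reconstructed verbatim from the command above, comments added):

    set -e
    set -x
    ceph orch ps
    ceph orch device ls
    # Map osd.1 back to its device ID, then to the host and device path.
    DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}')
    HOST=$(ceph orch device ls | grep $DEVID | awk '{print $1}')
    DEV=$(ceph orch device ls | grep $DEVID | awk '{print $2}')
    echo "host $HOST, dev $DEV, devid $DEVID"
    # Remove osd.1, wait for the removal to complete, zap the device,
    # re-add it explicitly, then wait for osd.1 to report up again.
    ceph orch osd rm 1
    while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
    ceph orch device zap $HOST $DEV --force
    ceph orch daemon add osd $HOST:$DEV
    while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done

Because of set -e the script aborts at the first failing command, so status 22 is presumably the exit status of whichever step failed first.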

fail 6449132 2021-10-18 19:06:02 2021-10-18 19:09:37 2021-10-18 19:29:39 0:20:02 0:09:22 0:10:40 smithi master centos 8.3 orch:cephadm:osds/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi138 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f231168c-3048-11ec-8c28-001a4aab830c -- ceph mon dump -f json'
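
The failing command here is simpler: cephadm shell starts a container from the listed ceph-ci image and runs the trailing command inside it. Reflowed for readability (the same command as in the log line above):

    sudo /home/ubuntu/cephtest/cephadm \
      --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 \
      shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid f231168c-3048-11ec-8c28-001a4aab830c \
      -- ceph mon dump -f json

Job 6449142 below fails on the same ceph mon dump -f json call but with status 126, which by shell convention means the command could not be executed at all rather than failing monitor-side.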

fail 6449134 2021-10-18 19:06:03 2021-10-18 19:09:38 2021-10-18 19:31:41 0:22:03 0:16:01 0:06:02 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi187 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fbccb3c2-3048-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449136 2021-10-18 19:06:04 2021-10-18 19:11:24 2021-10-18 19:35:25 0:24:01 0:16:44 0:07:17 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi195 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 46d44484-3049-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''
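
The rm-zap-wait jobs run the same script as the rm-zap-add jobs above, minus the explicit re-add: after zapping the device they do not call ceph orch daemon add osd, and instead just poll until the orchestrator brings osd.1 back on its own. The unescaped tail of the script:

    ceph orch osd rm 1
    while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
    ceph orch device zap $HOST $DEV --force
    # No explicit re-add: the test presumably expects cephadm to recreate
    # osd.1 automatically once the zapped device is rediscovered.
    while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done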

fail 6449138 2021-10-18 19:06:05 2021-10-18 19:13:23 2021-10-18 19:39:23 0:26:00 0:14:15 0:11:45 smithi master ubuntu 20.04 orch:cephadm:osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi143 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5499fa64-3049-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449140 2021-10-18 19:06:06 2021-10-18 19:13:31 2021-10-18 19:35:31 0:22:00 0:12:34 0:09:26 smithi master centos 8.2 orch:cephadm:osds/{0-distro/centos_8.2_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi035 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 77398f4e-3049-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449142 2021-10-18 19:06:07 2021-10-18 19:13:32 2021-10-18 19:33:32 0:20:00 0:08:57 0:11:03 smithi master centos 8.3 orch:cephadm:osds/{0-distro/centos_8.3_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi146 with status 126: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid a297c94e-3049-11ec-8c28-001a4aab830c -- ceph mon dump -f json'

fail 6449144 2021-10-18 19:06:08 2021-10-18 19:16:54 2021-10-18 19:38:55 0:22:01 0:15:46 0:06:15 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi103 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 11b7db02-304a-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449146 2021-10-18 19:06:09 2021-10-18 19:18:53 2021-10-18 19:40:53 0:22:00 0:16:27 0:05:33 smithi master rhel 8.4 orch:cephadm:osds/{0-distro/rhel_8.4_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-add} 2
Failure Reason:

Command failed on smithi096 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 48d3bdcc-304a-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nceph orch daemon add osd $HOST:$DEV\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''

fail 6449148 2021-10-18 19:06:10 2021-10-18 19:18:59 2021-10-18 19:44:59 0:26:00 0:13:52 0:12:08 smithi master ubuntu 20.04 orch:cephadm:osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
Failure Reason:

Command failed on smithi062 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:ba88d0a8c1dd128d864cff753c73d2d788e27a57 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 58c39d74-304a-11ec-8c28-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nceph orch device ls\nDEVID=$(ceph device ls | grep osd.1 | awk \'"\'"\'{print $1}\'"\'"\')\nHOST=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $1}\'"\'"\')\nDEV=$(ceph orch device ls | grep $DEVID | awk \'"\'"\'{print $2}\'"\'"\')\necho "host $HOST, dev $DEV, devid $DEVID"\nceph orch osd rm 1\nwhile ceph orch osd rm status | grep ^1 ; do sleep 5 ; done\nceph orch device zap $HOST $DEV --force\nwhile ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done\n\''