Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 6676993 2022-02-11 17:04:50 2022-02-11 17:05:41 2022-02-11 17:27:52 0:22:11 0:11:53 0:10:18 smithi master centos 8.stream rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} tasks/rados_python} 2
pass 6676994 2022-02-11 17:04:50 2022-02-11 17:05:41 2022-02-11 17:27:12 0:21:31 0:12:35 0:08:56 smithi master ubuntu 20.04 rados/singleton/{all/rebuild-mondb mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest}} 1
pass 6676995 2022-02-11 17:04:51 2022-02-11 17:05:41 2022-02-11 17:31:00 0:25:19 0:14:40 0:10:39 smithi master rados/cephadm/workunits/{agent/on mon_election/connectivity task/test_cephadm} 1
fail 6676996 2022-02-11 17:04:52 2022-02-11 17:05:41 2022-02-11 17:28:36 0:22:55 0:16:25 0:06:30 smithi master rhel 8.4 rados/cephadm/osds/{0-distro/rhel_8.4_container_tools_3.0 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi102 with status 127: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:31d9c206473bba8bc2236acfc6bcf79cb54a65c6 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f1b1af70-8b5e-11ec-8c35-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nHOST=$(hostname -s)\nOSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk \'"\'"\'{print $1}\'"\'"\')\necho "host $HOST, osd $OSD"\nceph orch daemon stop $OSD\nwhile ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done\nceph auth export $OSD > k\nceph orch daemon rm $OSD --force\nceph orch ps --refresh\nwhile ceph orch ps | grep $OSD ; do sleep 5 ; done\nceph auth add $OSD -i k\nceph cephadm osd activate $HOST\nwhile ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done\n\''

pass 6676997 2022-02-11 17:04:53 2022-02-11 17:05:42 2022-02-11 17:42:59 0:37:17 0:28:13 0:09:04 smithi master rados/cephadm/workunits/{agent/on mon_election/connectivity task/test_nfs} 1
pass 6676998 2022-02-11 17:04:54 2022-02-11 17:05:42 2022-02-11 17:34:56 0:29:14 0:19:48 0:09:26 smithi master ubuntu 20.04 rados/singleton/{all/thrash_cache_writeback_proxy_none mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest}} 2
pass 6676999 2022-02-11 17:04:55 2022-02-11 17:05:42 2022-02-11 17:42:48 0:37:06 0:28:43 0:08:23 smithi master centos 8.stream rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/connectivity random-objectstore$/{bluestore-comp-zlib} supported-random-distro$/{centos_8} tasks/module_selftest} 2
pass 6677000 2022-02-11 17:04:56 2022-02-11 17:05:52 2022-02-11 17:45:55 0:40:03 0:31:50 0:08:13 smithi master rhel 8.4 rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic random-objectstore$/{bluestore-comp-zlib} supported-random-distro$/{rhel_8} tasks/progress} 2
pass 6677001 2022-02-11 17:04:57 2022-02-11 17:07:03 2022-02-11 17:26:13 0:19:10 0:09:03 0:10:07 smithi master centos 8.stream rados/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-wait} 2
pass 6677002 2022-02-11 17:04:58 2022-02-11 17:07:23 2022-02-11 17:42:40 0:35:17 0:25:29 0:09:48 smithi master ubuntu 20.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/pool-snaps-few-objects} 2
pass 6677003 2022-02-11 17:04:59 2022-02-11 17:07:24 2022-02-11 17:32:48 0:25:24 0:14:43 0:10:41 smithi master rados/cephadm/workunits/{agent/off mon_election/classic task/test_cephadm} 1
pass 6677004 2022-02-11 17:05:00 2022-02-11 17:07:24 2022-02-11 18:26:13 1:18:49 1:09:44 0:09:05 smithi master centos 8.stream rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-hybrid} tasks/dashboard} 2
pass 6677005 2022-02-11 17:05:01 2022-02-11 17:07:24 2022-02-11 17:52:49 0:45:25 0:33:09 0:12:16 smithi master ubuntu 20.04 rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench 3-final cluster/1-node k8s/1.21 net/host rook/1.7.2} 1
pass 6677006 2022-02-11 17:05:02 2022-02-11 17:08:15 2022-02-11 17:33:41 0:25:26 0:15:38 0:09:48 smithi master ubuntu 20.04 rados/singleton-nomsgr/{all/admin_socket_output mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}} 1
pass 6677007 2022-02-11 17:05:03 2022-02-11 17:08:15 2022-02-11 18:42:00 1:33:45 1:25:04 0:08:41 smithi master ubuntu 20.04 rados/singleton/{all/ec-inconsistent-hinfo mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}} 1
pass 6677008 2022-02-11 17:05:04 2022-02-11 17:08:35 2022-02-11 17:51:56 0:43:21 0:36:48 0:06:33 smithi master rhel 8.4 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-hybrid rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
fail 6677009 2022-02-11 17:05:05 2022-02-11 17:08:35 2022-02-11 17:25:46 0:17:11 0:08:10 0:09:01 smithi master centos 8.stream rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
Failure Reason:

Command failed on smithi100 with status 127: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:31d9c206473bba8bc2236acfc6bcf79cb54a65c6 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c60df1c6-8b5e-11ec-8c35-001a4aab830c -- bash -c \'set -e\nset -x\nceph orch ps\nHOST=$(hostname -s)\nOSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk \'"\'"\'{print $1}\'"\'"\')\necho "host $HOST, osd $OSD"\nceph orch daemon stop $OSD\nwhile ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done\nceph auth export $OSD > k\nceph orch daemon rm $OSD --force\nceph orch ps --refresh\nwhile ceph orch ps | grep $OSD ; do sleep 5 ; done\nceph auth add $OSD -i k\nceph cephadm osd activate $HOST\nwhile ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done\n\''

pass 6677010 2022-02-11 17:05:06 2022-02-11 17:08:56 2022-02-11 18:07:44 0:58:48 0:50:00 0:08:48 smithi master centos 8.stream rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-low-osd-mem-target rados tasks/rados_api_tests validater/valgrind} 2
pass 6677011 2022-02-11 17:05:07 2022-02-11 17:08:56 2022-02-11 17:32:58 0:24:02 0:14:28 0:09:34 smithi master rados/cephadm/workunits/{agent/on mon_election/connectivity task/test_orch_cli} 1
pass 6677012 2022-02-11 17:05:08 2022-02-11 17:09:26 2022-02-11 17:32:18 0:22:52 0:14:19 0:08:33 smithi master rados/cephadm/workunits/{agent/off mon_election/connectivity task/test_cephadm} 1