Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 5944460 2021-03-07 15:05:20 2021-03-07 15:06:54 2021-03-07 15:44:19 0:37:25 0:30:23 0:07:02 smithi master ubuntu 18.04 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
pass 5944461 2021-03-07 15:05:21 2021-03-07 15:06:54 2021-03-07 15:40:10 0:33:16 0:26:35 0:06:41 smithi master rados/upgrade/pacific-x/rgw-multisite/{clusters frontend overrides realm tasks upgrade/primary} 2
fail 5944462 2021-03-07 15:05:22 2021-03-07 15:06:54 2021-03-07 15:26:45 0:19:51 0:12:35 0:07:16 smithi master ubuntu 20.04 rados/cephadm/dashboard/{0-distro/ubuntu_20.04_kubic_testing task/test_e2e} 2
Failure Reason:

Command failed (workunit test cephadm/create_iscsi_disks.sh) on smithi052 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=046d34e1487b5b43f044d66a72a077624af74765 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/cephadm/create_iscsi_disks.sh'
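Three of the four dashboard/test_e2e jobs in this run (5944462, 5944465, 5944468) failed in this same workunit. Exit status 1 means the workunit script itself failed under "adjust-ulimits ceph-coverage ... timeout 3h", not that the harness timed out. The script presumably provisions scratch block devices for the iSCSI e2e test; as a rough, hypothetical sketch of the kind of setup such a workunit performs (the real qa/workunits/cephadm/create_iscsi_disks.sh may differ; device count and sizes below are invented):

    #!/usr/bin/env bash
    # Hypothetical sketch only: back iSCSI LUNs with loopback devices.
    set -ex
    for i in 1 2 3; do
        img="/tmp/iscsi_disk_${i}.img"
        truncate -s 1G "$img"              # sparse backing file
        sudo losetup --find --show "$img"  # attach; prints /dev/loopN
    done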

pass 5944463 2021-03-07 15:05:23 2021-03-07 15:07:45 2021-03-07 15:48:01 0:40:16 0:27:36 0:12:40 smithi master centos 8.2 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-comp-zlib rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
fail 5944464 2021-03-07 15:05:24 2021-03-07 15:09:05 2021-03-07 15:32:17 0:23:12 0:17:11 0:06:01 smithi master ubuntu 20.04 rados/upgrade/pacific-x/parallel/{0-start 1-tasks distro1$/{ubuntu_20.04_kubic_testing} mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}} 2
Failure Reason:

Command failed on smithi019 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/daemon-base:latest-pacific shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 923230d2-7f58-11eb-9075-001a4aab830c -e sha1=6a5ee1eee6d7bede3f5918f1543f4c71f8d65d5f -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''
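This is the post-upgrade convergence check: "ceph versions" prints a JSON map of running daemon versions, and "jq -e '.overall | length == 1'" exits non-zero unless every daemon reports the same version, so the failure means the cluster still had mixed versions when the check ran. The check is easy to reproduce standalone (the sample JSON below is made up; the jq expression is the one from the failed command):

    # jq -e maps a false result to exit status 1, which is what teuthology
    # reports above. A mixed .overall map (two version keys) fails the check.
    echo '{"overall":{"ceph version 16.2.0 pacific":3,"ceph version 15.2.9 octopus":12}}' \
      | jq -e '.overall | length == 1'
    echo "exit: $?"   # 1: two version keys in .overall, upgrade not converged

    echo '{"overall":{"ceph version 16.2.0 pacific":15}}' \
      | jq -e '.overall | length == 1'
    echo "exit: $?"   # 0: single version, check passes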

fail 5944465 2021-03-07 15:05:25 2021-03-07 15:09:05 2021-03-07 15:28:19 0:19:14 0:13:02 0:06:12 smithi master ubuntu 20.04 rados/cephadm/dashboard/{0-distro/ubuntu_20.04_kubic_stable task/test_e2e} 2
Failure Reason:

Command failed (workunit test cephadm/create_iscsi_disks.sh) on smithi017 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=046d34e1487b5b43f044d66a72a077624af74765 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/cephadm/create_iscsi_disks.sh'

fail 5944466 2021-03-07 15:05:26 2021-03-07 15:09:06 2021-03-07 15:27:02 0:17:56 0:10:09 0:07:47 smithi master ubuntu 20.04 rados/cephadm/dashboard/{0-distro/ubuntu_20.04_kubic_testing task/test_e2e} 2
Failure Reason:

Command failed on smithi173 with status 126: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:6a5ee1eee6d7bede3f5918f1543f4c71f8d65d5f shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid f5eeea0c-7f58-11eb-9075-001a4aab830c -- ceph orch host ls --format=json'
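Unlike the other dashboard failures, this one exits with status 126, the shell's "found but not executable" code (127 would be "not found"), so the failure is likely in launching the cephadm container shell on the kubic podman build rather than in "ceph orch host ls" itself. The exit-status conventions the harness surfaces here can be verified locally:

    # POSIX shell exit-status conventions, useful when reading these logs:
    sh -c 'exit 1';          echo $?   # 1   command ran and reported failure
    /no/such/cmd;            echo $?   # 127 command not found
    touch /tmp/f && /tmp/f;  echo $?   # 126 found but not executable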

fail 5944467 2021-03-07 15:05:27 2021-03-07 15:09:46 2021-03-07 15:33:31 0:23:45 0:17:03 0:06:42 smithi master ubuntu 20.04 rados/upgrade/pacific-x/parallel/{0-start 1-tasks distro1$/{ubuntu_20.04} mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}} 2
Failure Reason:

Command failed on smithi075 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/daemon-base:latest-pacific shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid b5426d44-7f58-11eb-9075-001a4aab830c -e sha1=6a5ee1eee6d7bede3f5918f1543f4c71f8d65d5f -- bash -c \'ceph versions | jq -e \'"\'"\'.overall | length == 1\'"\'"\'\''

fail 5944468 2021-03-07 15:05:28 2021-03-07 15:09:56 2021-03-07 15:28:57 0:19:01 0:13:10 0:05:51 smithi master ubuntu 20.04 rados/cephadm/dashboard/{0-distro/ubuntu_20.04_kubic_stable task/test_e2e} 2
Failure Reason:

Command failed (workunit test cephadm/create_iscsi_disks.sh) on smithi053 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=046d34e1487b5b43f044d66a72a077624af74765 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/cephadm/create_iscsi_disks.sh'