Status | Job ID | Links | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
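Note on the timing columns, as they appear in the rows below: Runtime is the wall-clock span from Started to Updated, and equals Duration (time the job was actually running) plus In Waiting (e.g. job 3479730: 0:46:46 + 0:09:14 = 0:56:00).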
fail | 3479730 | | 2019-01-18 21:50:18 | 2019-01-18 22:44:42 | 2019-01-18 23:40:42 | 0:56:00 | 0:46:46 | 0:09:14 | smithi | master | centos | 7.5 | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | 1
Failure Reason:

Command failed (workunit test cephtool/test.sh) on smithi001 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6340e1cbcd3009207e3b713a14843817d3f7e621 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'
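The quoted command can be reduced to a manual re-run for debugging. This is a minimal sketch, assuming a ceph checkout at the listed CEPH_REF and an already-running test cluster reachable as --cluster ceph; the teuthology wrappers (adjust-ulimits, ceph-coverage) and the /home/ubuntu/cephtest mount layout are omitted, so paths and cluster setup are left to the reader.

# Hedged sketch of a manual re-run of the failing workunit; not the exact teuthology invocation.
git clone https://github.com/ceph/ceph.git && cd ceph
git checkout 6340e1cbcd3009207e3b713a14843817d3f7e621   # CEPH_REF from the failing jobs
export CEPH_CLI_TEST_DUP_COMMAND=1                       # same flag the job sets
export CEPH_ARGS="--cluster ceph" CEPH_ID="0"
timeout 3h ./qa/workunits/cephtool/test.sh               # exited with status 22 in this run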

pass | 3479731 | | 2019-01-18 21:50:19 | 2019-01-18 22:44:43 | 2019-01-19 00:30:43 | 1:46:00 | 1:07:36 | 0:38:24 | smithi | master | ubuntu | 16.04 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} | 2
pass | 3479732 | | 2019-01-18 21:50:19 | 2019-01-18 22:44:43 | 2019-01-18 23:42:43 | 0:58:00 | 0:17:06 | 0:40:54 | smithi | master | centos | 7.5 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml supported-random-distro$/{centos_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} | 2
pass | 3479733 | | 2019-01-18 21:50:20 | 2019-01-18 22:44:43 | 2019-01-18 23:56:43 | 1:12:00 | 0:14:24 | 0:57:36 | smithi | master | centos | 7.5 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml supported-random-distro$/{centos_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} | 2
fail | 3479734 | | 2019-01-18 21:50:21 | 2019-01-18 22:45:16 | 2019-01-18 23:31:16 | 0:46:00 | 0:37:34 | 0:08:26 | smithi | master | ubuntu | 16.04 | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}} | 1
Failure Reason:

Command failed (workunit test cephtool/test.sh) on smithi143 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6340e1cbcd3009207e3b713a14843817d3f7e621 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

pass | 3479735 | | 2019-01-18 21:50:21 | 2019-01-18 22:45:55 | 2019-01-18 23:15:55 | 0:30:00 | 0:22:37 | 0:07:23 | smithi | master | rhel | 7.5 | rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} tasks/rgw_snaps.yaml} | 2
fail | 3479736 | | 2019-01-18 21:50:22 | 2019-01-18 22:46:27 | 2019-01-18 23:44:27 | 0:58:00 | 0:46:11 | 0:11:49 | smithi | master | centos | 7.5 | rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}} | 1
Failure Reason:

Command failed (workunit test cephtool/test.sh) on smithi064 with status 22: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6340e1cbcd3009207e3b713a14843817d3f7e621 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

pass | 3479737 | | 2019-01-18 21:50:23 | 2019-01-18 22:46:28 | 2019-01-18 23:14:28 | 0:28:00 | 0:20:31 | 0:07:29 | smithi | master | rhel | 7.5 | rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/osd-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} | 2