Ceph Branch: wip-sage4-testing-2019-01-12-0651
Suite Branch: wip-sage4-testing-2019-01-12-0651
Teuthology Branch: master
Machine: smithi

All jobs in this run share the branch and machine values above; the ID, Status, and Nodes fields are not recorded here. Each job below lists its OS, Description, and, where one was reported, Failure Reason.

OS: rhel 7.5
Description: rados/singleton-nomsgr/{all/msgr.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}

OS: ubuntu 16.04
Description: rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/osd.yaml}
Failure Reason: Command failed (workunit test osd/osd-rep-recov-eio.sh) on smithi104 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=983f2685ad3afaea8d10031bd48e25bd6cb89340 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/osd/osd-rep-recov-eio.sh'

OS: rhel 7.5
Description: rados/standalone/{supported-random-distro$/{rhel_latest.yaml} workloads/scrub.yaml}
Failure Reason: Command failed (workunit test scrub/osd-scrub-repair.sh) on smithi159 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=983f2685ad3afaea8d10031bd48e25bd6cb89340 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-repair.sh'

OS: centos
Description: rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}

OS: centos 7.5
Description: rados/upgrade/luminous-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-nautilus.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} supported-random-distro$/{centos_latest.yaml} thrashosds-health.yaml}

OS: rhel 7.5
Description: rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{rhel_latest.yaml} tasks/dashboard.yaml}
Failure Reason: Test failure: test_add_osd_flag (tasks.mgr.dashboard.test_osd.OsdFlagsTest)

OS: ubuntu 18.04
Description: rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}

OS: ubuntu 18.04
Description: rados/standalone/{supported-random-distro$/{ubuntu_latest.yaml} workloads/erasure-code.yaml}
Failure Reason: Command failed (workunit test erasure-code/test-erasure-eio.sh) on smithi062 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=983f2685ad3afaea8d10031bd48e25bd6cb89340 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/erasure-code/test-erasure-eio.sh'

OS: centos 7.5
Description: rados/multimon/{clusters/9.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_latest.yaml} tasks/mon_clock_no_skews.yaml}

OS: ubuntu 16.04
Description: rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/module_selftest.yaml}
Failure Reason: Test failure: test_devicehealth (tasks.mgr.test_module_selftest.TestModuleSelftest)

OS: ubuntu 16.04
Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml}

OS: ubuntu 18.04
Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml}

OS: centos 7.5
Description: rados/standalone/{supported-random-distro$/{centos_latest.yaml} workloads/mon.yaml}
Failure Reason: Command failed (workunit test mon/mon-handle-forward.sh) on smithi139 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=983f2685ad3afaea8d10031bd48e25bd6cb89340 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/mon-handle-forward.sh'
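
The standalone failures above (osd-rep-recov-eio.sh, osd-scrub-repair.sh, test-erasure-eio.sh, mon-handle-forward.sh) are the same scripts the teuthology workunit commands in the Failure Reason fields invoke from qa/standalone/. A minimal sketch for re-running them locally, assuming a built ceph source tree with a ./build directory and that qa/run-standalone.sh accepts test script basenames (the ~/ceph path is hypothetical):

    # Run from the build directory so the helper can find the locally built binaries.
    cd ~/ceph/build
    # Each argument is the basename of a failing script from the table above;
    # run-standalone.sh is expected to locate it under qa/standalone/ and run it.
    ../qa/run-standalone.sh osd-rep-recov-eio.sh
    ../qa/run-standalone.sh osd-scrub-repair.sh
    ../qa/run-standalone.sh test-erasure-eio.sh
    ../qa/run-standalone.sh mon-handle-forward.sh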