Ceph Branch: wip-sage-testing-2018-12-05-1258
Suite Branch: wip-sage-testing-2018-12-05-1258
Teuthology Branch: wip-addrvec
Machine: smithi

Every job in this run shares the branches and machine type listed above. Each entry below gives the job's OS, its suite Description, and a Failure Reason for jobs that failed.

OS: centos 7.5
Description:
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}

OS: ubuntu 16.04
Description:
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml}

OS: ubuntu 18.04
Description:
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi007 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e232ed1e9fda5674e2bd2091b3053384471252ab TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: rhel 7.5
Description:
rados/upgrade/luminous-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/luminous.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-nautilus.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} supported-random-distro$/{rhel_latest.yaml} thrashosds-health.yaml}
Failure Reason: Command failed on smithi192 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph -- tell 'mon.*' injectargs --mon_health_to_clog=false"

OS: ubuntu 18.04
Description:
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/redirect.yaml}

OS: ubuntu 18.04
Description:
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/rados_api_tests.yaml}

OS: ubuntu 16.04
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/fastclose.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/rbd_cls.yaml}
Failure Reason: Command crashed: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents'"

OS: centos 7.5
Description:
rados/thrash-erasure-code-overwrites/{bluestore.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml}
Failure Reason: Scrubbing terminated -- not all pgs were active and clean.

OS: ubuntu 16.04
Description:
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml rocksdb.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml}

OS: rhel 7.5
Description:
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{rhel_latest.yaml} tasks/dashboard.yaml}
Failure Reason: failed to become clean before timeout expired

OS: centos 7.5
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Failure Reason: Command failed on smithi201 with status 1: '\n sudo yum -y install rbd-fuse\n '

OS: centos 7.5
Description:
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml rocksdb.yaml supported-random-distro$/{centos_latest.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml}
Failure Reason: "2018-12-06 17:46:47.103274 osd.5 (osd.5) 1 : cluster [WRN] Monitor daemon marked osd.5 down, but it is still running" in cluster log

OS: centos 7.5
Description:
rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} leveldb.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml}
Failure Reason: Scrubbing terminated -- not all pgs were active and clean.

OS: ubuntu 16.04
Description:
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/random.yaml objectstore/bluestore.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi061 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e232ed1e9fda5674e2bd2091b3053384471252ab TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: ubuntu 16.04
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/osd-delay.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/careful.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Failure Reason: Command failed on smithi076 with status 1: 'CEPH_CLIENT_ID=2 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'

OS: centos
Description:
rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml}

OS: ubuntu 16.04
Description:
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} tasks/rados_striper.yaml}
Failure Reason: "2018-12-06 17:17:35.931988 mon.a (mon.0) 149 : cluster [WRN] Health check failed: Reduced data availability: 2 pgs inactive, 2 pgs peering (PG_AVAILABILITY)" in cluster log

OS: ubuntu 16.04
Description:
rados/monthrash/{ceph.yaml clusters/9-mons.yaml mon_kv_backend/leveldb.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml supported-random-distro$/{ubuntu_16.04.yaml} thrashers/one.yaml workloads/snaps-few-objects.yaml}

OS: rhel 7.5
Description:
rados/singleton/{all/mon-seesaw.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
Failure Reason: too many values to unpack

OS: ubuntu 16.04
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/fastclose.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/default.yaml thrashosds-health.yaml workloads/radosbench.yaml}
Failure Reason: Command failed on smithi125 with status 1: 'sudo ceph --cluster ceph osd crush tunables hammer'

OS: centos 7.5
Description:
rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} leveldb.yaml msgr-failures/few.yaml objectstore/bluestore.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml}

OS: centos 7.5
Description:
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/simple.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_latest.yaml}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi003 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e232ed1e9fda5674e2bd2091b3053384471252ab TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: ubuntu 16.04
Description:
rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/mon.yaml}
Failure Reason: Command failed (workunit test mon/misc.sh) on smithi099 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e232ed1e9fda5674e2bd2091b3053384471252ab TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/misc.sh'

OS: rhel 7.5
Description:
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/filestore-xfs.yaml rados.yaml rocksdb.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml}

OS: centos 7.5
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/rbd_cls.yaml}
Failure Reason: Scrubbing terminated -- not all pgs were active and clean.

OS: rhel 7.5
Description:
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore.yaml supported-random-distro$/{rhel_latest.yaml} tasks/prometheus.yaml}
Failure Reason: failed to become clean before timeout expired

OS: centos 7.5
Description:
rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml leveldb.yaml msgr-failures/few.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml}
Failure Reason: Scrubbing terminated -- not all pgs were active and clean.

OS: centos 7.5
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml msgr/simple.yaml rados.yaml rocksdb.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml}
Failure Reason: Command failed on smithi160 with status 1: 'CEPH_CLIENT_ID=2 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op delete 50 --pool unique_pool_0'

OS: ubuntu 16.04
Description:
rados/standalone/{supported-random-distro$/{ubuntu_16.04.yaml} workloads/osd.yaml}
Failure Reason: Command failed (workunit test osd/osd-fast-mark-down.sh) on smithi149 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e232ed1e9fda5674e2bd2091b3053384471252ab TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/osd/osd-fast-mark-down.sh'

OS: rhel 7.5
Description:
rados/singleton-nomsgr/{all/msgr.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}

OS: rhel 7.5
Description:
rados/monthrash/{ceph.yaml clusters/3-mons.yaml mon_kv_backend/rocksdb.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml} thrashers/force-sync-many.yaml workloads/rados_api_tests.yaml}
Failure Reason: "2018-12-06 17:49:59.506200 mon.a (mon.0) 365 : cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)" in cluster log

OS: ubuntu 16.04
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/crush-compat.yaml distro$/{ubuntu_16.04.yaml} msgr-failures/fastclose.yaml msgr/async.yaml rados.yaml rocksdb.yaml thrashers/none.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml}
Failure Reason: Command failed on smithi059 with status 1: 'sudo ceph --cluster ceph osd crush tunables hammer'

OS: rhel 7.5
Description:
rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml supported-random-distro$/{rhel_latest.yaml}}
Failure Reason: Command failed (workunit test cephtool/test.sh) on smithi193 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e232ed1e9fda5674e2bd2091b3053384471252ab TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephtool/test.sh'

OS: ubuntu 18.04
Description:
rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} mon_kv_backend/rocksdb.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rgw_snaps.yaml}
Failure Reason: "2018-12-06 17:20:27.706794 mon.b (mon.0) 92 : cluster [WRN] Health check failed: Reduced data availability: 2 pgs inactive, 2 pgs peering (PG_AVAILABILITY)" in cluster log

OS: ubuntu 18.04
Description:
rados/standalone/{supported-random-distro$/{ubuntu_latest.yaml} workloads/scrub.yaml}

OS: rhel 7.5
Description:
rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{rhel_latest.yaml} tasks/dashboard.yaml}
Failure Reason: Test failure: test_invalid_user_id (tasks.mgr.dashboard.test_rgw.RgwApiCredentialsTest)

OS: centos 7.5
Description:
rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml two-plus-three.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml msgr/random.yaml rados.yaml rocksdb.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml}
Failure Reason: Command failed on smithi130 with status 1: 'CEPH_CLIENT_ID=2 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 4000 --objects 500 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op snap_remove 50 --op snap_create 50 --op rollback 50 --op read 100 --op copy_from 50 --op write 50 --op write_excl 50 --op cache_try_flush 50 --op cache_flush 50 --op cache_evict 50 --op delete 50 --pool base'