Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 4216136 2019-08-15 06:56:29 2019-08-15 06:56:50 2019-08-15 07:22:49 0:25:59 0:16:06 0:09:53 mira master centos 7.6 rados/singleton/{all/mon-auth-caps.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216137 2019-08-15 06:56:30 2019-08-15 06:56:50 2019-08-15 07:24:49 0:27:59 0:21:22 0:06:37 mira master rhel 7.6 rados/singleton-nomsgr/{all/version-number-sanity.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216138 2019-08-15 06:56:30 2019-08-15 06:56:49 2019-08-15 07:18:48 0:21:59 0:10:04 0:11:55 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4K_rand_read.yaml} 1
pass 4216139 2019-08-15 06:56:31 2019-08-15 06:56:48 2019-08-15 07:48:47 0:51:59 0:34:06 0:17:53 mira master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 4216140 2019-08-15 06:56:32 2019-08-15 06:56:49 2019-08-15 07:26:48 0:29:59 0:23:33 0:06:26 mira master rhel 7.6 rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/force-sync-many.yaml workloads/rados_5925.yaml} 2
pass 4216141 2019-08-15 06:56:33 2019-08-15 06:56:49 2019-08-15 07:46:48 0:49:59 0:28:53 0:21:06 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
pass 4216142 2019-08-15 06:56:34 2019-08-15 06:56:49 2019-08-15 07:52:48 0:55:59 0:10:54 0:45:05 mira master ubuntu 18.04 rados/singleton/{all/mon-config-key-caps.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
fail 4216143 2019-08-15 06:56:35 2019-08-15 06:56:49 2019-08-15 07:42:48 0:45:59 0:26:54 0:19:05 mira master ubuntu 18.04 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_api_tests.yaml} 2
Failure Reason:

"2019-08-15T07:33:17.234309+0000 mon.a (mon.0) 1783 : cluster [WRN] Health check failed: Degraded data redundancy: 1 pg degraded (PG_DEGRADED)" in cluster log

fail 4216144 2019-08-15 06:56:35 2019-08-15 06:56:49 2019-08-15 07:28:48 0:31:59 0:19:09 0:12:50 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{centos_7.yaml} tasks/orchestrator_cli.yaml} 2
Failure Reason:

"2019-08-15T07:26:44.318315+0000 mds.c (mds.0) 1 : cluster [WRN] evicting unresponsive client mira063:x (4687), after 303.375 seconds" in cluster log

pass 4216145 2019-08-15 06:56:36 2019-08-15 06:56:50 2019-08-15 07:58:49 1:01:59 0:21:27 0:40:32 mira master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 4216146 2019-08-15 06:56:37 2019-08-15 07:18:50 2019-08-15 08:04:50 0:46:00 0:39:36 0:06:24 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
pass 4216147 2019-08-15 06:56:38 2019-08-15 07:22:50 2019-08-15 07:52:50 0:30:00 0:19:43 0:10:17 mira master centos 7.6 rados/singleton/{all/mon-config-keys.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216148 2019-08-15 06:56:39 2019-08-15 07:24:50 2019-08-15 07:42:50 0:18:00 0:09:19 0:08:41 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4K_seq_read.yaml} 1
dead 4216149 2019-08-15 06:56:40 2019-08-15 07:27:05 2019-08-15 07:49:04 0:21:59 mira master centos 7.6 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml}
Failure Reason:

reached maximum tries (100) after waiting for 600 seconds
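
The "reached maximum tries" message is teuthology's generic polling timeout: the harness re-checks a condition a fixed number of times before marking the job dead, and 100 tries over 600 seconds implies a 6-second interval. A minimal sketch of the pattern, not the actual teuthology code:

import time

def wait_until(check, tries=100, interval=6):
    """Poll check() until it succeeds or the retry budget runs out."""
    for _ in range(tries):
        if check():
            return
        time.sleep(interval)
    # 100 tries * 6 s = 600 s, matching the failure reason above
    raise RuntimeError(
        "reached maximum tries (%d) after waiting for %d seconds"
        % (tries, tries * interval)
    )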

fail 4216150 2019-08-15 06:56:41 2019-08-15 07:28:53 2019-08-15 10:40:55 3:12:02 2:53:08 0:18:54 mira master rhel 7.6 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml} 2
Failure Reason:

Command failed on mira063 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph pg dump --format=json'
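
Exit status 124 here comes from the GNU timeout wrapper, not from ceph itself: timeout returns 124 when the wrapped command (here, ceph pg dump) is killed for exceeding its 120-second budget. A hypothetical equivalent guard in Python:

import subprocess

try:
    subprocess.run(
        ["ceph", "--cluster", "ceph", "pg", "dump", "--format=json"],
        check=True, capture_output=True,
        timeout=120,  # same 120 s budget as the failing command
    )
except subprocess.TimeoutExpired:
    # equivalent to `timeout`'s exit status 124 in the failure reason
    raise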

pass 4216151 2019-08-15 06:56:41 2019-08-15 07:42:51 2019-08-15 08:08:50 0:25:59 0:13:59 0:12:00 mira master centos 7.6 rados/objectstore/{backends/alloc-hint.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216152 2019-08-15 06:56:42 2019-08-15 07:42:51 2019-08-15 08:32:51 0:50:00 0:08:36 0:41:24 mira master ubuntu 18.04 rados/rest/{mgr-restful.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216153 2019-08-15 06:56:43 2019-08-15 07:46:50 2019-08-15 08:38:50 0:52:00 0:43:25 0:08:35 mira master rhel 7.6 rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216154 2019-08-15 06:56:44 2019-08-15 07:48:57 2019-08-15 08:32:57 0:44:00 0:23:04 0:20:56 mira master centos rados/singleton-flat/valgrind-leaks.yaml 1
pass 4216155 2019-08-15 06:56:45 2019-08-15 07:49:06 2019-08-15 08:21:05 0:31:59 0:17:58 0:14:01 mira master centos 7.6 rados/singleton-nomsgr/{all/admin_socket_output.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216156 2019-08-15 06:56:46 2019-08-15 07:53:03 2019-08-15 08:13:02 0:19:59 0:08:48 0:11:11 mira master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest.yaml} workloads/crush.yaml} 1
pass 4216157 2019-08-15 06:56:47 2019-08-15 07:53:03 2019-08-15 11:11:05 3:18:02 2:54:32 0:23:30 mira master centos 7.6 rados/upgrade/mimic-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/mimic.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-nautilus.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} bluestore-bitmap.yaml supported-random-distro$/{centos_7.yaml} thrashosds-health.yaml} 4
pass 4216158 2019-08-15 06:56:48 2019-08-15 07:58:51 2019-08-15 09:18:51 1:20:00 0:55:28 0:24:32 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/mimic.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml rados.yaml thrashers/careful.yaml thrashosds-health.yaml workloads/radosbench.yaml} 4
pass 4216159 2019-08-15 06:56:49 2019-08-15 08:04:53 2019-08-15 08:48:51 0:43:58 0:31:07 0:12:51 mira master centos 7.6 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} 2
pass 4216160 2019-08-15 06:56:50 2019-08-15 08:08:53 2019-08-15 09:02:52 0:53:59 0:24:55 0:29:04 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
pass 4216161 2019-08-15 06:56:51 2019-08-15 08:13:04 2019-08-15 08:31:03 0:17:59 0:08:03 0:09:56 mira master ubuntu 18.04 rados/singleton/{all/mon-config.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216162 2019-08-15 06:56:51 2019-08-15 08:21:07 2019-08-15 08:55:07 0:34:00 0:25:18 0:08:42 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/cache.yaml} 2
pass 4216163 2019-08-15 06:56:52 2019-08-15 08:31:11 2019-08-15 08:55:10 0:23:59 0:13:46 0:10:13 mira master ubuntu 18.04 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_cls_all.yaml} 2
pass 4216164 2019-08-15 06:56:53 2019-08-15 08:32:53 2019-08-15 09:34:53 1:02:00 0:51:36 0:10:24 mira master ubuntu 18.04 rados/singleton/{all/osd-backfill.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
fail 4216165 2019-08-15 06:56:54 2019-08-15 08:32:58 2019-08-15 09:04:58 0:32:00 0:22:09 0:09:51 mira master rhel 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{rhel_7.yaml} tasks/progress.yaml} 2
Failure Reason:

Test failure: test_osd_came_back (tasks.mgr.test_progress.TestProgress)

pass 4216166 2019-08-15 06:56:55 2019-08-15 08:38:52 2019-08-15 09:10:51 0:31:59 0:24:18 0:07:41 mira master rhel 7.6 rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_7.yaml} workloads/radosbench_4M_rand_read.yaml} 1
pass 4216167 2019-08-15 06:56:56 2019-08-15 08:49:08 2019-08-15 09:21:07 0:31:59 0:26:04 0:05:55 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/dedup_tier.yaml} 2
pass 4216168 2019-08-15 06:56:56 2019-08-15 08:55:08 2019-08-15 09:21:07 0:25:59 0:14:13 0:11:46 mira master centos 7.6 rados/singleton-nomsgr/{all/cache-fs-trunc.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216169 2019-08-15 06:56:57 2019-08-15 08:55:12 2019-08-15 12:15:14 3:20:02 3:01:46 0:18:16 mira master rhel 7.6 rados/singleton/{all/osd-recovery-incomplete.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216170 2019-08-15 06:56:58 2019-08-15 09:02:56 2019-08-15 09:48:55 0:45:59 0:31:27 0:14:32 mira master centos 7.6 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 4216171 2019-08-15 06:56:59 2019-08-15 09:05:10 2019-08-15 09:55:09 0:49:59 0:22:15 0:27:44 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/nautilus-v1only.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/rbd_cls.yaml} 4
pass 4216172 2019-08-15 06:57:00 2019-08-15 09:11:08 2019-08-15 09:43:08 0:32:00 0:23:24 0:08:36 mira master rhel 7.6 rados/multimon/{clusters/3.yaml msgr-failures/few.yaml msgr/async-v1only.yaml no_pools.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} tasks/mon_recovery.yaml} 2
pass 4216173 2019-08-15 06:57:00 2019-08-15 09:18:54 2019-08-15 10:24:53 1:05:59 0:45:29 0:20:30 mira master centos rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_cls_all.yaml validater/valgrind.yaml} 2
pass 4216174 2019-08-15 06:57:01 2019-08-15 09:21:23 2019-08-15 10:13:22 0:51:59 0:43:52 0:08:07 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
pass 4216175 2019-08-15 06:57:02 2019-08-15 09:21:23 2019-08-15 10:01:22 0:39:59 0:28:24 0:11:35 mira master ubuntu 18.04 rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/many.yaml workloads/rados_api_tests.yaml} 2
pass 4216176 2019-08-15 06:57:03 2019-08-15 09:34:54 2019-08-15 09:54:54 0:20:00 0:09:15 0:10:45 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4M_seq_read.yaml} 1
pass 4216177 2019-08-15 06:57:04 2019-08-15 09:43:09 2019-08-15 10:19:09 0:36:00 0:29:12 0:06:48 mira master rhel 7.6 rados/singleton/{all/osd-recovery.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216178 2019-08-15 06:57:05 2019-08-15 09:48:57 2019-08-15 10:24:56 0:35:59 0:29:41 0:06:18 mira master rhel 7.6 rados/objectstore/{backends/ceph_objectstore_tool.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216179 2019-08-15 06:57:06 2019-08-15 09:55:09 2019-08-15 10:31:09 0:36:00 0:28:09 0:07:51 mira master rhel 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} tasks/rados_python.yaml} 2
pass 4216180 2019-08-15 06:57:06 2019-08-15 09:55:11 2019-08-15 12:43:12 2:48:01 2:28:44 0:19:17 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 4216181 2019-08-15 06:57:07 2019-08-15 10:01:24 2019-08-15 10:21:23 0:19:59 0:09:20 0:10:39 mira master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/prometheus.yaml} 2
pass 4216182 2019-08-15 06:57:08 2019-08-15 10:13:24 2019-08-15 10:37:24 0:24:00 0:13:57 0:10:03 mira master centos 7.6 rados/singleton/{all/peer.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216183 2019-08-15 06:57:09 2019-08-15 10:19:11 2019-08-15 11:13:11 0:54:00 0:46:37 0:07:23 mira master rhel 7.6 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=clay-k=4-m=2.yaml} 2
pass 4216184 2019-08-15 06:57:10 2019-08-15 10:21:27 2019-08-15 11:19:26 0:57:59 0:49:16 0:08:43 mira master rhel 7.6 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} 3
pass 4216185 2019-08-15 06:57:11 2019-08-15 10:24:55 2019-08-15 11:20:55 0:56:00 0:32:36 0:23:24 mira master centos 7.6 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml} 2
pass 4216186 2019-08-15 06:57:11 2019-08-15 10:24:58 2019-08-15 11:10:57 0:45:59 0:20:59 0:25:00 mira master centos 7.6 rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 4216187 2019-08-15 06:57:12 2019-08-15 10:31:14 2019-08-15 10:47:13 0:15:59 0:07:08 0:08:51 mira master ubuntu 18.04 rados/singleton-nomsgr/{all/ceph-kvstore-tool.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216188 2019-08-15 06:57:13 2019-08-15 10:37:40 2019-08-15 12:21:40 1:44:00 1:31:29 0:12:31 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
pass 4216189 2019-08-15 06:57:14 2019-08-15 10:40:57 2019-08-15 11:06:56 0:25:59 0:15:32 0:10:27 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/radosbench_4M_write.yaml} 1
pass 4216190 2019-08-15 06:57:15 2019-08-15 10:47:15 2019-08-15 11:19:15 0:32:00 0:17:03 0:14:57 mira master centos 7.6 rados/singleton/{all/pg-autoscaler.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 2
pass 4216191 2019-08-15 06:57:15 2019-08-15 11:06:59 2019-08-15 12:00:58 0:53:59 0:44:38 0:09:21 mira master centos 7.6 rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/erasure-code.yaml} 1
pass 4216192 2019-08-15 06:57:16 2019-08-15 11:11:00 2019-08-15 13:53:02 2:42:02 2:23:53 0:18:09 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
fail 4216193 2019-08-15 06:57:17 2019-08-15 11:11:07 2019-08-15 12:11:07 1:00:00 0:43:14 0:16:46 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/nautilus-v2only.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/fastclose.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 4
Failure Reason:

Command failed on mira034 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph tell osd.1 flush_pg_stats'

pass 4216194 2019-08-15 06:57:18 2019-08-15 11:13:13 2019-08-15 11:43:12 0:29:59 0:23:19 0:06:40 mira master rhel 7.6 rados/singleton/{all/pg-removal-interruption.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216195 2019-08-15 06:57:19 2019-08-15 11:19:23 2019-08-15 14:03:25 2:44:02 2:24:59 0:19:03 mira master rhel 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} tasks/rados_stress_watch.yaml} 2
pass 4216196 2019-08-15 06:57:19 2019-08-15 11:19:27 2019-08-15 11:57:27 0:38:00 0:31:40 0:06:20 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 4216197 2019-08-15 06:57:20 2019-08-15 11:21:11 2019-08-15 13:53:12 2:32:01 2:13:35 0:18:26 mira master rhel 7.6 rados/singleton-nomsgr/{all/ceph-post-file.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216198 2019-08-15 06:57:21 2019-08-15 11:43:29 2019-08-15 12:09:28 0:25:59 0:14:12 0:11:47 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{centos_7.yaml} tasks/ssh_orchestrator.yaml} 2
pass 4216199 2019-08-15 06:57:22 2019-08-15 11:57:29 2019-08-15 12:35:28 0:37:59 0:26:27 0:11:32 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/radosbench_omap_write.yaml} 1
pass 4216200 2019-08-15 06:57:23 2019-08-15 12:01:00 2019-08-15 12:33:00 0:32:00 0:17:30 0:14:30 mira master ubuntu 18.04 rados/singleton/{all/radostool.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216201 2019-08-15 06:57:23 2019-08-15 12:09:30 2019-08-15 12:55:30 0:46:00 0:32:55 0:13:05 mira master centos 7.6 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 4216202 2019-08-15 06:57:24 2019-08-15 12:11:22 2019-08-15 12:27:21 0:15:59 0:05:46 0:10:13 mira master ubuntu 18.04 rados/objectstore/{backends/filejournal.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216203 2019-08-15 06:57:25 2019-08-15 12:15:16 2019-08-15 12:57:15 0:41:59 0:33:33 0:08:26 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 4216204 2019-08-15 06:57:26 2019-08-15 12:21:43 2019-08-15 14:09:43 1:48:00 1:20:01 0:27:59 mira master ubuntu 18.04 rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/rados_mon_osdmap_prune.yaml} 2
pass 4216205 2019-08-15 06:57:27 2019-08-15 12:27:23 2019-08-15 13:21:22 0:53:59 0:24:07 0:29:52 mira master ubuntu 18.04 rados/singleton/{all/random-eio.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 2
pass 4216206 2019-08-15 06:57:28 2019-08-15 12:33:16 2019-08-15 13:23:15 0:49:59 0:44:08 0:05:51 mira master rhel 7.6 rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216207 2019-08-15 06:57:28 2019-08-15 12:35:44 2019-08-15 13:23:43 0:47:59 0:18:21 0:29:38 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/set-chunk-promote-flush.yaml} 2
pass 4216208 2019-08-15 06:57:29 2019-08-15 12:43:14 2019-08-15 15:43:16 3:00:02 2:40:59 0:19:03 mira master rhel 7.6 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/fastclose.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} 2
pass 4216209 2019-08-15 06:57:30 2019-08-15 12:55:32 2019-08-15 13:35:32 0:40:00 0:14:42 0:25:18 mira master ubuntu 18.04 rados/multimon/{clusters/6.yaml msgr-failures/many.yaml msgr/async-v2only.yaml no_pools.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/mon_clock_no_skews.yaml} 2
pass 4216210 2019-08-15 06:57:31 2019-08-15 12:57:31 2019-08-15 13:25:30 0:27:59 0:10:19 0:17:40 mira master rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml tasks/mon_recovery.yaml validater/lockdep.yaml} 2
pass 4216211 2019-08-15 06:57:32 2019-08-15 13:21:25 2019-08-15 14:07:24 0:45:59 0:21:07 0:24:52 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/nautilus.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml} 4
pass 4216212 2019-08-15 06:57:32 2019-08-15 13:23:30 2019-08-15 14:01:29 0:37:59 0:10:21 0:27:38 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/sample_fio.yaml} 1
pass 4216213 2019-08-15 06:57:33 2019-08-15 13:23:45 2019-08-15 14:07:44 0:43:59 0:14:18 0:29:41 mira master ubuntu 18.04 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_striper.yaml} 2
pass 4216214 2019-08-15 06:57:34 2019-08-15 13:25:32 2019-08-15 13:53:31 0:27:59 0:20:50 0:07:09 mira master rhel 7.6 rados/singleton-nomsgr/{all/export-after-evict.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
fail 4216215 2019-08-15 06:57:35 2019-08-15 13:35:33 2019-08-15 13:55:32 0:19:59 0:08:58 0:11:01 mira master ubuntu 18.04 rados/singleton/{all/rebuild-mondb.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
Failure Reason:

+ sudo adjust-ulimits ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --journal-path /var/lib/ceph/osd/ceph-0/journal --no-mon-config --op update-mon-db --mon-store-path /home/ubuntu/cephtest/mon-store
ceph-objectstore-tool: /build/ceph-15.0.0-3934-g01ecef9/src/tools/rebuild_mondb.cc:290: int update_osdmap(ObjectStore&, OSDSuperblock&, MonitorDBStore&): Assertion `0' failed.
*** Caught signal (Aborted) ** in thread 7fd60af12c00 thread_name:ceph-objectstor
ceph version 15.0.0-3934-g01ecef9 (01ecef99a688ad7972451d18d0cde5d57d99249a) octopus (dev)
 1: (()+0x12890) [0x7fd6004b8890]
 2: (gsignal()+0xc7) [0x7fd5ff5ace97]
 3: (abort()+0x141) [0x7fd5ff5ae801]
 4: (()+0x3039a) [0x7fd5ff59e39a]
 5: (()+0x30412) [0x7fd5ff59e412]
 6: (update_mon_db(ObjectStore&, OSDSuperblock&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)+0x2be2) [0x55a7cc97a022]
 7: (main()+0x4f9c) [0x55a7cc9221ac]
 8: (__libc_start_main()+0xe7) [0x7fd5ff58fb97]
 9: (_start()+0x2a) [0x55a7cc92eaea]
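
The rebuild-mondb task exercises ceph-objectstore-tool's update-mon-db op, which reconstructs a monitor store from an OSD's data; the assertion above fires inside that op at rebuild_mondb.cc:290. For reference, the failing invocation from the log, wrapped in Python (paths exactly as in the failure reason; illustrative wrapper only):

import subprocess

subprocess.run(
    ["sudo", "adjust-ulimits", "ceph-objectstore-tool",
     "--data-path", "/var/lib/ceph/osd/ceph-0",
     "--journal-path", "/var/lib/ceph/osd/ceph-0/journal",
     "--no-mon-config", "--op", "update-mon-db",
     "--mon-store-path", "/home/ubuntu/cephtest/mon-store"],
    check=True,  # raises CalledProcessError when the tool aborts (SIGABRT)
)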

fail 4216216 2019-08-15 06:57:36 2019-08-15 13:53:03 2019-08-15 16:17:05 2:24:02 2:06:58 0:17:04 mira master rhel 7.6 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} 3
Failure Reason:

[truncated Ansible disk-zapping log: per-device `sgdisk --zap-all /dev/sdX` output omitted]
sgdisk --zap-all /dev/dm-0 failed (rc 2, non-zero return code): "Problem opening /dev/dm-0 for reading! Error is 2. The specified file does not exist!"
teuthology's failure_log.py callback then crashed while dumping this failure via yaml.safe_dump:
RepresenterError: ('cannot represent an object', u'sdd')

fail 4216217 2019-08-15 06:57:37 2019-08-15 13:53:14 2019-08-15 16:23:15 2:30:01 2:09:21 0:20:40 mira master rhel 7.6 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/fastclose.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml} 2
Failure Reason:

(Ansible output truncated in the original dump.) The pre-test disk-reset task looped `sgdisk --zap-all <dev> || sgdisk --zap-all <dev>` over the node's block devices. Per-device results (full result dicts elided):

/dev/sde, /dev/sdf, /dev/sdb, /dev/sdc: zapped successfully (rc 0, "Creating new GPT entries. GPT data structures destroyed! You may now partition the disk using fdisk or other utilities.")
/dev/sdd: entry truncated at the start of the dump
/dev/sda: skipped ("Conditional result was False"; it carries the system partition sda1)
/dev/dm-0 (multipath alias mpatha over sdb/sdc): failed, rc 2:
    Problem opening /dev/dm-0 for reading! Error is 2.
    The specified file does not exist!
    Problem opening '' for writing! Program will now terminate.
    Warning! MBR not overwritten! Error is 2!

While logging that failure, the failure_log callback plugin itself crashed trying to YAML-serialize the Ansible result:

Traceback (most recent call last):
  File "/home/teuthworker/src/git.ceph.com_git_ceph-cm-ansible_master/callback_plugins/failure_log.py", line 44, in log_failure
    log.error(yaml.safe_dump(failure))
  File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/__init__.py", line 309, in safe_dump
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
  File ".../site-packages/yaml/__init__.py", line 281, in dump_all
    dumper.represent(data)
  File ".../site-packages/yaml/representer.py", line 29, in represent
    node = self.represent_data(data)
  [recursive represent_data / represent_dict / represent_mapping / represent_sequence frames elided]
  File ".../site-packages/yaml/representer.py", line 68, in represent_data
    node = self.yaml_representers[None](self, data)
  File ".../site-packages/yaml/representer.py", line 251, in represent_undefined
    raise RepresenterError("cannot represent an object", data)
RepresenterError: ('cannot represent an object', u'sdd')
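
The RepresenterError is a bug in the logging path rather than the root failure: yaml.safe_dump only serializes plain built-in types, and Ansible hands the callback str subclasses (typically its AnsibleUnsafeText wrapper), which SafeDumper refuses to represent. A minimal sketch of that behavior, using a stand-in class rather than Ansible's real type:

import yaml

# Stand-in for Ansible's AnsibleUnsafeText wrapper: a str subclass
# that yaml.SafeDumper has no registered representer for.
class AnsibleUnsafeText(str):
    pass

failure = {'device': AnsibleUnsafeText('sdd'), 'rc': 2}

try:
    yaml.safe_dump(failure)
except yaml.representer.RepresenterError as e:
    print(e)  # ('cannot represent an object', 'sdd')

# One possible fix: register the subclass so SafeDumper treats it as a str.
yaml.SafeDumper.add_representer(
    AnsibleUnsafeText, yaml.representer.SafeRepresenter.represent_str)
print(yaml.safe_dump(failure))  # now serializes cleanly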

pass 4216218 2019-08-15 06:57:37 2019-08-15 13:53:33 2019-08-15 14:29:32 0:35:59 0:24:53 0:11:06 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
pass 4216219 2019-08-15 06:57:38 2019-08-15 13:55:34 2019-08-15 14:49:34 0:54:00 0:13:40 0:40:20 mira master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-low-osd-mem-target.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/workunits.yaml} 2
pass 4216220 2019-08-15 06:57:39 2019-08-15 14:01:35 2019-08-15 14:35:35 0:34:00 0:20:17 0:13:43 mira master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 4216221 2019-08-15 06:57:40 2019-08-15 14:03:39 2019-08-15 14:59:39 0:56:00 0:48:28 0:07:32 mira master rhel 7.6 rados/singleton/{all/recovery-preemption.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
dead 4216222 2019-08-15 06:57:41 2019-08-15 14:07:38 2019-08-16 02:10:09 12:02:31 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
pass 4216223 2019-08-15 06:57:42 2019-08-15 14:07:46 2019-08-15 14:25:45 0:17:59 0:08:50 0:09:09 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/sample_radosbench.yaml} 1
pass 4216224 2019-08-15 06:57:42 2019-08-15 14:09:45 2019-08-15 14:39:44 0:29:59 0:22:23 0:07:36 mira master rhel 7.6 rados/singleton/{all/resolve_stuck_peering.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 2
pass 4216225 2019-08-15 06:57:43 2019-08-15 14:25:48 2019-08-15 15:05:47 0:39:59 0:29:46 0:10:13 mira master ubuntu 18.04 rados/objectstore/{backends/filestore-idempotent-aio-journal.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216226 2019-08-15 06:57:44 2019-08-15 14:29:42 2019-08-15 14:49:41 0:19:59 0:11:22 0:08:37 mira master ubuntu 18.04 rados/singleton-nomsgr/{all/full-tiering.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216227 2019-08-15 06:57:45 2019-08-15 14:35:44 2019-08-15 15:17:43 0:41:59 0:34:21 0:07:38 mira master rhel 7.6 rados/standalone/{supported-random-distro$/{rhel_7.yaml} workloads/misc.yaml} 1
pass 4216228 2019-08-15 06:57:46 2019-08-15 14:39:50 2019-08-15 15:21:50 0:42:00 0:31:48 0:10:12 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
pass 4216229 2019-08-15 06:57:46 2019-08-15 14:49:42 2019-08-15 15:41:42 0:52:00 0:44:21 0:07:39 mira master rhel 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} tasks/rados_workunit_loadgen_big.yaml} 2
dead 4216230 2019-08-15 06:57:47 2019-08-15 14:49:43 2019-08-16 02:52:10 12:02:27 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/hammer.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml rados.yaml thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 4
fail 4216231 2019-08-15 06:57:48 2019-08-15 14:59:41 2019-08-15 15:49:41 0:50:00 0:37:50 0:12:10 mira master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
Failure Reason:

failed to complete snap trimming before timeout
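
This snap-trim failure is a deadline-based wait that expired: the test polls the cluster until every PG's snap-trim queue drains and gives up when the timeout passes. An illustrative wait loop under that assumption (snap_trim_done is a hypothetical probe, not a teuthology function):

import time

def wait_for(condition, timeout, interval=5):
    # Poll `condition` every `interval` seconds until it holds or `timeout` elapses.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return
        time.sleep(interval)
    raise TimeoutError('failed to complete snap trimming before timeout')

# e.g. wait_for(snap_trim_done, timeout=1200), where snap_trim_done() would
# check that all PGs report an empty snap_trimq.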

pass 4216232 2019-08-15 06:57:49 2019-08-15 15:06:02 2019-08-15 15:36:01 0:29:59 0:23:09 0:06:50 mira master rhel 7.6 rados/singleton/{all/test-crash.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216233 2019-08-15 06:57:50 2019-08-15 15:17:45 2019-08-15 15:53:45 0:36:00 0:29:17 0:06:43 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 4216234 2019-08-15 06:57:50 2019-08-15 15:22:05 2019-08-15 15:52:04 0:29:59 0:17:05 0:12:54 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-low-osd-mem-target.yaml supported-random-distro$/{centos_7.yaml} tasks/crash.yaml} 2
fail 4216235 2019-08-15 06:57:51 2019-08-15 15:36:04 2019-08-15 16:10:03 0:33:59 0:23:23 0:10:36 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/cosbench_64K_read_write.yaml} 1
Failure Reason:

Command failed on mira064 with status 1: 'find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest'
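
Both cosbench teardown failures in this run have the same shape: cleanup lists whatever is left under /home/ubuntu/cephtest with find, then calls rmdir, and rmdir exits 1 whenever the directory is not empty. A minimal sketch of that failure mode (a temporary directory stands in for the real cephtest path):

import os
import subprocess
import tempfile

# A directory with a leftover file in it, standing in for /home/ubuntu/cephtest.
testdir = tempfile.mkdtemp()
open(os.path.join(testdir, 'leftover.log'), 'w').close()

# rmdir removes only empty directories, so this exits with status 1 --
# the same status teuthology reports above.
result = subprocess.run(['rmdir', '--', testdir], capture_output=True, text=True)
print(result.returncode)      # 1
print(result.stderr.strip())  # rmdir: failed to remove '...': Directory not empty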

pass 4216236 2019-08-15 06:57:52 2019-08-15 15:41:58 2019-08-15 16:41:58 1:00:00 0:34:44 0:25:16 mira master ubuntu 18.04 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml} 2
pass 4216237 2019-08-15 06:57:53 2019-08-15 15:43:31 2019-08-15 16:41:31 0:58:00 0:36:47 0:21:13 mira master centos 7.6 rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync-many.yaml workloads/rados_mon_workunits.yaml} 2
fail 4216238 2019-08-15 06:57:54 2019-08-15 15:49:51 2019-08-15 16:11:50 0:21:59 0:11:37 0:10:22 mira master ubuntu 18.04 rados/singleton/{all/test_envlibrados_for_rocksdb.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
Failure Reason:

Command failed (workunit test rados/test_envlibrados_for_rocksdb.sh) on mira046 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=01ecef99a688ad7972451d18d0cde5d57d99249a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_envlibrados_for_rocksdb.sh'

pass 4216239 2019-08-15 06:57:54 2019-08-15 15:52:07 2019-08-15 16:28:06 0:35:59 0:28:52 0:07:07 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 4216240 2019-08-15 06:57:55 2019-08-15 15:53:47 2019-08-15 16:21:46 0:27:59 0:16:26 0:11:33 mira master centos 7.6 rados/singleton-nomsgr/{all/health-warnings.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216241 2019-08-15 06:57:56 2019-08-15 16:10:21 2019-08-15 17:46:21 1:36:00 0:44:14 0:51:46 mira master ubuntu 18.04 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} 3
pass 4216242 2019-08-15 06:57:57 2019-08-15 16:11:52 2019-08-15 17:15:52 1:04:00 0:35:55 0:28:05 mira master ubuntu 18.04 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-pool-snaps-few-objects-overwrites.yaml} 2
pass 4216243 2019-08-15 06:57:58 2019-08-15 16:17:22 2019-08-15 17:27:23 1:10:01 1:01:24 0:08:37 mira master rhel 7.6 rados/singleton/{all/thrash-backfill-full.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 2
pass 4216244 2019-08-15 06:57:58 2019-08-15 16:21:49 2019-08-15 17:01:48 0:39:59 0:33:30 0:06:29 mira master rhel 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} tasks/rados_workunit_loadgen_mix.yaml} 2
pass 4216245 2019-08-15 06:57:59 2019-08-15 16:23:34 2019-08-15 17:49:34 1:26:00 1:04:10 0:21:50 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/cache-agent-big.yaml} 2
fail 4216246 2019-08-15 06:58:00 2019-08-15 16:28:26 2019-08-15 17:02:25 0:33:59 0:24:00 0:09:59 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/cosbench_64K_write.yaml} 1
Failure Reason:

Command failed on mira041 with status 1: 'find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest'

pass 4216247 2019-08-15 06:58:01 2019-08-15 16:41:47 2019-08-15 17:09:47 0:28:00 0:17:43 0:10:17 mira master ubuntu 18.04 rados/multimon/{clusters/9.yaml msgr-failures/few.yaml msgr/async.yaml no_pools.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/mon_clock_with_skews.yaml} 3
fail 4216248 2019-08-15 06:58:02 2019-08-15 16:41:59 2019-08-15 18:10:00 1:28:01 1:15:34 0:12:27 mira master centos rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml tasks/rados_api_tests.yaml validater/valgrind.yaml} 2
Failure Reason:

ceph-objectstore-tool: exp list-pgs failure with status 1

fail 4216249 2019-08-15 06:58:02 2019-08-15 17:01:51 2019-08-15 18:35:51 1:34:00 1:15:47 0:18:13 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/jewel-v1only.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/fastclose.yaml rados.yaml thrashers/pggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} 4
Failure Reason:

"2019-08-15T18:16:22.880096+0000 osd.1 (osd.1) 1 : cluster [ERR] 6.3 required past_interval bounds are empty [1206,1204) but past_intervals is not: ([1123,1203] all_participants=1,2,6,7 intervals=([1146,1203] acting 6,7))" in cluster log

pass 4216250 2019-08-15 06:58:03 2019-08-15 17:02:41 2019-08-15 18:04:41 1:02:00 0:31:28 0:30:32 mira master ubuntu 18.04 rados/singleton/{all/thrash-eio.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 2
pass 4216251 2019-08-15 06:58:04 2019-08-15 17:09:49 2019-08-15 17:39:48 0:29:59 0:16:22 0:13:37 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/cache-agent-small.yaml} 2
pass 4216252 2019-08-15 06:58:05 2019-08-15 17:16:07 2019-08-15 18:10:07 0:54:00 0:33:48 0:20:12 mira master ubuntu 18.04 rados/objectstore/{backends/filestore-idempotent.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
fail 4216253 2019-08-15 06:58:06 2019-08-15 17:27:38 2019-08-15 18:29:38 1:02:00 0:40:51 0:21:09 mira master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/dashboard.yaml} 2
Failure Reason:

Test failure: test_create_get_update_delete_w_tenant (tasks.mgr.dashboard.test_rgw.RgwBucketTest)

pass 4216254 2019-08-15 06:58:07 2019-08-15 17:39:50 2019-08-15 18:59:51 1:20:01 0:15:23 1:04:38 mira master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 4216255 2019-08-15 06:58:07 2019-08-15 17:46:22 2019-08-15 18:14:22 0:28:00 0:21:25 0:06:35 mira master rhel 7.6 rados/singleton-nomsgr/{all/large-omap-object-warnings.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216256 2019-08-15 06:58:08 2019-08-15 17:49:49 2019-08-15 18:35:49 0:46:00 0:25:57 0:20:03 mira master centos 7.6 rados/singleton/{all/thrash-rados/{thrash-rados.yaml thrashosds-health.yaml} msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 2
fail 4216257 2019-08-15 06:58:09 2019-08-15 18:04:57 2019-08-15 18:50:57 0:46:00 0:36:36 0:09:24 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/cache-pool-snaps-readproxy.yaml} 2
Failure Reason:

"2019-08-15T18:44:26.943946+0000 osd.0 (osd.0) 2 : cluster [ERR] 2.1 required past_interval bounds are empty [556,547) but past_intervals is not: ([475,546] all_participants=0,2 intervals=([475,546] acting 0,2))" in cluster log

pass 4216258 2019-08-15 06:58:10 2019-08-15 18:10:02 2019-08-15 18:40:01 0:29:59 0:18:58 0:11:01 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4K_rand_read.yaml} 1
pass 4216259 2019-08-15 06:58:11 2019-08-15 18:10:08 2019-08-15 19:06:08 0:56:00 0:42:30 0:13:30 mira master centos 7.6 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 4216260 2019-08-15 06:58:12 2019-08-15 18:14:38 2019-08-15 18:52:37 0:37:59 0:27:23 0:10:36 mira master ubuntu 18.04 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_workunit_loadgen_mostlyread.yaml} 2
pass 4216261 2019-08-15 06:58:12 2019-08-15 18:29:40 2019-08-15 21:21:42 2:52:02 2:34:20 0:17:42 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-pool-snaps.yaml} 2
pass 4216262 2019-08-15 06:58:13 2019-08-15 18:36:04 2019-08-15 19:24:04 0:48:00 0:35:15 0:12:45 mira master centos 7.6 rados/singleton/{all/thrash_cache_writeback_proxy_none.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 2
pass 4216263 2019-08-15 06:58:14 2019-08-15 18:36:04 2019-08-15 19:28:04 0:52:00 0:41:42 0:10:18 mira master centos 7.6 rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216264 2019-08-15 06:58:15 2019-08-15 18:40:02 2019-08-15 19:52:02 1:12:00 1:01:51 0:10:09 mira master centos 7.6 rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/mon.yaml} 1
pass 4216265 2019-08-15 06:58:16 2019-08-15 18:50:58 2019-08-15 22:13:01 3:22:03 3:05:22 0:16:41 mira master centos 7.6 rados/upgrade/nautilus-x-singleton/{0-cluster/{openstack.yaml start.yaml} 1-install/nautilus.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-workload/{rbd-cls.yaml rbd-import-export.yaml readwrite.yaml snaps-few-objects.yaml} 5-workload/{radosbench.yaml rbd_api.yaml} 6-finish-upgrade.yaml 7-octopus.yaml 8-workload/{rbd-python.yaml rgw-swift.yaml snaps-many-objects.yaml} bluestore-bitmap.yaml supported-random-distro$/{centos_7.yaml} thrashosds-health.yaml} 4
fail 4216266 2019-08-15 06:58:17 2019-08-15 18:52:51 2019-08-15 21:16:53 2:24:02 2:15:34 0:08:28 mira master rhel 7.6 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/minsize_recovery.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml} 2
Failure Reason:

reached maximum tries (800) after waiting for 4800 seconds
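
This is the tries-bounded variant of the same polling pattern: 800 tries at a 6-second interval is exactly the 4800 seconds reported. A sketch under that inferred interval (the 6 s spacing comes from the arithmetic, not from the teuthology source):

import time

def wait_until(check, max_tries=800, interval=6):
    # Retry `check` up to `max_tries` times, sleeping `interval` seconds between tries.
    for _ in range(max_tries):
        if check():
            return
        time.sleep(interval)
    raise RuntimeError('reached maximum tries (%d) after waiting for %d seconds'
                       % (max_tries, max_tries * interval))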

pass 4216267 2019-08-15 06:58:17 2019-08-15 19:00:05 2019-08-15 19:50:05 0:50:00 0:22:58 0:27:02 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/jewel.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml rados.yaml thrashers/careful.yaml thrashosds-health.yaml workloads/rbd_cls.yaml} 4
pass 4216268 2019-08-15 06:58:18 2019-08-15 19:06:10 2019-08-15 19:58:09 0:51:59 0:39:50 0:12:09 mira master ubuntu 18.04 rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/snaps-few-objects.yaml} 2
pass 4216269 2019-08-15 06:58:19 2019-08-15 19:24:14 2019-08-15 20:04:13 0:39:59 0:31:30 0:08:29 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 2
pass 4216270 2019-08-15 06:58:20 2019-08-15 19:28:19 2019-08-15 19:58:18 0:29:59 0:17:56 0:12:03 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{centos_7.yaml} tasks/failover.yaml} 2
pass 4216271 2019-08-15 06:58:21 2019-08-15 19:50:07 2019-08-15 20:18:06 0:27:59 0:16:23 0:11:36 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/fio_4K_rand_rw.yaml} 1
pass 4216272 2019-08-15 06:58:22 2019-08-15 19:52:04 2019-08-15 20:16:03 0:23:59 0:14:02 0:09:57 mira master centos 7.6 rados/singleton/{all/watch-notify-same-primary.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216273 2019-08-15 06:58:23 2019-08-15 19:58:11 2019-08-15 20:20:10 0:21:59 0:12:28 0:09:31 mira master ubuntu 18.04 rados/singleton-nomsgr/{all/lazy_omap_stats_output.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
fail 4216274 2019-08-15 06:58:23 2019-08-15 19:58:32 2019-08-15 22:22:34 2:24:02 2:06:46 0:17:16 mira master rhel 7.6 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} 3
Failure Reason:

k --zap-all /dev/sdd', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:21:54.191318'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011274', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [u'dm-0'], u'labels': [], u'ids': [], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP8VMXN', u'holders': [u'mpatha'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000528AS', u'partitions': {}}, 'key': u'sde'}, 'ansible_loop_var': u'item', u'end': u'2019-08-15 22:21:56.620410', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [u'dm-0'], u'labels': [], u'ids': [], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP8VMXN', u'holders': [u'mpatha'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000528AS', u'partitions': {}}, 'key': u'sde'}, u'cmd': u'sgdisk --zap-all /dev/sde || sgdisk --zap-all /dev/sde', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sde || sgdisk --zap-all /dev/sde', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:21:55.609136'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011114', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d20112c5400'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211LB4E', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdf'}, 'ansible_loop_var': u'item', u'end': u'2019-08-15 22:21:57.880931', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d20112c5400'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211LB4E', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdf'}, u'cmd': u'sgdisk --zap-all /dev/sdf || sgdisk --zap-all /dev/sdf', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdf || sgdisk --zap-all /dev/sdf', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:21:56.869817'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011226', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011e25500'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N2112NUE', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdg'}, 'ansible_loop_var': u'item', u'end': u'2019-08-15 22:21:59.146274', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011e25500'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N2112NUE', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdg'}, u'cmd': u'sgdisk --zap-all /dev/sdg || sgdisk --zap-all /dev/sdg', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdg || sgdisk --zap-all /dev/sdg', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:21:58.135048'}, {'ansible_loop_var': u'item', '_ansible_no_log': False, 'skip_reason': u'Conditional result was False', 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP91N0M', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000524AS', u'partitions': {u'sda1': {u'start': u'2048', u'sectorsize': 512, u'uuid': u'f602365c-3e1b-4c7f-a435-7729abad47a6', u'sectors': u'1953522688', u'holders': [], u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000-part1'], u'uuids': [u'f602365c-3e1b-4c7f-a435-7729abad47a6']}, u'size': u'931.51 GB'}}}, 'key': u'sda'}, 'skipped': True, 'changed': False, '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP91N0M', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000524AS', u'partitions': {u'sda1': {u'start': u'2048', u'sectorsize': 512, u'uuid': u'f602365c-3e1b-4c7f-a435-7729abad47a6', u'sectors': u'1953522688', u'holders': [], u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000-part1'], u'uuids': [u'f602365c-3e1b-4c7f-a435-7729abad47a6']}, u'size': u'931.51 GB'}}}, 'key': u'sda'}}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011582', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011b25000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211RK0E', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdb'}, 'ansible_loop_var': u'item', u'end': u'2019-08-15 22:22:00.434207', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011b25000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211RK0E', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdb'}, u'cmd': u'sgdisk --zap-all /dev/sdb || sgdisk --zap-all /dev/sdb', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdb || sgdisk --zap-all /dev/sdb', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:21:59.422625'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.022423', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011335800'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211338E', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdc'}, 'ansible_loop_var': u'item', u'end': u'2019-08-15 22:22:01.727190', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011335800'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211338E', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdc'}, u'cmd': u'sgdisk --zap-all /dev/sdc || sgdisk --zap-all /dev/sdc', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdc || sgdisk --zap-all /dev/sdc', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:22:00.704767'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011366', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! 
You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011a05200'], u'uuids': [u'e48a2d4f-f727-4a37-8e6d-754cdfe1fc2a']}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211PZBE', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdh'}, 'ansible_loop_var': u'item', u'end': u'2019-08-15 22:22:02.997970', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2011a05200'], u'uuids': [u'e48a2d4f-f727-4a37-8e6d-754cdfe1fc2a']}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N211PZBE', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdh'}, u'cmd': u'sgdisk --zap-all /dev/sdh || sgdisk --zap-all /dev/sdh', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdh || sgdisk --zap-all /dev/sdh', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-08-15 22:22:01.986604'}, {'stderr_lines': [u'Problem opening /dev/dm-0 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!', u'Problem opening /dev/dm-0 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! 
[remainder of the ansible result dump truncated; the recoverable failure follows]

sgdisk --zap-all /dev/dm-0 (run twice via `|| sgdisk --zap-all /dev/dm-0`) exited with rc 2 on the multipath device:

  Problem opening /dev/dm-0 for reading! Error is 2.
  The specified file does not exist!
  Problem opening '' for writing! Program will now terminate.
  Warning! MBR not overwritten! Error is 2!

While logging that failure, the ceph-cm-ansible callback itself crashed in callback_plugins/failure_log.py, line 44 (log.error(yaml.safe_dump(failure))):

  RepresenterError: ('cannot represent an object', u'sdd')
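The RepresenterError at the end of that dump is arguably a secondary bug: yaml's SafeDumper has no representer for the unicode subclass Ansible hands back (its AnsibleUnsafeText wrapper is a likely culprit), so the callback that was supposed to log the sgdisk failure died instead. A minimal sketch of the kind of coercion failure_log.py could apply before dumping, assuming the offending values are string/dict/list subclasses:

    import yaml

    def sanitize(obj):
        # Recursively downcast mappings, sequences and string subclasses to
        # plain dict/list/str so yaml.SafeDumper can represent them.
        if isinstance(obj, dict):
            return {sanitize(k): sanitize(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [sanitize(i) for i in obj]
        if isinstance(obj, str):  # on the Python 2 stack above: (str, unicode)
            return str(obj)
        return obj

    # in log_failure(): log.error(yaml.safe_dump(sanitize(failure)))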

fail 4216275 2019-08-15 06:58:24 2019-08-15 20:04:15 2019-08-15 20:58:15 0:54:00 0:44:50 0:09:10 mira master rhel 7.6 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/minsize_recovery.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read-overwrites.yaml} 2
Failure Reason:

failed to complete snap trimming before timeout
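For triage it can help to see whether trimming was merely slow or wedged. A rough sketch (not the thrasher's actual check) that polls `ceph pg dump` for PGs with a non-empty snap trim queue; the JSON field names and top-level shape vary by release, so treat them as assumptions:

    import json, subprocess, time

    def pgs_still_trimming():
        out = subprocess.check_output(
            ['ceph', 'pg', 'dump', 'pgs', '--format=json'])
        data = json.loads(out)
        # Shape differs across releases: sometimes a bare list of pg stats,
        # sometimes wrapped in {'pg_stats': [...]}.
        pgs = data.get('pg_stats', []) if isinstance(data, dict) else data
        return [p['pgid'] for p in pgs if p.get('snap_trimq_len', 0) > 0]

    while pgs_still_trimming():
        time.sleep(10)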

pass 4216276 2019-08-15 06:58:25 2019-08-15 20:16:18 2019-08-15 21:02:17 0:45:59 0:22:48 0:23:11 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/cache.yaml} 2
pass 4216277 2019-08-15 06:58:26 2019-08-15 20:18:21 2019-08-15 20:46:20 0:27:59 0:20:30 0:07:29 mira master rhel 7.6 rados/singleton/{all/admin-socket.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216278 2019-08-15 06:58:27 2019-08-15 20:20:25 2019-08-15 20:44:24 0:23:59 0:12:11 0:11:48 mira master centos 7.6 rados/objectstore/{backends/fusestore.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216279 2019-08-15 06:58:28 2019-08-15 20:44:26 2019-08-15 23:24:28 2:40:02 2:22:12 0:17:50 mira master rhel 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} tasks/readwrite.yaml} 2
pass 4216280 2019-08-15 06:58:28 2019-08-15 20:46:22 2019-08-15 21:20:21 0:33:59 0:10:27 0:23:32 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4M_rand_read.yaml} 1
pass 4216281 2019-08-15 06:58:29 2019-08-15 20:58:16 2019-08-15 21:22:16 0:24:00 0:14:22 0:09:38 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/dedup_tier.yaml} 2
pass 4216282 2019-08-15 06:58:30 2019-08-15 21:02:19 2019-08-15 21:26:18 0:23:59 0:13:36 0:10:23 mira master centos 7.6 rados/singleton/{all/deduptool.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216283 2019-08-15 06:58:31 2019-08-15 21:17:08 2019-08-15 22:09:08 0:52:00 0:29:27 0:22:33 mira master centos 7.6 rados/multimon/{clusters/21.yaml msgr-failures/many.yaml msgr/async-v1only.yaml no_pools.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} tasks/mon_recovery.yaml} 3
pass 4216284 2019-08-15 06:58:32 2019-08-15 21:20:23 2019-08-15 21:48:22 0:27:59 0:11:09 0:16:50 mira master rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml tasks/rados_cls_all.yaml validater/lockdep.yaml} 2
pass 4216285 2019-08-15 06:58:33 2019-08-15 21:21:57 2019-08-15 22:27:57 1:06:00 0:39:51 0:26:09 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/luminous-v1only.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml rados.yaml thrashers/default.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 4
pass 4216286 2019-08-15 06:58:34 2019-08-15 21:22:17 2019-08-15 21:52:17 0:30:00 0:24:33 0:05:27 mira master rhel 7.6 rados/singleton-nomsgr/{all/librados_hello_world.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216287 2019-08-15 06:58:34 2019-08-15 21:26:23 2019-08-15 21:56:22 0:29:59 0:17:39 0:12:20 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-bitmap.yaml supported-random-distro$/{centos_7.yaml} tasks/insights.yaml} 2
fail 4216288 2019-08-15 06:58:35 2019-08-15 21:48:24 2019-08-15 22:38:24 0:50:00 0:36:40 0:13:20 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/pool-snaps-few-objects.yaml} 2
Failure Reason:

"2019-08-15T22:13:06.110608+0000 mon.b (mon.0) 22 : cluster [WRN] Health check failed: 1/3 mons down, quorum b,a (MON_DOWN)" in cluster log

pass 4216289 2019-08-15 06:58:36 2019-08-15 21:52:19 2019-08-15 22:18:18 0:25:59 0:15:40 0:10:19 mira master centos 7.6 rados/singleton/{all/divergent_priors.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216290 2019-08-15 06:58:37 2019-08-15 21:56:24 2019-08-15 22:46:23 0:49:59 0:38:29 0:11:30 mira master centos 7.6 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 4216291 2019-08-15 06:58:38 2019-08-15 22:09:25 2019-08-15 22:59:24 0:49:59 0:36:26 0:13:33 mira master centos 7.6 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/fastclose.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml} 2
pass 4216292 2019-08-15 06:58:39 2019-08-15 22:13:02 2019-08-15 22:31:02 0:18:00 0:08:26 0:09:34 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/fio_4M_rand_rw.yaml} 1
pass 4216293 2019-08-15 06:58:40 2019-08-15 22:18:28 2019-08-15 23:18:28 1:00:00 0:22:08 0:37:52 mira master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 4216294 2019-08-15 06:58:40 2019-08-15 22:22:36 2019-08-15 23:12:38 0:50:02 0:28:48 0:21:14 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/rados_api_tests.yaml} 2
pass 4216295 2019-08-15 06:58:41 2019-08-15 22:28:03 2019-08-15 23:16:03 0:48:00 0:26:32 0:21:28 mira master centos 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} tasks/repair_test.yaml} 2
pass 4216296 2019-08-15 06:58:42 2019-08-15 22:31:03 2019-08-15 22:49:02 0:17:59 0:08:06 0:09:53 mira master ubuntu 18.04 rados/singleton/{all/divergent_priors2.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216297 2019-08-15 06:58:43 2019-08-15 22:38:38 2019-08-15 23:46:38 1:08:00 0:55:15 0:12:45 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/radosbench.yaml} 2
pass 4216298 2019-08-15 06:58:44 2019-08-15 22:46:34 2019-08-15 23:18:33 0:31:59 0:19:11 0:12:48 mira master centos 7.6 rados/monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/force-sync-many.yaml workloads/pool-create-delete.yaml} 2
pass 4216299 2019-08-15 06:58:45 2019-08-15 22:49:18 2019-08-15 23:13:17 0:23:59 0:11:03 0:12:56 mira master centos 7.6 rados/objectstore/{backends/keyvaluedb.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216300 2019-08-15 06:58:46 2019-08-15 22:59:26 2019-08-15 23:31:25 0:31:59 0:22:22 0:09:37 mira master centos 7.6 rados/singleton-nomsgr/{all/msgr.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216301 2019-08-15 06:58:47 2019-08-15 23:12:41 2019-08-16 03:56:50 4:44:09 4:25:58 0:18:11 mira master rhel 7.6 rados/standalone/{supported-random-distro$/{rhel_7.yaml} workloads/osd.yaml} 1
pass 4216302 2019-08-15 06:58:47 2019-08-15 23:13:18 2019-08-15 23:41:18 0:28:00 0:22:13 0:05:47 mira master rhel 7.6 rados/singleton/{all/dump-stuck.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216303 2019-08-15 06:58:48 2019-08-15 23:16:18 2019-08-16 00:06:17 0:49:59 0:20:20 0:29:39 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/luminous.yaml backoff/peering.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/fastclose.yaml rados.yaml thrashers/mapgap.yaml thrashosds-health.yaml workloads/test_rbd_api.yaml} 4
pass 4216304 2019-08-15 06:58:49 2019-08-15 23:18:30 2019-08-16 00:08:29 0:49:59 0:33:50 0:16:09 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-comp.yaml supported-random-distro$/{centos_7.yaml} tasks/module_selftest.yaml} 2
pass 4216305 2019-08-15 06:58:50 2019-08-15 23:18:35 2019-08-15 23:46:34 0:27:59 0:22:22 0:05:37 mira master rhel 7.6 rados/perf/{ceph.yaml objectstore/bluestore-stupid.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{rhel_7.yaml} workloads/fio_4M_rand_write.yaml} 1
pass 4216306 2019-08-15 06:58:51 2019-08-15 23:24:29 2019-08-16 00:00:29 0:36:00 0:22:12 0:13:48 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/redirect.yaml} 2
pass 4216307 2019-08-15 06:58:52 2019-08-15 23:31:27 2019-08-16 00:21:27 0:50:00 0:42:45 0:07:15 mira master rhel 7.6 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/fastclose.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml} 3
pass 4216308 2019-08-15 06:58:53 2019-08-15 23:41:35 2019-08-16 02:39:36 2:58:01 2:39:31 0:18:30 mira master rhel 7.6 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/fastclose.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-overwrites.yaml} 2
pass 4216309 2019-08-15 06:58:54 2019-08-15 23:46:40 2019-08-16 00:56:40 1:10:00 0:59:30 0:10:30 mira master ubuntu 18.04 rados/singleton/{all/ec-lost-unfound.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216310 2019-08-15 06:58:55 2019-08-15 23:46:40 2019-08-16 01:06:40 1:20:00 0:24:06 0:55:54 mira master ubuntu 18.04 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rgw_snaps.yaml} 2
pass 4216311 2019-08-15 06:58:56 2019-08-16 00:00:44 2019-08-16 00:38:43 0:37:59 0:24:32 0:13:27 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/redirect_promote_tests.yaml} 2
pass 4216312 2019-08-15 06:58:56 2019-08-16 00:06:20 2019-08-16 00:30:19 0:23:59 0:12:01 0:11:58 mira master centos 7.6 rados/singleton/{all/erasure-code-nonregression.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216313 2019-08-15 06:58:57 2019-08-16 00:08:31 2019-08-16 01:06:31 0:58:00 0:36:38 0:21:22 mira master centos 7.6 rados/singleton-nomsgr/{all/multi-backfill-reject.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 2
pass 4216314 2019-08-15 06:58:58 2019-08-16 00:21:43 2019-08-16 00:39:42 0:17:59 0:09:04 0:08:55 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/filestore-xfs.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4K_rand_read.yaml} 1
pass 4216315 2019-08-15 06:58:59 2019-08-16 00:30:36 2019-08-16 01:08:35 0:37:59 0:25:59 0:12:00 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/redirect_set_object.yaml} 2
pass 4216316 2019-08-15 06:59:00 2019-08-16 00:38:58 2019-08-16 01:30:58 0:52:00 0:42:27 0:09:33 mira master rhel 7.6 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
pass 4216317 2019-08-15 06:59:01 2019-08-16 00:39:43 2019-08-16 01:33:43 0:54:00 0:42:02 0:11:58 mira master centos 7.6 rados/singleton-bluestore/{all/cephtool.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216318 2019-08-15 06:59:02 2019-08-16 00:56:56 2019-08-16 01:26:55 0:29:59 0:19:49 0:10:10 mira master ubuntu 18.04 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml objectstore/bluestore-comp.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-small-objects-many-deletes.yaml} 2
pass 4216319 2019-08-15 06:59:03 2019-08-16 01:06:37 2019-08-16 02:40:37 1:34:00 1:14:01 0:19:59 mira master centos 7.6 rados/singleton/{all/lost-unfound-delete.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216320 2019-08-15 06:59:04 2019-08-16 01:06:41 2019-08-16 01:30:41 0:24:00 0:12:43 0:11:17 mira master centos 7.6 rados/multimon/{clusters/3.yaml msgr-failures/few.yaml msgr/async-v2only.yaml no_pools.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml} tasks/mon_clock_no_skews.yaml} 2
dead 4216321 2019-08-15 06:59:05 2019-08-16 01:08:42 2019-08-16 13:11:13 12:02:31 mira master centos rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/default/{default.yaml thrashosds-health.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml tasks/mon_recovery.yaml validater/valgrind.yaml} 2
pass 4216322 2019-08-15 06:59:06 2019-08-16 01:27:11 2019-08-16 02:19:11 0:52:00 0:34:12 0:17:48 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-install/mimic-v1only.yaml backoff/peering_and_degraded.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/crush-compat.yaml distro$/{centos_latest.yaml} msgr-failures/few.yaml rados.yaml thrashers/morepggrow.yaml thrashosds-health.yaml workloads/cache-snaps.yaml} 4
fail 4216323 2019-08-15 06:59:07 2019-08-16 01:30:42 2019-08-16 02:02:42 0:32:00 0:19:06 0:12:54 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-low-osd-mem-target.yaml supported-random-distro$/{centos_7.yaml} tasks/orchestrator_cli.yaml} 2
Failure Reason:

"2019-08-16T02:00:25.985060+0000 mds.a (mds.0) 1 : cluster [WRN] evicting unresponsive client mira041:x (4668), after 304.83 seconds" in cluster log

pass 4216324 2019-08-15 06:59:08 2019-08-16 01:31:00 2019-08-16 02:30:59 0:59:59 0:13:31 0:46:28 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/set-chunk-promote-flush.yaml} 2
pass 4216325 2019-08-15 06:59:08 2019-08-16 01:33:59 2019-08-16 02:15:58 0:41:59 0:32:19 0:09:40 mira master ubuntu 18.04 rados/objectstore/{backends/objectcacher-stress.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216326 2019-08-15 06:59:09 2019-08-16 02:02:58 2019-08-16 03:22:59 1:20:01 1:11:54 0:08:07 mira master ubuntu 18.04 rados/singleton/{all/lost-unfound.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216327 2019-08-15 06:59:10 2019-08-16 02:10:26 2019-08-16 02:52:26 0:42:00 0:19:28 0:22:32 mira master centos 7.6 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/many.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} tasks/scrub_test.yaml} 2
pass 4216328 2019-08-15 06:59:11 2019-08-16 02:16:00 2019-08-16 02:33:59 0:17:59 0:09:02 0:08:57 mira master ubuntu 18.04 rados/perf/{ceph.yaml objectstore/bluestore-bitmap.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{ubuntu_latest.yaml} workloads/radosbench_4K_seq_read.yaml} 1
pass 4216329 2019-08-15 06:59:12 2019-08-16 02:19:25 2019-08-16 03:13:25 0:54:00 0:27:22 0:26:38 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/mapgap.yaml thrashosds-health.yaml workloads/set-chunks-read.yaml} 2
pass 4216330 2019-08-15 06:59:13 2019-08-16 02:31:02 2019-08-16 05:11:03 2:40:01 2:21:54 0:18:07 mira master rhel 7.6 rados/thrash-erasure-code-shec/{ceph.yaml clusters/{fixed-4.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml} 4
pass 4216331 2019-08-15 06:59:14 2019-08-16 02:34:14 2019-08-16 03:02:14 0:28:00 0:13:30 0:14:30 mira master centos 7.6 rados/singleton-nomsgr/{all/pool-access.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216332 2019-08-15 06:59:15 2019-08-16 02:39:38 2019-08-16 02:59:38 0:20:00 0:08:17 0:11:43 mira master ubuntu 18.04 rados/monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/many.yaml workloads/rados_5925.yaml} 2
pass 4216333 2019-08-15 06:59:16 2019-08-16 02:40:52 2019-08-16 05:14:53 2:34:01 2:15:53 0:18:08 mira master rhel 7.6 rados/singleton/{all/max-pg-per-osd.from-mon.yaml msgr-failures/many.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
fail 4216334 2019-08-15 06:59:17 2019-08-16 02:52:26 2019-08-16 05:56:28 3:04:02 2:46:02 0:18:00 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/osd-delay.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/small-objects.yaml} 2
Failure Reason:

ceph-objectstore-tool: exp list-pgs failure with status 1
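The thrasher stops an OSD and runs ceph-objectstore-tool against its store as part of export/import testing; status 1 from the list-pgs step usually means the tool could not open or enumerate the store. A hedged reproduction sketch (the OSD must be stopped first; the data path is illustrative, not taken from this job):

    import subprocess

    subprocess.check_call([
        'ceph-objectstore-tool',
        '--data-path', '/var/lib/ceph/osd/ceph-0',  # hypothetical OSD dir
        '--op', 'list-pgs',
    ])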

fail 4216335 2019-08-15 06:59:18 2019-08-16 02:52:27 2019-08-16 05:26:30 2:34:03 2:07:23 0:26:40 mira master rhel 7.6 rados/thrash-erasure-code-big/{ceph.yaml cluster/{12-osds.yaml openstack.yaml} msgr-failures/few.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml} 3
Failure Reason:

The pre-test disk-zap task (sgdisk --zap-all on each detected disk, via ceph-cm-ansible) iterated over sdd (record truncated), sde, sdf, sdg, sdb, sdc and sdh, skipped sda ('Conditional result was False'), and failed on the multipath device /dev/dm-0 with rc 2:

  Problem opening /dev/dm-0 for reading! Error is 2.
  The specified file does not exist!
  Problem opening '' for writing! Program will now terminate.
  Warning! MBR not overwritten! Error is 2!

As in the earlier job above, the failure_log.py callback then crashed while yaml.safe_dump-ing the result:

  RepresenterError: ('cannot represent an object', u'sdd')

pass 4216336 2019-08-15 06:59:18 2019-08-16 02:59:40 2019-08-16 03:49:39 0:49:59 0:36:10 0:13:49 mira master centos 7.6 rados/thrash-erasure-code-overwrites/{bluestore-bitmap.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{centos_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-snaps-few-objects-overwrites.yaml} 2
fail 4216337 2019-08-15 06:59:19 2019-08-16 03:02:52 2019-08-16 04:18:52 1:16:00 1:04:32 0:11:28 mira master centos 7.6 rados/standalone/{supported-random-distro$/{centos_7.yaml} workloads/scrub.yaml} 1
Failure Reason:

Command failed (workunit test scrub/osd-scrub-snaps.sh) on mira002 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=01ecef99a688ad7972451d18d0cde5d57d99249a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/scrub/osd-scrub-snaps.sh'

pass 4216338 2019-08-15 06:59:20 2019-08-16 03:13:41 2019-08-16 03:33:40 0:19:59 0:10:34 0:09:25 mira master ubuntu 18.04 rados/singleton/{all/max-pg-per-osd.from-primary.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
fail 4216339 2019-08-15 06:59:21 2019-08-16 03:23:01 2019-08-16 03:55:00 0:31:59 0:22:25 0:09:34 mira master rhel 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/bluestore-stupid.yaml supported-random-distro$/{rhel_7.yaml} tasks/progress.yaml} 2
Failure Reason:

Test failure: test_osd_came_back (tasks.mgr.test_progress.TestProgress)

pass 4216340 2019-08-15 06:59:22 2019-08-16 03:33:42 2019-08-16 04:01:41 0:27:59 0:16:57 0:11:02 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/bluestore-comp.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/radosbench_4M_rand_read.yaml} 1
pass 4216341 2019-08-15 06:59:23 2019-08-16 03:49:42 2019-08-16 04:51:42 1:02:00 0:42:11 0:19:49 mira master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-install/mimic.yaml backoff/normal.yaml ceph.yaml clusters/{openstack.yaml three-plus-one.yaml} d-balancer/off.yaml distro$/{centos_latest.yaml} msgr-failures/osd-delay.yaml rados.yaml thrashers/none.yaml thrashosds-health.yaml workloads/radosbench.yaml} 4
pass 4216342 2019-08-15 06:59:24 2019-08-16 03:55:02 2019-08-16 06:51:04 2:56:02 2:37:07 0:18:55 mira master rhel 7.6 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/peering.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/upmap.yaml msgr-failures/fastclose.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/none.yaml thrashosds-health.yaml workloads/snaps-few-objects.yaml} 2
pass 4216343 2019-08-15 06:59:25 2019-08-16 03:57:04 2019-08-16 04:35:03 0:37:59 0:21:26 0:16:33 mira master ubuntu 18.04 rados/basic/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} tasks/rados_api_tests.yaml} 2
pass 4216344 2019-08-15 06:59:26 2019-08-16 04:01:43 2019-08-16 04:25:42 0:23:59 0:13:46 0:10:13 mira master ubuntu 18.04 rados/singleton/{all/max-pg-per-osd.from-replica.yaml msgr-failures/many.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216345 2019-08-15 06:59:27 2019-08-16 04:19:07 2019-08-16 05:05:07 0:46:00 0:39:19 0:06:41 mira master rhel 7.6 rados/singleton-nomsgr/{all/recovery-unfound-found.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216346 2019-08-15 06:59:28 2019-08-16 04:25:44 2019-08-16 05:17:44 0:52:00 0:30:35 0:21:25 mira master centos 7.6 rados/thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{centos_7.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} 2
pass 4216347 2019-08-15 06:59:28 2019-08-16 04:35:18 2019-08-16 05:23:18 0:48:00 0:25:21 0:22:39 mira master centos 7.6 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size.yaml 1-pg-log-overrides/short_pg_log.yaml 2-recovery-overrides/{default.yaml} backoff/peering_and_degraded.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/crush-compat.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/write_fadvise_dontneed.yaml} 2
pass 4216348 2019-08-15 06:59:29 2019-08-16 04:51:44 2019-08-16 05:27:43 0:35:59 0:25:14 0:10:45 mira master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} msgr-failures/osd-delay.yaml objectstore/filestore-xfs.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=isa-k=2-m=1.yaml} 2
dead 4216349 2019-08-15 06:59:30 2019-08-16 05:05:09 2019-08-16 17:07:33 12:02:24 mira master ubuntu 18.04 rados/objectstore/{backends/objectstore.yaml supported-random-distro$/{ubuntu_latest.yaml}} 1
pass 4216350 2019-08-15 06:59:31 2019-08-16 05:11:05 2019-08-16 05:41:04 0:29:59 0:24:20 0:05:39 mira master rhel 7.6 rados/singleton/{all/mon-auth-caps.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml}} 1
pass 4216351 2019-08-15 06:59:32 2019-08-16 05:14:59 2019-08-16 05:42:59 0:28:00 0:16:45 0:11:15 mira master centos 7.6 rados/perf/{ceph.yaml objectstore/bluestore-low-osd-mem-target.yaml openstack.yaml settings/optimized.yaml supported-random-distro$/{centos_7.yaml} workloads/radosbench_4M_seq_read.yaml} 1
pass 4216352 2019-08-15 06:59:33 2019-08-16 05:17:45 2019-08-16 05:41:45 0:24:00 0:13:56 0:10:04 mira master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size.yaml 1-pg-log-overrides/normal_pg_log.yaml 2-recovery-overrides/{more-active-recovery.yaml} backoff/normal.yaml ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-balancer/off.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/admin_socket_objecter_requests.yaml} 2
pass 4216353 2019-08-15 06:59:34 2019-08-16 05:23:32 2019-08-16 05:49:31 0:25:59 0:12:41 0:13:18 mira master centos 7.6 rados/multimon/{clusters/6.yaml msgr-failures/many.yaml msgr/async.yaml no_pools.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} tasks/mon_clock_with_skews.yaml} 2
pass 4216354 2019-08-15 06:59:34 2019-08-16 05:26:32 2019-08-16 05:54:31 0:27:59 0:18:24 0:09:35 mira master rados/verify/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} d-thrash/none.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml tasks/rados_api_tests.yaml validater/lockdep.yaml} 2
pass 4216355 2019-08-15 06:59:35 2019-08-16 05:27:45 2019-08-16 06:15:45 0:48:00 0:18:41 0:29:19 mira master centos 7.6 rados/singleton/{all/mon-config-key-caps.yaml msgr-failures/many.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml}} 1
pass 4216356 2019-08-15 06:59:36 2019-08-16 05:41:07 2019-08-16 06:13:06 0:31:59 0:18:54 0:13:05 mira master centos 7.6 rados/mgr/{clusters/{2-node-mgr.yaml openstack.yaml} debug/mgr.yaml objectstore/filestore-xfs.yaml supported-random-distro$/{centos_7.yaml} tasks/prometheus.yaml} 2