Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 4608432 2019-12-16 13:12:21 2019-12-16 13:14:10 2019-12-16 13:56:09 0:41:59 0:19:51 0:22:08 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync.yaml workloads/pool-create-delete.yaml} 2
pass 4608433 2019-12-16 13:12:23 2019-12-16 13:28:19 2019-12-16 16:08:22 2:40:03 2:22:47 0:17:16 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/force-sync-many.yaml workloads/rados_5925.yaml} 2
pass 4608434 2019-12-16 13:12:24 2019-12-16 13:35:48 2019-12-16 14:23:48 0:48:00 0:34:01 0:13:59 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/many.yaml workloads/rados_api_tests.yaml} 2
pass 4608435 2019-12-16 13:12:25 2019-12-16 13:56:24 2019-12-16 15:18:24 1:22:00 1:12:13 0:09:47 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608436 2019-12-16 13:12:26 2019-12-16 13:56:24 2019-12-16 14:56:24 1:00:00 0:37:53 0:22:07 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync-many.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-16T14:32:12.915083+0000 mon.b (mon.0) 186 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608437 2019-12-16 13:12:27 2019-12-16 14:20:26 2019-12-16 15:14:26 0:54:00 0:45:41 0:08:19 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync.yaml workloads/snaps-few-objects.yaml} 2
pass 4608438 2019-12-16 13:12:28 2019-12-16 14:24:05 2019-12-16 14:50:05 0:26:00 0:15:07 0:10:53 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/force-sync-many.yaml workloads/pool-create-delete.yaml} 2
pass 4608439 2019-12-16 13:12:30 2019-12-16 14:50:07 2019-12-16 15:20:06 0:29:59 0:16:36 0:13:23 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/many.yaml workloads/rados_5925.yaml} 2
pass 4608440 2019-12-16 13:12:31 2019-12-16 14:56:42 2019-12-16 15:32:42 0:36:00 0:26:51 0:09:09 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/rados_api_tests.yaml} 2
pass 4608441 2019-12-16 13:12:32 2019-12-16 15:14:44 2019-12-16 16:24:44 1:10:00 0:59:18 0:10:42 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync-many.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608442 2019-12-16 13:12:33 2019-12-16 15:18:43 2019-12-16 18:20:47 3:02:04 2:42:26 0:19:38 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-16T17:56:24.102019+0000 mon.b (mon.1) 97 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608443 2019-12-16 13:12:34 2019-12-16 15:20:24 2019-12-16 16:12:24 0:52:00 0:39:03 0:12:57 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/force-sync-many.yaml workloads/snaps-few-objects.yaml} 2
pass 4608444 2019-12-16 13:12:35 2019-12-16 15:32:45 2019-12-16 16:16:44 0:43:59 0:22:47 0:21:12 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/many.yaml workloads/pool-create-delete.yaml} 2
pass 4608445 2019-12-16 13:12:36 2019-12-16 16:08:42 2019-12-16 16:28:41 0:19:59 0:09:15 0:10:44 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/rados_5925.yaml} 2
pass 4608446 2019-12-16 13:12:37 2019-12-16 16:12:43 2019-12-16 19:06:47 2:54:04 2:35:10 0:18:54 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync-many.yaml workloads/rados_api_tests.yaml} 2
pass 4608447 2019-12-16 13:12:38 2019-12-16 16:17:02 2019-12-16 17:23:03 1:06:01 0:56:23 0:09:38 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608448 2019-12-16 13:12:39 2019-12-16 16:25:03 2019-12-16 17:03:03 0:38:00 0:27:52 0:10:08 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/force-sync-many.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-16T16:41:52.333843+0000 mon.a (mon.0) 121 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608449 2019-12-16 13:12:40 2019-12-16 16:29:00 2019-12-16 17:21:00 0:52:00 0:42:19 0:09:41 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/many.yaml workloads/snaps-few-objects.yaml} 2
pass 4608450 2019-12-16 13:12:41 2019-12-16 17:03:06 2019-12-16 17:31:05 0:27:59 0:17:37 0:10:22 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/pool-create-delete.yaml} 2
fail 4608451 2019-12-16 13:12:42 2019-12-16 17:21:03 2019-12-16 20:07:05 2:46:02 2:13:41 0:32:21 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync-many.yaml workloads/rados_5925.yaml} 2
Failure Reason:

module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdd || sgdisk --zap-all /dev/sdd', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:11.422533'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.012783', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [u'dm-3'], u'labels': [], u'ids': [u'lvm-pv-uuid-as3yev-TbNU-1v15-6UIO-P2Q5-8w6g-Yf2ou0', u'scsi-2001b4d2001655500'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N210EV5E', u'holders': [u'vg_hdd-lv_2'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sde'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:13.741156', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [u'dm-3'], u'labels': [], u'ids': [u'lvm-pv-uuid-as3yev-TbNU-1v15-6UIO-P2Q5-8w6g-Yf2ou0', u'scsi-2001b4d2001655500'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0N210EV5E', u'holders': [u'vg_hdd-lv_2'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sde'}, u'cmd': u'sgdisk --zap-all /dev/sde || sgdisk --zap-all /dev/sde', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sde || sgdisk --zap-all /dev/sde', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:12.728373'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.012066', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [u'dm-3', u'dm-4'], u'labels': [], u'ids': [u'lvm-pv-uuid-b8s3qL-3wdL-98r5-dFkH-sysm-UiGS-FFsDat', u'scsi-2001b4d2012776300'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPS930N121G73V', u'holders': [u'vg_hdd-lv_2', u'vg_hdd-lv_1'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HDS721010CLA330', u'partitions': {}}, 'key': u'sdf'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:15.017590', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [u'dm-3', u'dm-4'], u'labels': [], u'ids': [u'lvm-pv-uuid-b8s3qL-3wdL-98r5-dFkH-sysm-UiGS-FFsDat', u'scsi-2001b4d2012776300'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPS930N121G73V', u'holders': [u'vg_hdd-lv_2', u'vg_hdd-lv_1'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HDS721010CLA330', u'partitions': {}}, 'key': u'sdf'}, u'cmd': u'sgdisk --zap-all /dev/sdf || sgdisk --zap-all /dev/sdf', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdf || sgdisk --zap-all /dev/sdf', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:14.005524'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011862', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'NA', u'links': {u'masters': [u'dm-0'], u'labels': [], u'ids': [], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'PAJ55T7E', u'holders': [u'mpatha'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA721010KLA330', u'partitions': {}}, 'key': u'sdg'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:16.294009', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'NA', u'links': {u'masters': [u'dm-0'], u'labels': [], u'ids': [], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'PAJ55T7E', u'holders': [u'mpatha'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA721010KLA330', u'partitions': {}}, 'key': u'sdg'}, u'cmd': u'sgdisk --zap-all /dev/sdg || sgdisk --zap-all /dev/sdg', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdg || sgdisk --zap-all /dev/sdg', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:15.282147'}, {'ansible_loop_var': u'item', '_ansible_no_log': False, 'skip_reason': u'Conditional result was False', 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP52BEJ', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000528AS', u'partitions': {u'sda1': {u'start': u'2048', u'sectorsize': 512, u'uuid': u'f1bca609-9cd3-46ed-8f34-af0538e7246e', u'sectors': u'1953522688', u'holders': [], u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000-part1'], u'uuids': [u'f1bca609-9cd3-46ed-8f34-af0538e7246e']}, u'size': u'931.51 GB'}}}, 'key': u'sda'}, 'skipped': True, 'changed': False, '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP52BEJ', u'holders': [], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000528AS', u'partitions': {u'sda1': {u'start': u'2048', u'sectorsize': 512, u'uuid': u'f1bca609-9cd3-46ed-8f34-af0538e7246e', u'sectors': u'1953522688', u'holders': [], u'links': {u'masters': [], u'labels': [], u'ids': [u'scsi-2001b4d2000000000-part1'], u'uuids': [u'f1bca609-9cd3-46ed-8f34-af0538e7246e']}, u'size': u'931.51 GB'}}}, 'key': u'sda'}}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.012044', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [u'dm-0'], u'labels': [], u'ids': [], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP53FPZ', u'holders': [u'mpatha'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000528AS', u'partitions': {}}, 'key': u'sdb'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:17.581558', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Seagate', u'links': {u'masters': [u'dm-0'], u'labels': [], u'ids': [], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'5VP53FPZ', u'holders': [u'mpatha'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'ST31000528AS', u'partitions': {}}, 'key': u'sdb'}, u'cmd': u'sgdisk --zap-all /dev/sdb || sgdisk --zap-all /dev/sdb', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdb || sgdisk --zap-all /dev/sdb', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:16.569514'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.011907', 'stdout_lines': [u'Creating new GPT entries.', u'GPT data structures destroyed! You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [u'dm-1', u'dm-2'], u'labels': [], u'ids': [u'lvm-pv-uuid-TbjbGf-o3e2-zgkT-w64U-NjZi-VMf3-elUcTM', u'scsi-2001b4d208263c000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0HD2H3VPL', u'holders': [u'vg_hdd-lv_4', u'vg_hdd-lv_3'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdc'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:18.863588', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': u'Hitachi', u'links': {u'masters': [u'dm-1', u'dm-2'], u'labels': [], u'ids': [u'lvm-pv-uuid-TbjbGf-o3e2-zgkT-w64U-NjZi-VMf3-elUcTM', u'scsi-2001b4d208263c000'], u'uuids': []}, u'sas_device_handle': None, u'host': u'RAID bus controller: Areca Technology Corp. 
ARC-1680 series PCIe to SAS/SATA 3Gb RAID Controller', u'support_discard': u'0', u'serial': u'JPW9K0HD2H3VPL', u'holders': [u'vg_hdd-lv_4', u'vg_hdd-lv_3'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': u'HUA722010CLA330', u'partitions': {}}, 'key': u'sdc'}, u'cmd': u'sgdisk --zap-all /dev/sdc || sgdisk --zap-all /dev/sdc', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/sdc || sgdisk --zap-all /dev/sdc', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:17.851681'}, {'stderr_lines': [u'Problem opening /dev/dm-4 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!', u'Problem opening /dev/dm-4 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!'], u'changed': True, u'stdout': u'', u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/dm-4 || sgdisk --zap-all /dev/dm-4', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'delta': u'0:00:00.008618', 'stdout_lines': [], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_1', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5kjf3QBfjzJeFPEcBUGbgtOLJB32jUn1HG'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-4'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:19.132391', '_ansible_no_log': False, u'start': u'2019-12-16 20:05:19.123773', u'failed': True, u'cmd': u'sgdisk --zap-all /dev/dm-4 || sgdisk --zap-all /dev/dm-4', 'item': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_1', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5kjf3QBfjzJeFPEcBUGbgtOLJB32jUn1HG'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-4'}, u'stderr': u"Problem opening /dev/dm-4 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!\nProblem opening /dev/dm-4 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!", u'rc': 2, u'msg': u'non-zero return code'}, {'stderr_lines': [u'Problem opening /dev/dm-2 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! 
Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!', u'Problem opening /dev/dm-2 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!'], u'changed': True, u'stdout': u'', u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/dm-2 || sgdisk --zap-all /dev/dm-2', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'delta': u'0:00:00.008719', 'stdout_lines': [], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_3', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5kHKqhkz701TWXlbC5yasp3jpX9tHmZncU'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-2'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:19.401922', '_ansible_no_log': False, u'start': u'2019-12-16 20:05:19.393203', u'failed': True, u'cmd': u'sgdisk --zap-all /dev/dm-2 || sgdisk --zap-all /dev/dm-2', 'item': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_3', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5kHKqhkz701TWXlbC5yasp3jpX9tHmZncU'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-2'}, u'stderr': u"Problem opening /dev/dm-2 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!\nProblem opening /dev/dm-2 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!", u'rc': 2, u'msg': u'non-zero return code'}, {'stderr_lines': [u'Problem opening /dev/dm-3 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!', u'Problem opening /dev/dm-3 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! 
Error is 2!'], u'changed': True, u'stdout': u'', u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/dm-3 || sgdisk --zap-all /dev/dm-3', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'delta': u'0:00:00.008605', 'stdout_lines': [], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_2', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5kExEQPSoqcdMGlMm8GVdA7BuNJSNprU35'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-3'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:19.662219', '_ansible_no_log': False, u'start': u'2019-12-16 20:05:19.653614', u'failed': True, u'cmd': u'sgdisk --zap-all /dev/dm-3 || sgdisk --zap-all /dev/dm-3', 'item': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_2', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5kExEQPSoqcdMGlMm8GVdA7BuNJSNprU35'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-3'}, u'stderr': u"Problem opening /dev/dm-3 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!\nProblem opening /dev/dm-3 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!", u'rc': 2, u'msg': u'non-zero return code'}, {'stderr_lines': [], u'changed': True, u'stdout': u'Creating new GPT entries.\nWarning: The kernel is still using the old partition table.\nThe new table will be used at the next reboot.\nGPT data structures destroyed! You may now partition the disk using fdisk or\nother utilities.', u'delta': u'0:00:01.032804', 'stdout_lines': [u'Creating new GPT entries.', u'Warning: The kernel is still using the old partition table.', u'The new table will be used at the next reboot.', u'GPT data structures destroyed! 
You may now partition the disk using fdisk or', u'other utilities.'], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [u'dm-1', u'dm-4'], u'labels': [], u'ids': [u'dm-name-mpatha', u'dm-uuid-mpath-2001b4d2000000000', u'lvm-pv-uuid-hmxWgO-3g61-4EaN-FcEg-u3Fx-8idb-ZOfW30'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'serial': u'PAJ55T7E', u'holders': [u'vg_hdd-lv_4', u'vg_hdd-lv_1'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': None, u'partitions': {}}, 'key': u'dm-0'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:20.940918', '_ansible_no_log': False, 'item': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [u'dm-1', u'dm-4'], u'labels': [], u'ids': [u'dm-name-mpatha', u'dm-uuid-mpath-2001b4d2000000000', u'lvm-pv-uuid-hmxWgO-3g61-4EaN-FcEg-u3Fx-8idb-ZOfW30'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'serial': u'PAJ55T7E', u'holders': [u'vg_hdd-lv_4', u'vg_hdd-lv_1'], u'size': u'931.51 GB', u'scheduler_mode': u'deadline', u'rotational': u'1', u'sectors': u'1953525168', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'model': None, u'partitions': {}}, 'key': u'dm-0'}, u'cmd': u'sgdisk --zap-all /dev/dm-0 || sgdisk --zap-all /dev/dm-0', 'failed': False, u'stderr': u'', u'rc': 0, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/dm-0 || sgdisk --zap-all /dev/dm-0', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'start': u'2019-12-16 20:05:19.908114'}, {'stderr_lines': [u'Problem opening /dev/dm-1 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! Error is 2!', u'Problem opening /dev/dm-1 for reading! Error is 2.', u'The specified file does not exist!', u"Problem opening '' for writing! Program will now terminate.", u'Warning! MBR not overwritten! 
Error is 2!'], u'changed': True, u'stdout': u'', u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': True, u'strip_empty_ends': True, u'_raw_params': u'sgdisk --zap-all /dev/dm-1 || sgdisk --zap-all /dev/dm-1', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin_add_newline': True, u'stdin': None}}, u'delta': u'0:00:00.008593', 'stdout_lines': [], '_ansible_item_label': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_4', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5ktp8Oru41cRHoaMGzPUTuxKY8zoRblcy4'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-1'}, 'ansible_loop_var': u'item', u'end': u'2019-12-16 20:05:21.213757', '_ansible_no_log': False, u'start': u'2019-12-16 20:05:21.205164', u'failed': True, u'cmd': u'sgdisk --zap-all /dev/dm-1 || sgdisk --zap-all /dev/dm-1', 'item': {'value': {u'sectorsize': u'512', u'vendor': None, u'links': {u'masters': [], u'labels': [], u'ids': [u'dm-name-vg_hdd-lv_4', u'dm-uuid-LVM-9y7wqpY7Tn5keRwFH5rMEZ5njgOH7J5ktp8Oru41cRHoaMGzPUTuxKY8zoRblcy4'], u'uuids': []}, u'sas_device_handle': None, u'host': u'', u'support_discard': u'0', u'model': None, u'size': u'1.36 TB', u'scheduler_mode': u'', u'rotational': u'1', u'sectors': u'2930278400', u'sas_address': None, u'virtual': 1, u'removable': u'0', u'holders': [], u'partitions': {}}, 'key': u'dm-1'}, u'stderr': u"Problem opening /dev/dm-1 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!\nProblem opening /dev/dm-1 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! 
Error is 2!", u'rc': 2, u'msg': u'non-zero return code'}]}}Traceback (most recent call last): File "/home/teuthworker/src/git.ceph.com_git_ceph-cm-ansible_master/callback_plugins/failure_log.py", line 44, in log_failure log.error(yaml.safe_dump(failure)) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/__init__.py", line 309, in safe_dump return dump_all([data], stream, Dumper=SafeDumper, **kwds) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/__init__.py", line 281, in dump_all dumper.represent(data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 29, in represent node = self.represent_data(data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 58, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 227, in represent_dict return self.represent_mapping(u'tag:yaml.org,2002:map', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 125, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 58, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 227, in represent_dict return self.represent_mapping(u'tag:yaml.org,2002:map', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 125, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 58, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 219, in represent_list return self.represent_sequence(u'tag:yaml.org,2002:seq', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 102, in represent_sequence node_item = self.represent_data(item) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 58, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 227, in represent_dict return self.represent_mapping(u'tag:yaml.org,2002:map', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 125, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 58, in represent_data node = 
self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 227, in represent_dict return self.represent_mapping(u'tag:yaml.org,2002:map', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 125, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 68, in represent_data node = self.yaml_representers[None](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/yaml/representer.py", line 251, in represent_undefined raise RepresenterError("cannot represent an object", data)RepresenterError: ('cannot represent an object', u'sdd')
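
Two distinct problems are visible in this failure reason. First, the underlying ansible task ('sgdisk --zap-all <dev> || sgdisk --zap-all <dev>') returned rc 2 for /dev/dm-1 through /dev/dm-4 because those LVM device-mapper nodes no longer existed by the time the zap loop reached them ("The specified file does not exist!"). Second, the failure_log callback then crashed while trying to yaml.safe_dump the ansible result: PyYAML's SafeDumper only knows exact built-in types, and Ansible wraps many strings in a unicode subclass, which is the likely reason the plain-looking device name u'sdd' ends in the RepresenterError above. A minimal sketch of that second behaviour in plain Python (UnsafeText below is a hypothetical stand-in for Ansible's string wrapper, e.g. AnsibleUnsafeText, not its real import path):

    import yaml

    class UnsafeText(str):
        # stand-in for Ansible's str/unicode subclass; illustrative only
        pass

    try:
        # SafeDumper looks up representers by exact type, so a str subclass
        # falls through to represent_undefined and raises.
        yaml.safe_dump({"device": UnsafeText("sdd")})
    except yaml.representer.RepresenterError as exc:
        print(exc)  # e.g. ('cannot represent an object', 'sdd'); wording varies by PyYAML version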

pass 4608452 2019-12-16 13:12:43 2019-12-16 17:23:05 2019-12-16 18:11:04 0:47:59 0:33:04 0:14:55 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync.yaml workloads/rados_api_tests.yaml} 2
pass 4608453 2019-12-16 13:12:44 2019-12-16 17:31:07 2019-12-16 21:09:11 3:38:04 3:17:11 0:20:53 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/force-sync-many.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608454 2019-12-16 13:12:45 2019-12-16 18:11:06 2019-12-16 19:07:06 0:56:00 0:41:05 0:14:55 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/many.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-16T18:38:36.089397+0000 mon.b (mon.1) 106 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608455 2019-12-16 13:12:46 2019-12-16 18:20:56 2019-12-16 19:14:55 0:53:59 0:43:12 0:10:47 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/snaps-few-objects.yaml} 2
pass 4608456 2019-12-16 13:12:47 2019-12-16 19:07:10 2019-12-16 19:49:11 0:42:01 0:20:10 0:21:51 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync-many.yaml workloads/pool-create-delete.yaml} 2
pass 4608457 2019-12-16 13:12:48 2019-12-16 19:07:10 2019-12-16 19:39:11 0:32:01 0:24:13 0:07:48 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync.yaml workloads/rados_5925.yaml} 2
pass 4608458 2019-12-16 13:12:49 2019-12-16 19:15:14 2019-12-16 20:13:15 0:58:01 0:34:27 0:23:34 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/force-sync-many.yaml workloads/rados_api_tests.yaml} 2
pass 4608459 2019-12-16 13:12:51 2019-12-16 19:39:15 2019-12-16 21:05:17 1:26:02 1:15:24 0:10:38 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/many.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608460 2019-12-16 13:12:51 2019-12-16 19:49:34 2019-12-16 20:29:33 0:39:59 0:29:01 0:10:58 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/one.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

Command failed (workunit test mon/caps.sh) on mira077 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=35b0b52153206fd6178afe584a2cd9c0ef2bcc05 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/mon/caps.sh'

pass 4608461 2019-12-16 13:12:53 2019-12-16 19:56:37 2019-12-16 20:54:37 0:58:00 0:50:14 0:07:46 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync-many.yaml workloads/snaps-few-objects.yaml} 2
pass 4608462 2019-12-16 13:12:54 2019-12-16 19:56:38 2019-12-16 20:30:36 0:33:58 0:19:58 0:14:00 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync.yaml workloads/pool-create-delete.yaml} 2
dead 4608463 2019-12-16 13:12:55 2019-12-16 19:56:38 2019-12-16 20:12:36 0:15:58 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/force-sync-many.yaml workloads/rados_5925.yaml}
Failure Reason:

reached maximum tries (60) after waiting for 900 seconds
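
Note: the 900-second figure is simply the retry budget spelled out in the message, 60 tries at an evident 15-second interval (60 × 15 s = 900 s). The job is marked dead rather than failed because teuthology gave up waiting, presumably for a test node to come back, before the workload produced a result.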

pass 4608464 2019-12-16 13:12:56 2019-12-16 19:56:38 2019-12-16 20:42:37 0:45:59 0:32:44 0:13:15 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/many.yaml workloads/rados_api_tests.yaml} 2
pass 4608465 2019-12-16 13:12:56 2019-12-16 19:56:38 2019-12-16 21:34:38 1:38:00 1:29:34 0:08:26 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/one.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608466 2019-12-16 13:12:57 2019-12-16 19:56:39 2019-12-16 20:44:37 0:47:58 0:39:20 0:08:38 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync-many.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-16T20:24:28.633151+0000 mon.a (mon.0) 126 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608467 2019-12-16 13:12:58 2019-12-16 19:56:38 2019-12-16 20:48:37 0:51:59 0:42:35 0:09:24 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync.yaml workloads/snaps-few-objects.yaml} 2
pass 4608468 2019-12-16 13:12:59 2019-12-16 19:56:39 2019-12-16 20:30:37 0:33:58 0:19:55 0:14:03 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/force-sync-many.yaml workloads/pool-create-delete.yaml} 2
pass 4608469 2019-12-16 13:13:00 2019-12-16 19:56:39 2019-12-16 20:26:37 0:29:58 0:09:13 0:20:45 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/many.yaml workloads/rados_5925.yaml} 2
pass 4608470 2019-12-16 13:13:01 2019-12-16 19:56:39 2019-12-16 20:42:37 0:45:58 0:32:43 0:13:15 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/one.yaml workloads/rados_api_tests.yaml} 2
pass 4608471 2019-12-16 13:13:02 2019-12-16 19:56:39 2019-12-16 21:28:39 1:32:00 1:18:01 0:13:59 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/sync-many.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608472 2019-12-16 13:13:03 2019-12-16 19:56:39 2019-12-16 20:50:38 0:53:59 0:28:25 0:25:34 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-16T20:28:44.242559+0000 mon.b (mon.1) 76 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608473 2019-12-16 13:13:04 2019-12-16 19:56:39 2019-12-16 21:30:39 1:34:00 0:47:46 0:46:14 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/force-sync-many.yaml workloads/snaps-few-objects.yaml} 2
pass 4608474 2019-12-16 13:13:05 2019-12-16 19:56:40 2019-12-16 21:16:39 1:19:59 0:28:45 0:51:14 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/many.yaml workloads/pool-create-delete.yaml} 2
pass 4608475 2019-12-16 13:13:06 2019-12-16 19:56:39 2019-12-16 23:12:41 3:16:02 2:22:54 0:53:08 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/one.yaml workloads/rados_5925.yaml} 2
pass 4608476 2019-12-16 13:13:08 2019-12-16 23:25:08 2019-12-17 00:19:08 0:54:00 0:36:44 0:17:16 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync-many.yaml workloads/rados_api_tests.yaml} 2
pass 4608477 2019-12-16 13:13:09 2019-12-16 23:37:07 2019-12-17 00:53:07 1:16:00 1:01:43 0:14:17 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608478 2019-12-16 13:13:10 2019-12-16 23:40:46 2019-12-17 00:30:46 0:50:00 0:26:41 0:23:19 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/force-sync-many.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-17T00:10:40.886124+0000 mon.a (mon.1) 54 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608479 2019-12-16 13:13:10 2019-12-16 23:41:52 2019-12-17 01:05:52 1:24:00 0:53:47 0:30:13 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/many.yaml workloads/snaps-few-objects.yaml} 2
pass 4608480 2019-12-16 13:13:11 2019-12-16 23:55:04 2019-12-17 03:15:06 3:20:02 2:27:47 0:52:15 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v1only.yaml objectstore/bluestore-bitmap.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/one.yaml workloads/pool-create-delete.yaml} 2
pass 4608481 2019-12-16 13:13:12 2019-12-16 23:57:06 2019-12-17 00:33:04 0:35:58 0:11:11 0:24:47 mira master ubuntu 18.04 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v2only.yaml objectstore/bluestore-comp.yaml rados.yaml supported-random-distro$/{ubuntu_latest.yaml} thrashers/sync-many.yaml workloads/rados_5925.yaml} 2
pass 4608482 2019-12-16 13:13:13 2019-12-17 00:03:37 2019-12-17 00:59:35 0:55:58 0:36:10 0:19:48 mira master rhel 7.7 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async.yaml objectstore/bluestore-low-osd-mem-target.yaml rados.yaml supported-random-distro$/{rhel_7.yaml} thrashers/sync.yaml workloads/rados_api_tests.yaml} 2
pass 4608483 2019-12-16 13:13:14 2019-12-17 00:05:16 2019-12-17 02:25:18 2:20:02 1:21:12 0:58:50 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async-v1only.yaml objectstore/bluestore-stupid.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/force-sync-many.yaml workloads/rados_mon_osdmap_prune.yaml} 2
fail 4608484 2019-12-16 13:13:15 2019-12-17 00:12:45 2019-12-17 01:14:46 1:02:01 0:41:36 0:20:25 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/3-mons.yaml msgr-failures/few.yaml msgr/async-v2only.yaml objectstore/filestore-xfs.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/many.yaml workloads/rados_mon_workunits.yaml} 2
Failure Reason:

"2019-12-17T00:46:51.864176+0000 mon.a (mon.0) 186 : cluster [WRN] Health check failed: 1 pool(s) have no replicas configured (POOL_NO_REDUNDANCY)" in cluster log

pass 4608485 2019-12-16 13:13:16 2019-12-17 00:17:04 2019-12-17 01:33:04 1:16:00 0:50:52 0:25:08 mira master centos 7.6 rados:monthrash/{ceph.yaml clusters/9-mons.yaml msgr-failures/mon-delay.yaml msgr/async.yaml objectstore/bluestore-avl.yaml rados.yaml supported-random-distro$/{centos_7.yaml} thrashers/one.yaml workloads/snaps-few-objects.yaml} 2