Description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/classic msgr-failures/osd-delay msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/cache-pool-snaps-readproxy}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2021-06-06_07:01:03-rados-master-distro-basic-gibba/6155877/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=de9ec4f31c49478f83e83b991aed952d

Failure Reason:

Summary: the underlying Ansible task failure was `sgdisk --zap-all /dev/dm-3` exiting with rc 2 ("Problem opening /dev/dm-3 for reading! ... The specified file does not exist!"); the exception actually reported below, however, is a secondary failure in teuthology's logging path — `yaml.safe_dump` in `callback_plugins/failure_log.py` raised `yaml.representer.RepresenterError: cannot represent an object` while serializing the failure object, obscuring the original disk-zap error.

{'Failure object was': {'gibba040.front.sepia.ceph.com': {'results': [{'cmd': 'sgdisk --zap-all /dev/dm-1 || sgdisk --zap-all /dev/dm-1', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:55.764178', 'end': '2021-06-23 12:05:56.837521', 'delta': '0:00:01.073343', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-1 || sgdisk --zap-all /dev/dm-1', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! 
You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_2', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFpdj0uu8VGcj3zhHP3BT6MhX1ccqB2PtN'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_2', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFpdj0uu8VGcj3zhHP3BT6MhX1ccqB2PtN'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}, {'cmd': 'sgdisk --zap-all /dev/nvme0n1 || sgdisk --zap-all /dev/nvme0n1', 'stdout': 'Creating new GPT entries.\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:56.975054', 'end': '2021-06-23 12:05:58.027203', 'delta': '0:00:01.052149', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/nvme0n1 || sgdisk --zap-all /dev/nvme0n1', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'GPT data structures destroyed! 
You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['lvm-pv-uuid-iVdqm6-FNBB-4tVz-feBG-Fl7s-Npha-NE31Me', 'nvme-INTEL_SSDPEL1K375GA_PHKM913400J3375A', 'nvme-nvme.8086-50484b4d3931333430304a3333373541-494e54454c2053534450454c314b3337354741-00000001'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2', 'dm-3', 'dm-4']}, 'vendor': 'None', 'model': 'INTEL SSDPEL1K375GA', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '732585168', 'sectorsize': '512', 'size': '349.32 GB', 'host': 'Non-Volatile memory controller: Intel Corporation NVMe Datacenter SSD [Optane]', 'holders': ['vg_nvme-lv_2', 'vg_nvme-lv_5', 'vg_nvme-lv_3', 'vg_nvme-lv_1', 'vg_nvme-lv_4']}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['lvm-pv-uuid-iVdqm6-FNBB-4tVz-feBG-Fl7s-Npha-NE31Me', 'nvme-INTEL_SSDPEL1K375GA_PHKM913400J3375A', 'nvme-nvme.8086-50484b4d3931333430304a3333373541-494e54454c2053534450454c314b3337354741-00000001'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2', 'dm-3', 'dm-4']}, 'vendor': 'None', 'model': 'INTEL SSDPEL1K375GA', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '732585168', 'sectorsize': '512', 'size': '349.32 GB', 'host': 'Non-Volatile memory controller: Intel Corporation NVMe Datacenter SSD [Optane]', 'holders': ['vg_nvme-lv_2', 'vg_nvme-lv_5', 'vg_nvme-lv_3', 'vg_nvme-lv_1', 'vg_nvme-lv_4']}}}, {'cmd': 'sgdisk --zap-all /dev/dm-4 || sgdisk --zap-all /dev/dm-4', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the 
next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:58.160552', 'end': '2021-06-23 12:05:59.221434', 'delta': '0:00:01.060882', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-4 || sgdisk --zap-all /dev/dm-4', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-4', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_5', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFlcaaj81Mopo31Qoyu1iQZzD1JlE3Lg7g'], 'uuids': ['6ae894fb-370d-4368-bcd6-00a2b169d69d'], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '27262976', 'sectorsize': '512', 'size': '13.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-4', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_5', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFlcaaj81Mopo31Qoyu1iQZzD1JlE3Lg7g'], 'uuids': ['6ae894fb-370d-4368-bcd6-00a2b169d69d'], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 
'sectors': '27262976', 'sectorsize': '512', 'size': '13.00 GB', 'host': '', 'holders': []}}}, {'cmd': 'sgdisk --zap-all /dev/dm-2 || sgdisk --zap-all /dev/dm-2', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:59.357466', 'end': '2021-06-23 12:06:00.427944', 'delta': '0:00:01.070478', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-2 || sgdisk --zap-all /dev/dm-2', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! 
You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_3', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFk9SXRRj3epmMqJbKzBeC3iyBXdclk1aR'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_3', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFk9SXRRj3epmMqJbKzBeC3iyBXdclk1aR'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}, {'cmd': 'sgdisk --zap-all /dev/dm-0 || sgdisk --zap-all /dev/dm-0', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': 'Warning! MBR not overwritten! 
Error is 5!', 'rc': 0, 'start': '2021-06-23 12:06:00.570109', 'end': '2021-06-23 12:06:01.734130', 'delta': '0:00:01.164021', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-0 || sgdisk --zap-all /dev/dm-0', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': ['Warning! MBR not overwritten! Error is 5!'], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_1', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFBQDfQaBnMz7zMHah25aOxtoo9v6apraQ'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_1', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFBQDfQaBnMz7zMHah25aOxtoo9v6apraQ'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}, {'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', 
'_ansible_no_log': False, 'item': {'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-350000399ab901d7d', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'wwn-0x50000399ab901d7d'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'ATA', 'model': 'TOSHIBA MG04ACA1', 'sas_address': 'None', 'sas_device_handle': 'None', 'serial': 'Y9I5K31DF6XF', 'removable': '0', 'support_discard': '0', 'wwn': '0x50000399ab901d7d', 'partitions': {'sda1': {'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 'scsi-350000399ab901d7d-part1', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'wwn-0x50000399ab901d7d-part1'], 'uuids': ['3b6f09d8-1f6d-44b1-8eae-585ac69873c0'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '1953522688', 'sectorsize': 512, 'size': '931.51 GB', 'uuid': '3b6f09d8-1f6d-44b1-8eae-585ac69873c0', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '1953525168', 'sectorsize': '512', 'size': '931.51 GB', 'host': 'SATA controller: Intel Corporation Cannon Lake PCH SATA AHCI Controller (rev 10)', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-350000399ab901d7d', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'wwn-0x50000399ab901d7d'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'ATA', 'model': 'TOSHIBA MG04ACA1', 'sas_address': 'None', 'sas_device_handle': 'None', 'serial': 'Y9I5K31DF6XF', 'removable': '0', 'support_discard': '0', 'wwn': '0x50000399ab901d7d', 'partitions': {'sda1': {'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 
'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 'scsi-350000399ab901d7d-part1', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'wwn-0x50000399ab901d7d-part1'], 'uuids': ['3b6f09d8-1f6d-44b1-8eae-585ac69873c0'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '1953522688', 'sectorsize': 512, 'size': '931.51 GB', 'uuid': '3b6f09d8-1f6d-44b1-8eae-585ac69873c0', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '1953525168', 'sectorsize': '512', 'size': '931.51 GB', 'host': 'SATA controller: Intel Corporation Cannon Lake PCH SATA AHCI Controller (rev 10)', 'holders': []}}}, {'msg': 'non-zero return code', 'cmd': 'sgdisk --zap-all /dev/dm-3 || sgdisk --zap-all /dev/dm-3', 'stdout': '', 'stderr': "Problem opening /dev/dm-3 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!\nProblem opening /dev/dm-3 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!", 'rc': 2, 'start': '2021-06-23 12:06:01.877778', 'end': '2021-06-23 12:06:01.881014', 'delta': '0:00:00.003236', 'changed': True, 'failed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-3 || sgdisk --zap-all /dev/dm-3', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': [], 'stderr_lines': ['Problem opening /dev/dm-3 for reading! Error is 2.', 'The specified file does not exist!', "Problem opening '' for writing! Program will now terminate.", 'Warning! MBR not overwritten! Error is 2!', 'Problem opening /dev/dm-3 for reading! Error is 2.', 'The specified file does not exist!', "Problem opening '' for writing! 
Program will now terminate.", 'Warning! MBR not overwritten! Error is 2!'], '_ansible_no_log': False, 'item': {'key': 'dm-3', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_4', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFq11rAGCfcdbm6RAvaPqOSzKWpSU7IxFb'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-3', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_4', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFq11rAGCfcdbm6RAvaPqOSzKWpSU7IxFb'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}], 'changed': True, 'msg': 'All items completed'}}, 'Traceback (most recent call last)': 'File "/home/teuthworker/src/git.ceph.com_git_ceph-cm-ansible_master/callback_plugins/failure_log.py", line 44, in log_failure log.error(yaml.safe_dump(failure)) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/__init__.py", line 306, in safe_dump return dump_all([data], stream, Dumper=SafeDumper, **kwds) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/__init__.py", line 278, in dump_all dumper.represent(data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 27, in 
represent node = self.represent_data(data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 199, in represent_list return self.represent_sequence(\'tag:yaml.org,2002:seq\', data) File 
"/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 92, in represent_sequence node_item = self.represent_data(item) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 117, in represent_mapping node_key = self.represent_data(item_key) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 58, in represent_data node = self.yaml_representers[None](self, data) File 
"/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 231, in represent_undefined raise RepresenterError("cannot represent an object", data)', 'yaml.representer.RepresenterError': "('cannot represent an object', 'key')"}

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2021-06-06_07:01:03-rados-master-distro-basic-gibba/6155877/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2021-06-06_07:01:03-rados-master-distro-basic-gibba/6155877
  • description: rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/classic msgr-failures/osd-delay msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/cache-pool-snaps-readproxy}
  • duration: 0:05:44
  • email: ceph-qa@ceph.io
  • failure_reason: {'Failure object was': {'gibba040.front.sepia.ceph.com': {'results': [{'cmd': 'sgdisk --zap-all /dev/dm-1 || sgdisk --zap-all /dev/dm-1', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:55.764178', 'end': '2021-06-23 12:05:56.837521', 'delta': '0:00:01.073343', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-1 || sgdisk --zap-all /dev/dm-1', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! 
You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_2', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFpdj0uu8VGcj3zhHP3BT6MhX1ccqB2PtN'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_2', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFpdj0uu8VGcj3zhHP3BT6MhX1ccqB2PtN'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}, {'cmd': 'sgdisk --zap-all /dev/nvme0n1 || sgdisk --zap-all /dev/nvme0n1', 'stdout': 'Creating new GPT entries.\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:56.975054', 'end': '2021-06-23 12:05:58.027203', 'delta': '0:00:01.052149', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/nvme0n1 || sgdisk --zap-all /dev/nvme0n1', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'GPT data structures destroyed! 
You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['lvm-pv-uuid-iVdqm6-FNBB-4tVz-feBG-Fl7s-Npha-NE31Me', 'nvme-INTEL_SSDPEL1K375GA_PHKM913400J3375A', 'nvme-nvme.8086-50484b4d3931333430304a3333373541-494e54454c2053534450454c314b3337354741-00000001'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2', 'dm-3', 'dm-4']}, 'vendor': 'None', 'model': 'INTEL SSDPEL1K375GA', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '732585168', 'sectorsize': '512', 'size': '349.32 GB', 'host': 'Non-Volatile memory controller: Intel Corporation NVMe Datacenter SSD [Optane]', 'holders': ['vg_nvme-lv_2', 'vg_nvme-lv_5', 'vg_nvme-lv_3', 'vg_nvme-lv_1', 'vg_nvme-lv_4']}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['lvm-pv-uuid-iVdqm6-FNBB-4tVz-feBG-Fl7s-Npha-NE31Me', 'nvme-INTEL_SSDPEL1K375GA_PHKM913400J3375A', 'nvme-nvme.8086-50484b4d3931333430304a3333373541-494e54454c2053534450454c314b3337354741-00000001'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2', 'dm-3', 'dm-4']}, 'vendor': 'None', 'model': 'INTEL SSDPEL1K375GA', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '732585168', 'sectorsize': '512', 'size': '349.32 GB', 'host': 'Non-Volatile memory controller: Intel Corporation NVMe Datacenter SSD [Optane]', 'holders': ['vg_nvme-lv_2', 'vg_nvme-lv_5', 'vg_nvme-lv_3', 'vg_nvme-lv_1', 'vg_nvme-lv_4']}}}, {'cmd': 'sgdisk --zap-all /dev/dm-4 || sgdisk --zap-all /dev/dm-4', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the 
next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:58.160552', 'end': '2021-06-23 12:05:59.221434', 'delta': '0:00:01.060882', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-4 || sgdisk --zap-all /dev/dm-4', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-4', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_5', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFlcaaj81Mopo31Qoyu1iQZzD1JlE3Lg7g'], 'uuids': ['6ae894fb-370d-4368-bcd6-00a2b169d69d'], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '27262976', 'sectorsize': '512', 'size': '13.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-4', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_5', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFlcaaj81Mopo31Qoyu1iQZzD1JlE3Lg7g'], 'uuids': ['6ae894fb-370d-4368-bcd6-00a2b169d69d'], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 
'sectors': '27262976', 'sectorsize': '512', 'size': '13.00 GB', 'host': '', 'holders': []}}}, {'cmd': 'sgdisk --zap-all /dev/dm-2 || sgdisk --zap-all /dev/dm-2', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': '', 'rc': 0, 'start': '2021-06-23 12:05:59.357466', 'end': '2021-06-23 12:06:00.427944', 'delta': '0:00:01.070478', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-2 || sgdisk --zap-all /dev/dm-2', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! 
You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': [], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_3', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFk9SXRRj3epmMqJbKzBeC3iyBXdclk1aR'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_3', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFk9SXRRj3epmMqJbKzBeC3iyBXdclk1aR'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}, {'cmd': 'sgdisk --zap-all /dev/dm-0 || sgdisk --zap-all /dev/dm-0', 'stdout': 'Creating new GPT entries.\\nWarning: The kernel is still using the old partition table.\\nThe new table will be used at the next reboot or after you\\nrun partprobe(8) or kpartx(8)\\nGPT data structures destroyed! You may now partition the disk using fdisk or\\nother utilities.', 'stderr': 'Warning! MBR not overwritten! 
Error is 5!', 'rc': 0, 'start': '2021-06-23 12:06:00.570109', 'end': '2021-06-23 12:06:01.734130', 'delta': '0:00:01.164021', 'changed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-0 || sgdisk --zap-all /dev/dm-0', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': ['Creating new GPT entries.', 'Warning: The kernel is still using the old partition table.', 'The new table will be used at the next reboot or after you', 'run partprobe(8) or kpartx(8)', 'GPT data structures destroyed! You may now partition the disk using fdisk or', 'other utilities.'], 'stderr_lines': ['Warning! MBR not overwritten! Error is 5!'], '_ansible_no_log': False, 'failed': False, 'item': {'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_1', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFBQDfQaBnMz7zMHah25aOxtoo9v6apraQ'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_1', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFBQDfQaBnMz7zMHah25aOxtoo9v6apraQ'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}, {'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', 
'_ansible_no_log': False, 'item': {'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-350000399ab901d7d', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'wwn-0x50000399ab901d7d'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'ATA', 'model': 'TOSHIBA MG04ACA1', 'sas_address': 'None', 'sas_device_handle': 'None', 'serial': 'Y9I5K31DF6XF', 'removable': '0', 'support_discard': '0', 'wwn': '0x50000399ab901d7d', 'partitions': {'sda1': {'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 'scsi-350000399ab901d7d-part1', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'wwn-0x50000399ab901d7d-part1'], 'uuids': ['3b6f09d8-1f6d-44b1-8eae-585ac69873c0'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '1953522688', 'sectorsize': 512, 'size': '931.51 GB', 'uuid': '3b6f09d8-1f6d-44b1-8eae-585ac69873c0', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '1953525168', 'sectorsize': '512', 'size': '931.51 GB', 'host': 'SATA controller: Intel Corporation Cannon Lake PCH SATA AHCI Controller (rev 10)', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF', 'scsi-350000399ab901d7d', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF', 'wwn-0x50000399ab901d7d'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'ATA', 'model': 'TOSHIBA MG04ACA1', 'sas_address': 'None', 'sas_device_handle': 'None', 'serial': 'Y9I5K31DF6XF', 'removable': '0', 'support_discard': '0', 'wwn': '0x50000399ab901d7d', 'partitions': {'sda1': {'links': {'ids': ['ata-TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 
'scsi-0ATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'scsi-1ATA_TOSHIBA_MG04ACA100N_Y9I5K31DF6XF-part1', 'scsi-350000399ab901d7d-part1', 'scsi-SATA_TOSHIBA_MG04ACA1_Y9I5K31DF6XF-part1', 'wwn-0x50000399ab901d7d-part1'], 'uuids': ['3b6f09d8-1f6d-44b1-8eae-585ac69873c0'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '1953522688', 'sectorsize': 512, 'size': '931.51 GB', 'uuid': '3b6f09d8-1f6d-44b1-8eae-585ac69873c0', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '1953525168', 'sectorsize': '512', 'size': '931.51 GB', 'host': 'SATA controller: Intel Corporation Cannon Lake PCH SATA AHCI Controller (rev 10)', 'holders': []}}}, {'msg': 'non-zero return code', 'cmd': 'sgdisk --zap-all /dev/dm-3 || sgdisk --zap-all /dev/dm-3', 'stdout': '', 'stderr': "Problem opening /dev/dm-3 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!\nProblem opening /dev/dm-3 for reading! Error is 2.\nThe specified file does not exist!\nProblem opening '' for writing! Program will now terminate.\nWarning! MBR not overwritten! Error is 2!", 'rc': 2, 'start': '2021-06-23 12:06:01.877778', 'end': '2021-06-23 12:06:01.881014', 'delta': '0:00:00.003236', 'changed': True, 'failed': True, 'invocation': {'module_args': {'_raw_params': 'sgdisk --zap-all /dev/dm-3 || sgdisk --zap-all /dev/dm-3', '_uses_shell': True, 'warn': True, 'stdin_add_newline': True, 'strip_empty_ends': True, 'argv': 'None', 'chdir': 'None', 'executable': 'None', 'creates': 'None', 'removes': 'None', 'stdin': 'None'}}, 'stdout_lines': [], 'stderr_lines': ['Problem opening /dev/dm-3 for reading! Error is 2.', 'The specified file does not exist!', "Problem opening '' for writing! Program will now terminate.", 'Warning! MBR not overwritten! Error is 2!', 'Problem opening /dev/dm-3 for reading! Error is 2.', 'The specified file does not exist!', "Problem opening '' for writing! 
Program will now terminate.", 'Warning! MBR not overwritten! Error is 2!'], '_ansible_no_log': False, 'item': {'key': 'dm-3', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_4', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFq11rAGCfcdbm6RAvaPqOSzKWpSU7IxFb'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}, 'ansible_loop_var': 'item', '_ansible_item_label': {'key': 'dm-3', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-vg_nvme-lv_4', 'dm-uuid-LVM-Rj8SiFDmMV4JSKJAypcJksCA79DeqccFq11rAGCfcdbm6RAvaPqOSzKWpSU7IxFb'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'None', 'model': 'None', 'sas_address': 'None', 'sas_device_handle': 'None', 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': '', 'sectors': '176160768', 'sectorsize': '512', 'size': '84.00 GB', 'host': '', 'holders': []}}}], 'changed': True, 'msg': 'All items completed'}}, 'Traceback (most recent call last)': 'File "/home/teuthworker/src/git.ceph.com_git_ceph-cm-ansible_master/callback_plugins/failure_log.py", line 44, in log_failure log.error(yaml.safe_dump(failure)) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/__init__.py", line 306, in safe_dump return dump_all([data], stream, Dumper=SafeDumper, **kwds) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/__init__.py", line 278, in dump_all dumper.represent(data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 27, in 
represent node = self.represent_data(data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 199, in represent_list return self.represent_sequence(\'tag:yaml.org,2002:seq\', data) File 
"/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 92, in represent_sequence node_item = self.represent_data(item) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping node_value = self.represent_data(item_value) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data node = self.yaml_representers[data_types[0]](self, data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict return self.represent_mapping(\'tag:yaml.org,2002:map\', data) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 117, in represent_mapping node_key = self.represent_data(item_key) File "/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 58, in represent_data node = self.yaml_representers[None](self, data) File 
"/home/teuthworker/src/git.ceph.com_git_teuthology_f359b10daba6e0103d42ccfc021bc797f3cd7edc/virtualenv/lib/python3.6/site-packages/yaml/representer.py", line 231, in represent_undefined raise RepresenterError("cannot represent an object", data)', 'yaml.representer.RepresenterError': "('cannot represent an object', 'key')"}
  • flavor:
  • job_id: 6155877
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: gibba
  • name: teuthology-2021-06-06_07:01:03-rados-master-distro-basic-gibba
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.3
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • sha1: 76d882bd6dc255cde9c124bcae8016c4da514d05
      • branch: master
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 89951a1d20252d6829599ab3fa5df234fc6081ae
      • fs: xfs
      • conf:
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • global:
          • osd_max_pg_log_entries: 2
          • ms inject delay max: 1
          • ms inject delay type: osd
          • osd_pool_default_min_size: 2
          • osd_pool_default_size: 2
          • ms type: async
          • ms inject delay probability: 0.005
          • mon client directed command retry: 5
          • osd_pg_log_trim_min: 0
          • osd_async_recovery_min_cost: 1
          • ms inject socket failures: 2500
          • osd_min_pg_log_entries: 1
          • mon election default strategy: 1
          • ms inject internal delays: 0.002
        • mon:
          • debug paxos: 20
          • mon scrub interval: 300
          • debug mon: 20
          • debug ms: 1
        • osd:
          • filestore queue throttle max multiple: 10
          • osd debug verify missing on start: True
          • mon osd backfillfull_ratio: 0.85
          • osd shutdown pgref assert: True
          • bdev async discard: True
          • osd max backfills: 9
          • mon osd nearfull ratio: 0.8
          • osd memory target: 1610612736
          • osd objectstore: bluestore
          • journal throttle high multiple: 2
          • osd op queue: debug_random
          • osd_min_pg_log_entries: 3000
          • osd scrub min interval: 60
          • bdev enable discard: True
          • osd failsafe full ratio: 0.95
          • mon osd full ratio: 0.9
          • osd op queue cut off: debug_random
          • debug ms: 1
          • bluestore fsck on mount: True
          • osd scrub max interval: 120
          • debug osd: 20
          • debug bluestore: 20
          • debug bluefs: 20
          • debug rocksdb: 10
          • osd debug verify cached snaps: True
          • osd max markdown count: 1000
          • filestore queue throttle high multiple: 2
          • osd_max_pg_log_entries: 3000
          • bluestore block size: 96636764160
          • journal throttle max multiple: 10
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(REQUEST_STUCK\)
        • \(OSD_SLOW_PING_TIME
        • but it is still running
        • objects unfound and apparently lost
        • overall HEALTH_
        • \(OSDMAP_FLAGS\)
        • \(OSD_
        • \(PG_
        • \(POOL_
        • \(CACHE_POOL_
        • \(SMALLER_PGP_NUM\)
        • \(OBJECT_
        • \(SLOW_OPS\)
        • \(REQUEST_SLOW\)
        • \(TOO_FEW_PGS\)
        • slow request
        • must scrub before tier agent can activate
    • install:
      • ceph:
        • sha1: 89951a1d20252d6829599ab3fa5df234fc6081ae
    • admin_socket:
      • branch: master
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a']
    • ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'node-exporter.b']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=de9ec4f31c49478f83e83b991aed952d
  • status: dead
  • success: False
  • branch: master
  • seed:
  • sha1: 89951a1d20252d6829599ab3fa5df234fc6081ae
  • subset:
  • suite:
  • suite_branch: master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 76d882bd6dc255cde9c124bcae8016c4da514d05
  • targets:
    • gibba044.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvLITslTYB93Lnxkvi03nBmEXkKw/pfuVuM3L9ws93dNRCOr/R2/LDF2xHECvZKEZHTO/1RU4QSxFOMi8ZPW434G8orAi/CRb7SQyU7oRbJPohUh2Q+fcfFqcA5pgJ06OqwhA0LC6Dz/8NES5inNjDWZG28UmbIsA1qhY+lHUYlXLvv0mCSaWmrBfZ5n1/JicfOYoEOCPFNwCcVhxZLE1nPTRB2y92LVNDvZRgYyqg1S+iZsNGmd0UqguAX9mgU68r+DY9tpW9NUjn0fzDfpcJCRMSejhKgqCiP4KaPqqXvYPpb3wYTxiqzGVIDdnrjObA7TwBV0l4FgXBhdok9X+dIaY6dg9nspyCHbSoDVmGu+mMg43IfVJvxulYa2jYN5hSeHtdf3FznJ7Op+L8RuqmcNa8Jim+zCaGUJPAPVj+JCr1YjkOLh84/E/aj5/kmyJvrtwnwFx5S44334IWaDBtqrwV4W8Kk25WGvfwTPhh055ciDmGJ6THEvkRfbg93NE=
    • gibba040.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9vW4CdP11VIBQkywokbXLxxz4YaQ8oKhFzlFNHfQuZvwgMIxP5k8KCXnn8qFYjK7/pTeAvFMTy0GTaXJvRUyKvu0kbxF0aFA0YnNajLHxnqM0aEQO1U3RD2CD7GSwWKtLKz+C8WlWwXW39VR2YSNskapJ/vGzNLNAP5EyC9yqhbQr8j0opJNRabAeWdYswrJwPLTB45G1Tw1PQqm953QpdPVFzYKqHZTwD59JT9vJJb/CZQAIsA6VVaEfdZWhlnN1cgMWyBT6ZmR9YnH06HvD4yIy6gw0FcMgBp0/J7P35F2PAgXs5v80TpynBoBLCk/wleYefXoi+lO3QyOxZ0qEkI5NdH4tqJ91zCkEg0FQSae8BtkE3gPkL7jiQjDznBSAHzICFX5CTjm5ordtue7kdClmFXHRlgwT3dbaWh+W7go7DXYTN3bmvBle8Hx6QFcuM7EYI6j3e+rUP2cGrXmGNlzK2qDfoee3cQ+u3TuNAQdcKt9i/BiuuPQx+Q31iSc=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
      • conf:
        • osd:
          • debug monc: 20
    • thrashosds:
      • chance_pgnum_grow: 3
      • chance_pgpnum_fix: 1
      • timeout: 1200
    • exec:
      • client.0:
        • sudo ceph osd pool create base 4
        • sudo ceph osd pool application enable base rados
        • sudo ceph osd pool create cache 4
        • sudo ceph osd tier add base cache
        • sudo ceph osd tier cache-mode cache readproxy
        • sudo ceph osd tier set-overlay base cache
        • sudo ceph osd pool set cache hit_set_type bloom
        • sudo ceph osd pool set cache hit_set_count 8
        • sudo ceph osd pool set cache hit_set_period 3600
        • sudo ceph osd pool set cache target_max_objects 250
    • rados:
      • op_weights:
        • snap_remove: 50
        • write: 100
        • rollback: 50
        • read: 100
        • copy_from: 50
        • snap_create: 50
        • cache_try_flush: 50
        • cache_flush: 50
        • cache_evict: 50
        • delete: 50
      • ops: 4000
      • pool_snaps: True
      • clients:
        • client.0
      • objects: 500
      • pools:
        • base
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-06-06 07:04:10
  • started: 2021-06-23 11:55:08
  • updated: 2021-06-23 12:08:02
  • status_class: danger
  • runtime: 0:12:54
  • wait_time: 0:07:10