Description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/1 scrub/yes tasks/{0-check-counter workunit/suites/fsync-tester}}

Log: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931186/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=ae4823eee7f4409ead69795f16a67c88

Failure Reason:

too many values to unpack (expected 1)
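
The failure reason is the message of a Python ValueError rather than a Ceph health warning. As a minimal sketch (hypothetical values, not the actual code path that failed in this run), Python raises this exact message when an iterable holding more than one element is unpacked into a single target:

    values = ["foo", "bar"]   # hypothetical input carrying an unexpected extra item
    (only,) = values          # ValueError: too many values to unpack (expected 1)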

  • log_href: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931186/teuthology.log
  • archive_path: /home/teuthworker/archive/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931186
  • description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/1 scrub/yes tasks/{0-check-counter workunit/suites/fsync-tester}}
  • duration:
  • email:
  • failure_reason: too many values to unpack (expected 1)
  • flavor:
  • job_id: 5931186
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 20.04
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • check-counter:
      • dry_run: True
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
      • branch: julpark_workload
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
      • conf:
        • global:
          • ms die on skipped message: False
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • rados osd op timeout: 15m
          • debug ms: 1
          • rados mon op timeout: 15m
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • rados mon op timeout: 15m
          • rados osd op timeout: 15m
          • mds debug scatterstat: True
          • mds debug frag: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • bluestore fsck on mount: True
          • osd_max_omap_entries_per_request: 10000
          • debug osd: 25
          • bluestore compression mode: aggressive
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • debug journal: 20
      • cephfs:
        • session_timeout: 300
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
    • install:
      • ceph:
        • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
    • admin_socket:
      • branch: master
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_julpark@teuthology
  • pid:
  • roles:
    • ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
    • ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1']
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=ae4823eee7f4409ead69795f16a67c88
  • status: fail
  • success: False
  • branch: master
  • seed:
  • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
  • subset:
  • suite:
  • suite_branch: julpark_workload
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
  • targets:
    • smithi117.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDj4faSX+q7iKecLcxlg5YTx1i5O51Yilb18S6sukwq8B/zwWUHoPD63cdtjtAYr1HQYyppQGwaux8JOnPRdbmUDD5UpeLtBbNHF2120+T5ruzJvBnYxHTCaxW2JrR+rkIl1WXmciyTD4J/3Mbt3PiRta6y17TlSy1Kfxp8pYtqZVWLtZ8Tj9T/K2sjsMLMf5267FrO1e+S/mOB8KGsaROBRlij+xVTtu0PaugqLlHpM05ccXybuD2O49Rj5hHjqwe6FGAYhorvmJdEiz649IRUMjCyAifxHgcuu/VfzmAy0LhIGx2C2l5o2dabjbON+xsEFUC2N7XD3HN9iHys7ExVCwnLW+oG6l0/ZjDVZ0OAoalq21ZvLbIh5YRMvuhpGB2nYv4jyHn0ksEyFhrAhXNanHGI9mAj06Vw1wIOMyg5xlJD84h8/97IHnDI4IGf7kaM+RXIzMQ7/DvMDxRMBLfQV7aCXWq0ePTMX7WfysxkRaEUPngn9w0miNsOt+rsb30=
    • smithi094.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCm6YciOMQNL4K9mkBGglBUyGgGqm7iJMQU7qTlthY3q+BuXvwNaR+lRDWcfaG8yJfBF0owA6bUrLdoyjrlXM7n/4GGL/nBbGDKK0NSt451geA654At+zAyWsVJ+2CZLg0O6dOudQ7/2gQH7mDKAqvuL4KYKm7XE36P19IHFghqEgTJ2EX9urc+FHKDzRSJYEnZvCL1tN3GwRTiDrZPhkdLxKY6O4yNpUbWOXK8AK/UrJG03PFiMxrSwm9NgntGOTeXah0BThJIKGjWXiD29PqMesUeCYNdZNePMTJueRjNdaABAaeUzxreGr9YjRJTaFKsKSTuHfgTLCpNb/MPdC0v7RweSbDqZpVGnFIa/DKQIVCNIBAbeG7lmgnq0M+gktIkS1YlddhXkcdFUn+1b/y/mlO0mZFwHlQqarFsJUULxA6tWbm8izJAz8PB94x+5L1d4DspCwafdu7rVKMOi4T7Sdh0aMuv6KXeYWZTSwa7L8Oexlyfwz9gRGVGS2x2JP8=
    • smithi072.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCn+aSOJpOaoZbJ8cxQLoHwF03EDrda5SulaaivsqcrJEeSd2EuHiLY19bWUtUvHzp2XMUufdJIZNcgp1M4b/nmJaDLKXf0LW7X1Fsl46s34erM+cPk07IN3b5phDmUODRzfBnvCWZkp507XT+l8nRQMeBfdUmQASkeuS+OZji79h9FGidabzfcqEkXFx550dMjr51aH+amUsVWzwl/xxM75ksRMZ6N9HO8feyn9quv7I4qFC8otRkF/KNKaxlcd4wbcKn2TXAZ/RgUMeq/r5+wBKge1TWorUDdY7lUsdEoxlQ/l8un4MWed5KRV6Z8n4gvq0srdrXhIPU8DWUZg7fSUT3Tzzua1ssiKalHwemN8lwQacjnlHwoaiY/733j/h7tTmzWI68cPQBh1QWiZ4gWDvYgL9h4fuOPfFCQKoqdgTIT9LqRTLNUu4L1tWXmrFqHdge37jYwWeb95/iavLBC32hFnDKND5FM+0hGrTDeROjlacdp5c9j2woeM2uPdRM=
    • smithi086.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDOch5+MKKP18n1q7NzB+VNKlw9Bfius5JDGxw+naCft/W5oG9RodgEqYG7gK0lYYIyYhsPle2LkSJYIBfL0xiZo/0S7HYXoraZAIkbf1Lq0YKc/RgWMKDAm1KvQXKU4kZtj0o0mpX9ORYlDczWitAhEhzJEHGdFgshLismojVjyHGZURFBkJu3HR5v7IrTLo2UH3sOhsIfv1TNy7VDfjvQ7EwrMnQhwahnB6jhTuUEQsQPCTiNNKMYzbU2JvNydYJ2oI13lhlhqWIt4A1cwO6Pb6vTBjU1t6+tyBHToXPHwJsNq9lLxcENkVhwNsQajlq7Y2jaM1pfAhF9Ymukyp1Av+a6g4IxrcvW5QUFsV666F/SM59AU/dTriXl83MMsagb9WMbsfZ3ptaW1j0jsTbP7LpaaDJzvmbq9eD29TmRdJBcePg3vAIV76aFz5IoMcqc7RgmL4tWtzm0BmsC242ZwofXcepgIlglq468EH+92ZRVX598aiPBFURGHEY3HiU=
    • smithi203.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPqRzhOBgQxwctFWiXzJnObAZzOGnRberm+9zZtKn2uFu6AqQhl4M9625ayZ1frQRSWyYhb9oYWvueJQ3bMUoLj9eDdZ6VMfv1dONaCu/5kWqDEXXJpWwMyUruhJptPCiVzXdTkiwb2mp/cC/XB5XroUS/r0mxEAZ5TCh2JUZ/S1MA28PqT8Uv6OIqq6C8UiBVKrlg7aPDxBIZ6jkdrjcE9WYqOCF9BMoRL6zibNxJ+luwUwPn2VA2MWYPJ/kvnEG8PjQt3eU2ILdUO5Y0mtLWiPUdFXoSS/EV2y3AI7QIKrWKL/AQhEQ7byhIWi8fGd5OzC+9sKkBgGE+grPCrwUO+STOZblXGDwmOGFAlpi7T2Ny6P/AKXPX4RPlIkuxoaD018XG3/vhUGk5OyUuo7iuNmYel8thnQqpCOj7fMzp2xgpaxKiWuMMipMqDhGWcF/XalAsAB3tMAU4sL2tdDLjOAgnoOJwF7Lxz98y5UxYnXc4MjKRw/fmdIaZtCgih/8=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
      • roleless: True
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • host.a:
        • ceph fs volume create foo
        • ceph fs volume create bar
    • sleep:
      • interval: 60
    • kclient:
    • ceph-fuse:
      • client.0:
        • cephfs_name: foo
    • ceph-fuse:
      • client.1:
        • cephfs_name: bar
    • fwd_scrub:
      • scrub_timeout: 900
      • sleep_between_iterations: 1
    • check-counter:
      • workunit:
        • clients:
          • all:
            • suites/fsync-tester.sh
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-03-03 20:05:45
  • started: 2021-03-04 19:26:51
  • updated: 2021-03-04 19:35:14
  • status_class: danger
  • runtime: 0:08:23