Description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{ubuntu_latest} mount mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/yes tasks/{0-check-counter workunit/fs/misc}}

Log: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931178/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=7a35f1033c384edc8d48dcbe64d357bc

Failure Reason:

too many values to unpack (expected 1)
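
This is the standard Python ValueError raised when an iterable holding more than one element is unpacked into a single variable. A minimal, hypothetical sketch of the pattern (not the actual teuthology code path, which is not shown in this excerpt):

    # Hypothetical illustration only -- not the teuthology source.
    # The job below creates two CephFS volumes ("foo" and "bar"), so any
    # helper that assumes exactly one filesystem and unpacks the list
    # into a single name would fail with exactly this message.
    filesystems = ["foo", "bar"]
    (only_fs,) = filesystems
    # ValueError: too many values to unpack (expected 1)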

  • log_href: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931178/teuthology.log
  • archive_path: /home/teuthworker/archive/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931178
  • description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{ubuntu_latest} mount mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/yes tasks/{0-check-counter workunit/fs/misc}}
  • duration:
  • email:
  • failure_reason: too many values to unpack (expected 1)
  • flavor:
  • job_id: 5931178
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 20.04
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • check-counter:
      • counters:
        • mds:
          • mds.exported
          • mds.imported
          • mds.dir_split
    • workunit:
      • sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
      • branch: julpark_workload
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
      • conf:
        • global:
          • ms die on skipped message: False
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • rados osd op timeout: 15m
          • debug ms: 1
          • rados mon op timeout: 15m
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • rados mon op timeout: 15m
          • rados osd op timeout: 15m
          • mds debug scatterstat: True
          • mds debug frag: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • bluestore fsck on mount: True
          • osd_max_omap_entries_per_request: 10000
          • debug osd: 25
          • bluestore compression mode: aggressive
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • debug journal: 20
      • cephfs:
        • session_timeout: 300
        • max_mds: 3
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
    • install:
      • ceph:
        • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
    • admin_socket:
      • branch: master
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_julpark@teuthology
  • pid:
  • roles:
    • ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
    • ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1']
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=7a35f1033c384edc8d48dcbe64d357bc
  • status: fail
  • success: False
  • branch: master
  • seed:
  • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
  • subset:
  • suite:
  • suite_branch: julpark_workload
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
  • targets:
    • smithi118.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClZzCvRFPFBFsnG9yazfIsd+nzVBf2HoP4IE4+EG9AbtlbcQ6xRfjnTh2DfIts26QRB3LZKVFdq6J7d3GC+6ynHxVzMvLB15c9XB5mLJ+Q9uMxEGvy7RXFNJY2V+AZNc7wxXcqBouafxnMmVyaDy3hHKjnYi6GGVPf7dINRDccLxHqC8lsFp8LTxktC3MrVdZyP1yajmZ/IqR6YTOqhBXx99XTUCGnoBieUUBKqze2cJ3EOBZsJSzOTfYOavOJ2lWDnCwcG5vcD6lId8+l5WskyIk9SNyEsIVKFBTDLPA3Xpag/DoqtMKu7fz/+zsEjQNopfDhQi69NlF/YMiXiPPhlQnK3StBeDqLpdjGtqSsf9uhPMOdrQUca3S3/obdmb7mD+xAnt0IpFzvwfs764dO4vOzVTmba5yooHzqO+NmDmZ1FAm7Da2hPOnhxU2m5Dzq0GZb6tSO2YXyQTyDXjdueaJ1W37cdav8OyiXGqJ9GdhF9HHl4wHZMdVcE+dqaVM=
    • smithi178.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8CRqDxH+0/kdZOMNwpHEpXkxqP87XNh/WNyabyHZteacQ5FMRrn5vin4niiu4XZyWFLyGYt3XW4uYuxGo3r1XLIlcvOGqZHaoFm6DqP2nPbI/dMwmyan7NtpxbwSWSoy9XKT3E2JuIevB1INRLmpxaXRfJc73dt8200PiJKT2oeBVLdbrjarJQoWkDOk0joDFRkLM1uDFDGI6paJiIq/v6MBV1/hh7toX9VSoioXmLlnXsPcD/crHYGOL5k+PBT6ckD0Mmvq1res1/PNX0GS85JiJMOtUydJqr+S+DNtd1N0EtWeAlO/S+txkr4HxZPom5S+Ohr+T6JAPalSF+lTurFC7qMPdYzFE2ByMhpgYiRNhLllSHvqlntvoAsDrZhAJGB9WPHOr9SUB88hEu5vohwV7zhi/MQWVh34OFoowiNDXmATeYo8vS34+ae6r7F34wlQl5n+KVX/tBXlD8TG2YoDFDAdcohooXNCaxCkH9akAwuTTh/Y6xZIFsIn3nDU=
    • smithi050.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3eRXWvHLSQjSXMGcNyG1GI/XQQx8CcYT6z6qjNUIUDp3gb7N2wRDqEgy4kJ0iPN/Kgtwxqz0r/ls905sC3Mr/KNhNyT7kwE/xaOvUKJX6OE8JouwRBOzsqYHcI52RNodZsjGuzD67Isf+iWJK9//1WAStSdt7yliJmtKviRzzJ6FwkDh9y2yrCwxPG1BIm5zQzOm5Olu0Rn/R3b0EHvxkyBQaQ8Xsw8sa0Tw9nER1w0N/Bk/mNgz3HewILudm4MBskYYVaGshjbbyfVxdV5+quEMKeLWfpLIdzf3oA7NQeSrH0SRyEDTPKrIHp5LGqjzTIlz/wtoqNgKeT6+uw85TOXMLifjYXsRx3SrgM9vppWbEwuoxlD6+MQhM+jmAVJZq5m+OMCsCsgJkvue3cHvzoR8dglsTO2BAO7yOv3g/5RctzUKi9QVLkjTWBWWLaJQF1e2Yh4rhJPUf/ki/1f6VzNkwv0EcJT+YifwThfis7snUi9V3AOJHUsSsiJCQV/E=
    • smithi031.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDLpknoV43CwpjY9hWEujTvyYchIqA/VH/ot0fIeDHe5inVxpfyYE2EMyUvi8jHa6G0mYgFkzt8tUc5KOoOI+0KoMi4gBp4GB562+0xscElleuGKZS4VfT/Z5CF/gMC+m9s0uO9ek8LjfddlAUymqMceoQVHQxgDojsNfvKUXcd6YEAe4GyJPsy37JhSwW0pE3PcnfeF9GedOmCC597YZCrqm4pRN+3t0NL0ZNwrTkgWkLb/P7lerad/0Bnc3yA2Cw2l9CJL6GzDZcgH+sPSNIU3BTal7B0ZInliwWpF5jMw+/OQClMNVxysHiV5JPGgkQOaLQGHY/v1FeDR2gTDO9WPQTL1lgV8KuRBq1APcBHzQh2eKx7M/cJOjkLZHArmlKEBL4iCrZ4+hZMccQIHxhRPBi+PzSqM6gTEuDJkmG8Q1muEMH+0+2dBbcEj0Iu7hVTurfuXd4CO3u9P0fXA9y9SqudJSsW6Gx6lFqSliofkZKCpNdQf8YcDGmVaYT7O/U=
    • smithi142.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDChf9nu7VExIfUbrn3iSI3PLjmUoYYVjk8ayGOU80kKJ6M5x1EwImMJab9S8rsIoD+AzObRPaGOlbFp9q9BeR/T6AfsrP+Z28s733DRrxg5qKZdktZsJ1JQu6Gif++7VxCwoml7GynYu5dPygw3CF1L14EOGlvy5i+ocRIxzyt3+QG8QnoNtuaGR4gQ5+OJpiUzxUUNI3EPw33pEB6pXQxjjH784YuF8yumDnGlAQE5+/t/aNWEWF2zFcgyeHxEhzfUw8lSrb1CTv5jPE2TP5uAwNz1cOL6k6qNZSHRnEjmwtaKj12NBJxmaYW3eEugY6n182RA/aV9uHxKo8K3K2UofSSzWWx0/AevSGJTsFVFH6a+Imbh7RPVrpgQSu9p0g3N/0j6p6t0d6//EtG+C5n7h2qutUi7twhbreS3UGWgBbUBW0tUwRw/7XDAA0ebzhU88dpf1TS9jVJ60Jas8PKXXx09PjcIJyEzJBKNqHS6cpDm1xgNiSN+SVSgg4q73U=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
      • roleless: True
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • host.a:
        • ceph fs volume create foo
        • ceph fs volume create bar
    • sleep:
      • interval: 60
    • kclient:
    • ceph-fuse:
      • client.0:
        • cephfs_name: foo
    • ceph-fuse:
      • client.1:
        • cephfs_name: bar
    • fwd_scrub:
      • scrub_timeout: 900
      • sleep_between_iterations: 1
    • check-counter:
      • workunit:
        • clients:
          • all:
            • fs/misc
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-03-03 20:05:38
  • started: 2021-03-04 19:22:26
  • updated: 2021-03-04 19:31:14
  • status_class: danger
  • runtime: 0:08:48