Description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/1 scrub/no tasks/{0-check-counter workunit/direct_io}}

Log: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931177/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=e1e67b20a289415397247afc8c2dc9a7

Failure Reason:

too many values to unpack (expected 1)
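
"too many values to unpack (expected 1)" is the generic Python ValueError raised when an iterable handed to a single-target unpacking assignment contains more than one item. A minimal sketch of the error class (hypothetical data, not the actual teuthology code path that failed in this job):

    # Minimal sketch of the error class behind this failure_reason
    # (hypothetical data; not the actual teuthology code path).
    configs = ['foo', 'bar']   # e.g. two entries where exactly one is expected

    try:
        (only,) = configs      # single-target unpack: requires exactly one item
    except ValueError as e:
        print(e)               # -> too many values to unpack (expected 1)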

  • log_href: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931177/teuthology.log
  • archive_path: /home/teuthworker/archive/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931177
  • description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/1 scrub/no tasks/{0-check-counter workunit/direct_io}}
  • duration:
  • email:
  • failure_reason: too many values to unpack (expected 1)
  • flavor:
  • job_id: 5931177
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.2
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • check-counter:
      • dry_run: True
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
      • branch: julpark_workload
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
      • conf:
        • global:
          • ms die on skipped message: False
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • rados osd op timeout: 15m
          • debug ms: 1
          • rados mon op timeout: 15m
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • rados mon op timeout: 15m
          • rados osd op timeout: 15m
          • mds debug scatterstat: True
          • mds debug frag: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • bluestore fsck on mount: True
          • osd_max_omap_entries_per_request: 10
          • debug osd: 25
          • bluestore compression mode: aggressive
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • debug journal: 20
      • cephfs:
        • session_timeout: 300
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
    • install:
      • ceph:
        • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
    • admin_socket:
      • branch: master
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_julpark@teuthology
  • pid:
  • roles:
    • ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
    • ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1']
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=e1e67b20a289415397247afc8c2dc9a7
  • status: fail
  • success: False
  • branch: master
  • seed:
  • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
  • subset:
  • suite:
  • suite_branch: julpark_workload
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
  • targets:
    • smithi074.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDLmy8FODsuETb4e5NIShlUDD7EAgRWHiNXdvsNBu/Sb5KjS1QeabczGYwzf2E4wDHHEkEaBn2knJ/OAPhNKQ313nUk9Ub4ghbeRpiRT06eyZwTZSzY0vGZbHG1Zpi4AjxziZ28CXWvzI5VJ/wRRhbGMqM0vtZ+gjZaQv2ElPlJy7AZ1vM65qDWEP8NpqUhkniAvQrsHLY6Vo3foAWX8W0gfJ9P0hH19cKJH3Ys1pj2dSNbXv+FIOOqZjUuLimwaMhi/e1e2jGCca0+2LyKM9fh84B+2kAtVhhNgmXcStwDhNzxMLQYgLJiMaMtICApUyziIevIlJ4D6EmGzUOmNJqi5xpHQFOmYxLcTUGXqfBDAxX7p49njhw3R5QgvqbXd2oraxzxnGDrizWp/uC3o6ulRqS/G+Fhr3B077Pnkn3Mnt+/bccgx0Sf0P2KFHZ8wcLars1+a5zhqItg4rw2ABRTNerqavs0Ond/UP1jg2NtgufWwpeTRvXCbVeojWBC6v8=
    • smithi107.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC0McKcqvuPLQYkqo3Ai8zeGZWiueTDkSm70BskJI7lv0jKekZHDuzqOp4E0hcjucOOsqX3A7IwZegmLk/GYJn3bYLWrnbaksvML2thDP6weTRu/a97DBuIutCql9QsdUGG0rtuHBlJl44j9mzy4sdSUKE2DbYSngWMWd2mtAq2j19U0LKX/xsLzyLsGY4cq/Gpm3uSi/HCu4hDhtNSPZi5Iuw42nEPSn/fVNIs6GGgOvPDOC8HfsneDGn1vr0jifN4JxyYhraaXQmt+YjSFhlh/kfzY2Wgza9V3Gt2ZQsdAqpuVzkaCAFbyISONkQD3wkZ2UWbjB9aaSj2M/eHSmm5eCfZ6giIL3KXOKQUzkroWmq+JZgvrvRgr4AsD8v4ypNrFpOGO21XDmqGqWlAgOHispvOm7yb8w94SwuJAOxbffE2/YLqDLZQYtvjKyfGPZJBz5acERyLJkkfAMF+3rYnAkGcM271K6VxhkfL7TSIuPT4p8x9ZgpRx9PDz35/BSc=
    • smithi099.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvk3HyxfOCs2KrN2emQBdDD/oCvLoAsywX4EVFFXidVME5BQxsVDPO9Zrqfka74em8Q16hTtoNKvI1vmZK6cSNA2kydF8zwMkE6m+29SOi6m+JmMr9cp6ePn9fvzRqbmvn2+gbuXC93nh75eflW8Z/ioKpAWkLnejwq9nxH9EKFBDTvF+oo13magzBVqlnEnshW6CqyxWBPKWwXmmP5rcplC9WoADmwFa9JfAkMt2Bf089+q0jpeAXpwlidW5IM7xH5AWp7HyNmNLNf7odLfWEU8EqjPOXvkVRHNdGy6oW5fV7Xm+A1DJs57zcYuQ88Id9wrR3LT9amkSqBZo7jLzXPj/JfX18sH8EvEoZMZo27FtM9xRzGrAp8XLeldh5i8/hPhq4fwJTSfaOjGWfI+bcaltO1aWKhuvF/rKH+FA3l0daNQ1Lixv2dM/80Clt13U3orrAyBGxcpwB/0yF/WB1BRhKfJlLsjzTxyhm3tgnOmwIhG15COZJ8MF01eZOfgE=
    • smithi075.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDrO7odvS8nWJLheAOptLgWG4q4ZOyl7VcEjt2kWlZcaD+raZ2vlAHYCacpi2aqee9Bc6urSI7ks3XVwXCr2gSOXRg+AJUqzye9/uSjGpj7LPkjg1pD7nloVmmicAsyQCXEce4J2xE1XfpubDQADVmSkNIrZm1plBlRIrttnq5N9CgDz5DGA6k+HlBWLG/JN6/ZBrWBWm6t7UOv3lnAL+ctG4gyvBXcVURx50jwMe9kY8QU2ODL2EPoGVRwOZbXeBWrDd3ruYEuiRYPHTZ1gahFlIdTnHZorcLzm9sjiTXrXyGLO7RlKUa7E6wTa2N1OEcswzzrCEcmm8DmfckxturDDsRZ2ftT43blsteH33Y0G8+McdSqwK+NztaFOe5q3tCInwVVnFYHBbKmytmB2M5C+7yxO8pIkgPdLnx9DcvbGfksbClxGjhmlWBvYwFgtxDlNEVzBALU/a7oMWBvs7CJhaKCbaR1YYbF3mAnB7d1qALB9KBjK065cp/Xy7V+0ek=
    • smithi039.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9tF7MZ17obyaVZe/Yf2F43cWtH1sYkYo9yiCis7VKRPgQ1XFp1gXwyysgFxQk7G/IlPmq0Bt9Mo83lClYuhi5NxE8E7zGV+fjwBP4uvE3z9xZmHHFjUmm76pYPGtPo2+LCVWzLib9ScZ+sfpKdzZEVO8K909RpK9jObTeWzCJbsnXsxGXIN0joQ0ba9SemWx1upUaqmMyxlsMYPPZOWOX+N7ZbBqlg8oj6XPCV4tNqvxrtP4j6xo3nDJGtCSdqi2v/Motu4jOyYhec9xox1bfPEPGSjWSugu+4DbXdBfmTH4FOZEa40J6LxRi/iMajJKViuIhIwbRAnX4jRSCtw2W5DwZKF+r9KUy2NZr0qGTbL2Dz88elE+MjD6SJ2aQbjJrVlZHm+dCLWfCTOZvnvCMSWj6K5BqRzYoA/CF9EOwf5xqeOTFVVhXdl3U+IFfJwKMo5y1fToohmy8x+Am1HiryhIfA+HSuDT6E/3wfGVCxhP6OCDrW7fU/x0iKuAQlcs=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
      • roleless: True
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • host.a:
        • ceph fs volume create foo
        • ceph fs volume create bar
    • sleep:
      • interval: 60
    • kclient:
    • ceph-fuse:
      • client.0:
        • cephfs_name: foo
    • ceph-fuse:
      • client.1:
        • cephfs_name: bar
    • check-counter:
      • workunit:
        • clients:
          • all:
            • direct_io
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-03-03 20:05:37
  • started: 2021-03-04 19:21:25
  • updated: 2021-03-04 19:30:08
  • status_class: danger
  • runtime: 0:08:43
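
The full Python traceback (showing which task raised the ValueError) should be in the teuthology.log linked above. A small sketch for pulling the relevant context out of the archive, assuming the qa-proxy URL is still reachable:

    # Sketch: fetch the archived teuthology.log and print context around the
    # failure message (assumes the qa-proxy archive above is still serving it).
    import urllib.request

    URL = ('http://qa-proxy.ceph.com/teuthology/'
           'julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/'
           '5931177/teuthology.log')

    with urllib.request.urlopen(URL) as resp:
        lines = resp.read().decode('utf-8', errors='replace').splitlines()

    for i, line in enumerate(lines):
        if 'too many values to unpack' in line:
            print('\n'.join(lines[max(0, i - 2):i + 20]))  # traceback context
            break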