Description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{rhel_8} mount mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/no tasks/{0-check-counter workunit/direct_io}}

Log: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931163/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=8fdcb7b339464688b97ff1d317544518

Failure Reason:

Command failed on smithi045 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:bfde964cf0400f19b7ae46122039982198a0f4db shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid adad908a-7d1f-11eb-9065-001a4aab830c -- ceph orch daemon add mon smithi045:172.21.15.45=smithi045'
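A minimal reproduction sketch (an assumption, not taken from the log): if the cluster from this run were still up, re-running the same orchestrator call by hand inside the same cephadm shell should return the same exit status. Status 22 corresponds to EINVAL, which suggests the mon placement argument was rejected as invalid rather than an SSH or image pull problem. All paths, the image tag, and the fsid below are copied verbatim from the failure reason.

    # Hedged sketch: re-run the failing orchestrator call on smithi045.
    # Exit status 22 maps to EINVAL, i.e. the request was rejected as invalid.
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay.ceph.io/ceph-ci/ceph:bfde964cf0400f19b7ae46122039982198a0f4db \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid adad908a-7d1f-11eb-9065-001a4aab830c -- \
        ceph orch daemon add mon smithi045:172.21.15.45=smithi045
    echo "exit status: $?"    # the job reported 22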

  • log_href: http://qa-proxy.ceph.com/teuthology/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931163/teuthology.log
  • archive_path: /home/teuthworker/archive/julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi/5931163
  • description: fs:workload/{begin clusters/1a5s-mds-1c-client-3node conf/{client mds mon osd} distro/{rhel_8} mount mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{frag_enable osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/no tasks/{0-check-counter workunit/direct_io}}
  • duration: 0:15:01
  • email:
  • failure_reason: Command failed on smithi045 with status 22: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:bfde964cf0400f19b7ae46122039982198a0f4db shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid adad908a-7d1f-11eb-9065-001a4aab830c -- ceph orch daemon add mon smithi045:172.21.15.45=smithi045'
  • flavor: basic
  • job_id: 5931163
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: julpark-2021-03-03_20:03:25-fs:workload-master-distro-basic-smithi
  • nuke_on_error: True
  • os_type: rhel
  • os_version: 8.3
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • check-counter:
      • dry_run: True
      • counters:
        • mds:
          • mds.exported
          • mds.imported
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
      • branch: julpark_workload
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • rados mon op timeout: 15m
          • rados osd op timeout: 15m
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • rados osd op timeout: 15m
          • debug ms: 1
          • rados mon op timeout: 15m
          • debug client: 20
          • client mount timeout: 600
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • debug journal: 20
          • osd_max_omap_entries_per_request: 10
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mgr:
          • debug ms: 1
          • debug mgr: 20
      • cephfs:
        • session_timeout: 300
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 3
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
    • install:
      • ceph:
        • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
    • admin_socket:
      • branch: master
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_julpark@teuthology
  • pid:
  • roles:
    • ['host.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0']
    • ['host.b', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1']
    • ['mon.a', 'mgr.x', 'mds.a', 'mds.d', 'osd.0', 'osd.3', 'osd.6', 'osd.9', 'client.0']
    • ['mon.b', 'mgr.y', 'mds.b', 'mds.e', 'osd.1', 'osd.4', 'osd.7', 'osd.10']
    • ['mon.c', 'mgr.z', 'mds.c', 'mds.f', 'osd.2', 'osd.5', 'osd.8', 'osd.11']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=8fdcb7b339464688b97ff1d317544518
  • status: fail
  • success: False
  • branch: master
  • seed:
  • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
  • subset:
  • suite:
  • suite_branch: julpark_workload
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: ac62ca61d46e87208fa6292c9152faacd08c23b1
  • targets:
    • smithi073.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDOI24sMaeuQ72P0k7sTaKX+z8/RQkXTHqb31qJXxSv39TrZHy9z8xffO+YLyG3jauNkcxZ4FDKLXsOOXQk3CLHPSo9DVLfwZhYhx2c7FrIFxZtX8qpGuMZC1TYjLdl4hnfyZeexIU0tSn/KxC8r6CkzaE2nB+HJhdmgIqeqMOYti/yUbxemPCVgI0l0Cj5DRukWKIuJg+aNeb5jWZIBzBYLDaX9G+qYg4G124azpUXwf4jlhp2lzWYBG122mv07VFpHMiqLs2G21Z1H40qOhgBVjRG4NnpZzIy1bfqIoAJ6EJCfVeIcijHkCd5u5lrujxZf7PvI2Dp5bNJQE4RqspjjtJoytjvC96Wyv8BWvYkEQcJ1KGIbvCVjCgwYTkOw6UEaGX2lHpbKlu07tPIhQhtdxhDCZ3V4T463mBrGdncWOqP8E0gxSU4RvyuBq6h9UbUhT4XzVyHu+a89sKHtLU//8yZWnU8raDCCRExXbywPfauiyiBTIDxCWgWlCr30iM=
    • smithi023.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDwf3UfqSEDW17SIh21Otvu0mHB0Jlgo+sNs6GvzvhrIRmiSDatiz6JDMg8OYbHV5hWiflbmUL4RGfD2mivbpsmDAAncqsxPP9ULi1UxDV9RUCyftG1vahJb9h6Bj6k9e8xmsKd32uvRvQExr+SA+NKUddbGbPGjxVx0OnOBks2sVEAjXO03vYD6/SpQthfZ7UaeGwQLiBwNvzfO+7V4ZeE6VWUGaWTNLq1WLOM1PjezkCgCUskSI4rBuKlbq/kJysrHzP0LbCn9AvGxpzQVulXK3C/Qo1yonF/ilRAIx+dbEBbq9B95B65DXFLX42zrtwhWi2ymCrQv3aj6zItKomOpX4jvaQzAoTN7GJM8GHjDawfTT7U/dZSj+e7svBS3bmPjA1TIdy50NIBFRC94aMQ6SOXwyiz+ZRZH9RrppLO1OWkBwiGinf7qkBcHp1GwMRzhyH+Lw0Vyt1L0yrnnXDHeSv1AXqnwrJ8ydRfC5V9D9Iohgs5idvvDg+YGOvNbyc=
    • smithi095.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXVMIo+EBQCdTzXSfk9AF7mz8uwpPMnXJDPHjT0hbCvNoVl3+PlLy7YSXIa1EH1xGgL2jtgyTZK1tC6eS7FqtqR3EOWbuRk8CAOkWwJS3Aj5N2Aq/oCMRE7LtleyE0UItoSh24mkkz6Ilz3ZJf0nyfBGLpWPKVyCU9/GgDIvgMYWezZwwW41nDSuLXXKEVb8hTzfMQWXSNWpF3ydt+ua/i1nxUzkB85tlgGq3qV3OFtik0nLtJwzisxPEfiO7nhQnQDFCiqqr4619bEARnhrJMqTvPV9Vam1njeIz9PtheYRIv6tPSnD1t1/oQDke15ZjJfTIwge+Vt+F3hu5ySV9uiHBFhys3cPS6dAG7JLDpSxDuEWex0vmjcf6piZrfGXDsr5P8/MBfO50hejogxG3tQYSFpTeFgWddeiFKTmv5fZDiVi4NnXJMLVCLI1Digl+NWmJZ1ITV4Yp8OHjt3/tBFJps0lT0NEWCu/2FoL0hxTysI7gcSgb5/GGYcKoUkgs=
    • smithi045.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDeP63QrHdYfmJ9ZR0FY1Jw0tv2TFSd6f3IfCD/fvEH82gognO+nAhOCAQqlTNa1jBvGr3YWHi2ViPEC9AXDjb7yIb0AIjao6CwpgcOXv5X7hiohiZZaLKEsvNtByuSNj53m52maLjCTXxychhiX5e6RuiKsyYRBxmupPB8v+PCb/ersbt3VmePXIsxByDSBEeKactEqu747n3tufZObJW8KZuTr2wgW9BVSQs2L+3O+GLLT8LPFVKCFNMHYXjujlIAaTjYJqxT3fMOVtTiRbxHJcb5uaY9VbHUex/4JxX4yY+tODWhrbG/XDGNo6/dANAmQFrK1gIwIreZRVN40StVeJ8Bo2vwDEny5ECGS7JATN4Inip2zwRJcLjWN0RGLM0JjjaaC0clYb3+nUu05kkLt5YG5qMxGNZEQLMbRwjpCKHbGiM27ktSgz1q919I2kThXEJ3inS8hu18G/Y3mcjwjUAk77EiTb0Ny7SNuqqtRZevSxPQo+HSs334pqbmYOE=
    • smithi196.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBhF+1AQc2wPkBNn6mwgzsw2IMfr3N80h3/ZaaerQhR8qIOpLSXOV1lnU+yhmCDq2JThxWz/SkkvKwjQv55Bqp92wTG5arub+/BUFzeOIuha51wSyvjCN9ChrZz+czbzo6bY6LdJd5ffViNIB2yqXTJ91zp2mEJO7vC16eJaj7he7T9Vmfxm3sqNbHaws6xMDbA6WpsognBcIHgif+rums+RsyIwgmfKMHj9X0mCn1N60G3Yst+XlEV0rFJ80G9p7AogNFWHXhnzxKpG5q++yD4JxgaIy6/1RTNCHlhocORrte9bS3yDlXGYN5atXcxOt5Fj+8ygPaeQelwrIsZ+cT5n/5KZwMcYLd1hAJltgv0V1+saipaDt0N4l2jA1xgo/FMe27PjjrOUDAHNCNnGuf6C7B/A1CPU2+4CQBZ/Ik4YpkpD2kIp3r4EHbAHWUDfoyx/F2cpnxuTN55rIED1vichhZaMIviGVetRlR+QkwjffxbX27QqfTMAxprvmGHJ8=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • cephadm:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • roleless: True
      • cephadm_mode: root
      • sha1: bfde964cf0400f19b7ae46122039982198a0f4db
      • cluster: ceph
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • rados mon op timeout: 15m
          • rados osd op timeout: 15m
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • rados osd op timeout: 15m
          • debug ms: 1
          • rados mon op timeout: 15m
          • debug client: 20
          • client mount timeout: 600
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 20
          • debug journal: 20
          • osd_max_omap_entries_per_request: 10
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon op complaint time: 120
        • mgr:
          • debug ms: 1
          • debug mgr: 20
      • cephfs:
        • session_timeout: 300
        • ec_profile:
          • m=2
          • k=2
          • crush-failure-domain=osd
        • max_mds: 3
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
    • cephadm.shell:
      • host.a:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
    • cephadm.shell:
      • host.a:
        • ceph fs volume create foo
        • ceph fs volume create bar
    • sleep:
      • interval: 60
    • ceph-fuse:
    • ceph-fuse:
      • client.0:
        • cephfs_name: foo
    • ceph-fuse:
      • client.1:
        • cephfs_name: bar
    • check-counter:
      • workunit:
        • clients:
          • all:
            • direct_io
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-03-03 20:05:24
  • started: 2021-03-04 19:10:31
  • updated: 2021-03-04 19:33:07
  • status_class: danger
  • runtime: 0:22:36
  • wait_time: 0:07:35