Description: kcephfs/recovery/{begin clusters/1-mds-4-client conf/{client mds mon osd} kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root overrides/{frag_enable log-config osd-asserts whitelist_health whitelist_wrongly_marked_down} tasks/mds-full}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2021-05-12_22:21:53-kcephfs-wip-yuri3-testing-2021-05-12-1004-octopus-distro-basic-smithi/6112119/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=6eb3f30f70c743e6a9325f08cf1636e9

Failure Reason:

Test failure: test_full_fsync (tasks.cephfs.test_full.TestClusterFull)
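
For context, test_full_fsync exercises client behaviour once the cluster reports full (this fragment makes that easy to reach: memstore OSDs of 200000000 bytes with mon osd full ratio 0.7). Broadly, a buffered write may still be accepted, but fsync against a full data pool is expected to surface an error rather than succeed silently. The snippet below is only an illustrative sketch of that expectation, not the body of the teuthology test; the mount path is a hypothetical example.

```python
# Illustrative sketch only -- not the teuthology test body.
# Assumes a kernel CephFS mount at /mnt/cephfs (hypothetical path) on a
# cluster whose OSDs have already crossed the configured full ratio.
import errno
import os

def fsync_surfaces_full_error(path="/mnt/cephfs/fsync-probe"):
    fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o644)
    try:
        # The buffered write can still land in the page cache...
        os.write(fd, b"x" * (4 * 1024 * 1024))
        try:
            # ...but flushing it to a full pool is expected to fail.
            os.fsync(fd)
        except OSError as e:
            return e.errno in (errno.ENOSPC, errno.EDQUOT)
        return False  # fsync unexpectedly succeeded on a full cluster
    finally:
        os.close(fd)
```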

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2021-05-12_22:21:53-kcephfs-wip-yuri3-testing-2021-05-12-1004-octopus-distro-basic-smithi/6112119/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2021-05-12_22:21:53-kcephfs-wip-yuri3-testing-2021-05-12-1004-octopus-distro-basic-smithi/6112119
  • description: kcephfs/recovery/{begin clusters/1-mds-4-client conf/{client mds mon osd} kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root overrides/{frag_enable log-config osd-asserts whitelist_health whitelist_wrongly_marked_down} tasks/mds-full}
  • duration: 0:17:57
  • email: ceph-qa@ceph.io
  • failure_reason: Test failure: test_full_fsync (tasks.cephfs.test_full.TestClusterFull)
  • flavor: basic
  • job_id: 6112119
  • kernel:
    • client:
      • branch: testing
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2021-05-12_22:21:53-kcephfs-wip-yuri3-testing-2021-05-12-1004-octopus-distro-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 18.04
  • overrides:
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • workunit:
      • sha1: 19aef0c0fbf48152ec623e79e93179a9d29497b9
      • branch: wip-yuri3-testing-2021-05-12-1004-octopus
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • fs: xfs
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
        • disabled
      • sha1: 19aef0c0fbf48152ec623e79e93179a9d29497b9
      • conf:
        • global:
          • ms die on skipped message: False
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • client:
          • rados osd op timeout: 15m
          • debug ms: 1
          • rados mon op timeout: 15m
          • debug client: 20
          • client mount timeout: 600
        • mon:
          • mon osd full ratio: 0.7
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
          • mon osd nearfull ratio: 0.6
          • debug ms: 1
          • mon osd backfillfull ratio: 0.6
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • rados mon op timeout: 15m
          • rados osd op timeout: 15m
          • mds debug scatterstat: True
          • mds debug frag: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • bluestore fsck on mount: True
          • memstore device bytes: 200000000
          • debug osd: 20
          • bluestore compression mode: aggressive
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: memstore
          • osd mon report interval: 5
          • mon osd backfillfull_ratio: 0.85
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • osd shutdown pgref assert: True
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 1.0
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • \(FS_INLINE_DATA_DEPRECATED\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • OSD full dropping all updates
        • OSD near full
        • pausewr flag
        • failsafe engaged, dropping updates
        • failsafe disengaged, no longer dropping
        • is full \(reached quota
        • POOL_FULL
        • POOL_BACKFILLFULL
    • install:
      • ceph:
        • sha1: 19aef0c0fbf48152ec623e79e93179a9d29497b9
    • admin_socket:
      • branch: wip-yuri3-testing-2021-05-12-1004-octopus
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'mds.b', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.c', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=6eb3f30f70c743e6a9325f08cf1636e9
  • status: fail
  • success: False
  • branch: wip-yuri3-testing-2021-05-12-1004-octopus
  • seed:
  • sha1: 19aef0c0fbf48152ec623e79e93179a9d29497b9
  • subset:
  • suite:
  • suite_branch: wip-yuri3-testing-2021-05-12-1004-octopus
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 19aef0c0fbf48152ec623e79e93179a9d29497b9
  • targets:
    • smithi174.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQClGA08ZxxIVOcJT0407xHu3PHU8Mkw3y9TsTD23ZC7y3K9CVL1zB/mXCB71gHzbx7PgwBKArjjfGEus52NKKscIL1dcGxmKfXSs6m1lFT4SRubRFPrOtoHkrpXmCK16jmD9Nj7rBB4s1RxlOpFIeAYcGck/Zq1aRRWY3hXYed6GSCv11Z3+nh1H0WbXUoSZWQQQ/s0xyMnEoSNMQTfi/7pk2div36pjkcXKgkdkhGzfvwiqdPOmEEp4p446n1f3H8AKWl1uKGCcE+qbMmd0aHXdymcXZqZuZRoR58+YfjOgyR/kmUbyJOT8mj6ow3nMKY3aO/kt+wS0o7pnIgOk3rN
    • smithi176.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDM5GrWHd1sbo64bUhRUjc2bh187jb9wgSym35u3qAePpA5itnUNRC2x9rHozoaInM1j1ygdHjP1BgQOnHVgPGt7EQ+EW4wUW1/tLJoNf54oxAKPRDU2pRI9hb8HlukFXQyD3HDVI4kvV61zjWkJY8qFl12k4+Eg9fxFCgGFjsobz2tnw9G35hzigZ8f17FR0u4Ph1QXfxuOqs3TiFW7TW7+AohxfHKyrfs4uThoZYEE8cKbwcTZOP1qE8gTzJXRnGOYk7gmMp1AGinX8/S/TABUdyEKu8tCI9a8B4WcySr55+y5OBFMlPi/hImaqT4XK6+eAkWChrpju2j2oXAZykn
    • smithi013.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDg+vHk3591NUnzcZW+bnTJ3k7ZCUcO5SO4PCn9J38FESe5NWEh45rlSF9GEvrTIOEwVNsiN1MzuFp6iJjHsZu3BJs4TOVcmdkfiadZLctoOzv+RncvM0WVqgu21/tX+Xc5uhi+6H8a8N5vMoA5VQ9JxhxK2sWLzoWkRuHd1SxcX3z2AdlIkuwWnpMhKqUzLBTmiy9f84ddnxw/UFJX+fpt/WDIunFlp8TBOf7P7mGYfZAKotn64iNHIjE8NDtJWw2bRcmMuEWfAl0AZQYuU5OqHxQhEHzArM6P+gIH4fQic06ce3NhRsr68c91K4kcFSPGBaF7JNe7LbWgsyQmL+eB
    • smithi154.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzB/qoWxMB+qPwDhb01Vtu3KyqSGTZ8l+XDtNf7HQFi2gVPZ0JctNO0aHsTx0wBq2eWZah6TJaPMOyrCOb0w+jZbu7rX8xkwIykDtmB9f/G7nlrLHHitVmKNQTCfBjUpGJduoDSy02C2hWY//DMIJjwYbO52rLSVYbjz5arjuWAi/QmeAUAasHbI606UFCMSY0eHANP8aaAPOmlX6DihLE7iXqyB4papUep4DG3tYwf9gB3SCWQYmlUFh1PsSbu31q3v6FVEfzCtuzvu4KgRyaSiQGcPAthcpLnByuvAFg5l/63DoXsygpyCyFPNR/qiELcQTOsZR0ZqK6qh9K0bTz
    • smithi107.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwrvim9O76w3ZQhHr+8H/IuTCMmNWBgIMxlsKSPijAj1jqsMfya88xQcvqASBggD2s+nsr4DrmX+2N6peZfOxN4TIV6bgUOde3YZGKIePjC7oNStavw89KNnA82QKmXg0nrXhqkrk9AiEUtDuxMuq3Z1ATfndVehGR3xDXDzhfvR30eopC0EUE8RiaOBJrorJj0efr5FhqyAZcFyQHtiq/tRBZ+4/y22cg0aBv6MTBNymB3FjTyw+0AMB+Qc59f2/UEbF3VKBhbkNxOvL9n1DIBXJ+tUDimhri9J+Wq7cMf6r3nebWePAJb7kQR42UcH2x1iCEpXd8f/Yn2BhqZfqJ
    • smithi055.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAyzTT9EjBgWHnE12ctb1lcySutm6qscutCu/cUHUu27D9IpQfssrY/vFzgUbs6l1xNpizk96DCxdIKGsfhZjI+v2EfQiBLtNGdxWbQNcB6vPsLTr/rU4QRtBN5hhIpHPn2IIFb+fZBNbsb/PwIl5yLQtQfrBs0A+IGX0kqeO/Wn76iRR/by8p10luRaOBGmIKoIPRS4DcQLaQ6d/x5bXLPyXK+tqkhD7AiA3BAOfPxgYtDlRSBL1cVGB16iR8L6pzY4qYEYSnRkqKCg2vNcSaNuw/EkYCEpbfYzp4wnm6dV79wEvo8Hu8Iro6bE/vDBWoGDIzgKmkUcniFj9xhpKB
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • client:
        • branch: testing
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • bison
          • flex
          • libelf-dev
          • libssl-dev
          • dump
          • indent
          • libaio-dev
          • libtool-bin
          • uuid-dev
          • xfslibs-dev
        • rpm:
          • bison
          • flex
          • elfutils-libelf-devel
          • openssl-devel
          • libacl-devel
          • libaio-devel
          • libattr-devel
          • libtool
          • libuuid-devel
          • xfsdump
          • xfsprogs
          • xfsprogs-devel
          • libaio-devel
          • libtool
          • libuuid-devel
          • xfsprogs-devel
      • extra_packages:
        • deb:
          • python3-cephfs
          • cephfs-shell
        • rpm:
          • python3-cephfs
      • sha1: 19aef0c0fbf48152ec623e79e93179a9d29497b9
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_full
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-05-12 22:22:44
  • started: 2021-05-12 23:24:27
  • updated: 2021-05-12 23:55:50
  • status_class: danger
  • runtime: 0:31:23
  • wait_time: 0:13:26
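
To iterate on this outside teuthology, one option (an assumption about the usual developer workflow, not something this run did) is to drive the same test module against a local vstart cluster with qa/tasks/vstart_runner.py. A minimal sketch, assuming a ceph checkout with a vstart cluster running in build/ and the relative path below matching that layout:

```python
# Minimal sketch, assuming the documented vstart_runner.py workflow:
# run from the build/ directory of a ceph checkout with a vstart cluster up.
# The relative path and test selector are assumptions, not taken from this job.
import subprocess
import sys

FAILING_TEST = "tasks.cephfs.test_full.TestClusterFull.test_full_fsync"

def rerun_locally() -> int:
    cmd = [sys.executable, "../qa/tasks/vstart_runner.py", FAILING_TEST]
    return subprocess.run(cmd, check=False).returncode

if __name__ == "__main__":
    sys.exit(rerun_locally())
```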