Description: kcephfs/recovery/{clusters/1-mds-4-client.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mounts/kmounts.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{frag_enable.yaml log-config.yaml osd-asserts.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} tasks/config-commands.yaml whitelist_health.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2019-06-01_05:20:02-kcephfs-luminous-testing-basic-smithi/3996714/teuthology.log

  • archive_path: /home/teuthworker/archive/teuthology-2019-06-01_05:20:02-kcephfs-luminous-testing-basic-smithi/3996714
  • duration: 0:14:09
  • email: ceph-qa@lists.ceph.com
  • failure_reason:
  • flavor: basic
  • job_id: 3996714
  • kernel:
    • flavor: basic
    • sha1: 242498b140e4d41c85c82067e1e2681e158f773e
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2019-06-01_05:20:02-kcephfs-luminous-testing-basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: 22f7d1ad63fb5b73970e19c3b6f889d5536f7ed3
      • branch: luminous
    • ceph:
      • sha1: 22f7d1ad63fb5b73970e19c3b6f889d5536f7ed3
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
        • overall HEALTH_
        • \(OSD_DOWN\)
        • \(OSD_
        • but it is still running
        • is not responding
        • overall HEALTH_
        • \(FS_DEGRADED\)
        • \(MDS_FAILED\)
        • \(MDS_DEGRADED\)
        • \(FS_WITH_FAILED_MDS\)
        • \(MDS_DAMAGE\)
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • osd op complaint time: 180
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds verify scatter: True
          • mds bal fragment size max: 10000
          • mds op complaint time: 180
          • mds debug scatterstat: True
          • mds debug frag: True
        • client:
          • debug ms: 1
          • debug client: 20
          • client mount timeout: 600
        • global:
          • lockdep: True
        • osd:
          • mon osd full ratio: 0.9
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • mon osd backfillfull_ratio: 0.85
          • mon osd nearfull ratio: 0.8
          • osd op complaint time: 180
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • osd shutdown pgref assert: True
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon op complaint time: 120
      • cephfs_ec_profile:
        • m=2
        • k=2
        • crush-failure-domain=osd
    • install:
      • ceph:
        • sha1: 22f7d1ad63fb5b73970e19c3b6f889d5536f7ed3
    • admin_socket:
      • branch: luminous
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
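
The overrides above are the merged product of the YAML fragments named in the job description: for example, the mds bal * settings come from overrides/frag_enable.yaml, which enables directory fragmentation with aggressively low split/merge thresholds for testing. The erasure-coding piece contributed by objectstore-ec/bluestore-ec-root.yaml would look roughly like the following minimal sketch (a hypothetical reconstruction, not the literal fragment file):

    # Hypothetical reconstruction; the real bluestore-ec-root.yaml may differ in layout.
    overrides:
      ceph:
        fs: xfs
        cephfs_ec_profile:
          - m=2
          - k=2
          - crush-failure-domain=osd
        conf:
          osd:
            osd objectstore: bluestore
            bluestore fsck on mount: true

With k=2/m=2, each object in the CephFS data pool is striped into two data chunks plus two coding chunks, so the pool tolerates the loss of any two OSDs; crush-failure-domain=osd places chunks per OSD rather than per host, which fits this small two-storage-node cluster.
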
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mgr.y', 'mds.a', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mon.c', 'mgr.x', 'mds.a-s', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
    • ['client.1']
    • ['client.2']
    • ['client.3']
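
Expressed as a teuthology roles stanza, the layout from clusters/1-mds-4-client.yaml is six machines: two storage nodes carrying the monitors, managers, the active MDS and a second MDS (mds.a-s, conventionally the standby for mds.a), eight OSDs split between them, and four single-purpose client nodes. A minimal sketch, with values copied from the run metadata above:

    # Sketch of the role map; mirrors the roles list in this record.
    roles:
    - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
    - [mon.b, mon.c, mgr.x, mds.a-s, osd.4, osd.5, osd.6, osd.7]
    - [client.0]
    - [client.1]
    - [client.2]
    - [client.3]
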
  • sentry_event:
  • status: pass
  • success: True
  • branch: luminous
  • seed:
  • sha1: 22f7d1ad63fb5b73970e19c3b6f889d5536f7ed3
  • subset:
  • suite:
  • suite_branch: luminous
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 22f7d1ad63fb5b73970e19c3b6f889d5536f7ed3
  • targets:
    • smithi089.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcvYtR9rAa5+rHUeD1leMLeVRWfheZqkxTaEDS+jjk5JkF4TSiMECdOCR2U4Yz0e5GMqTObNArGGBHAS97U/87n0nkGHWeXVI8JWaq2vWpc6PXSmryQuskv4LjjNyDOvN/7ohzukiWSMU/b9egnbeVDk2z8jV/E8S9L6boU6blxCIFjevA9GzD09g7G+GOwX0BwKWmimiS3Zrf4mDqEjeJeMAP8yZUlhvzJtMeazbeeGDYXkPeUp6LKeinrsguwvkUmSp4mPOSt2ZQlM2Cvjzo2fL0uw49gejpn//HMPhuxgYqNlJ9FTDukGmqq7p5BhQMvTIjKe+O0v8GxGixD87Z
    • smithi145.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpH8mRRT/XQSfx9z3jAez1243XKXGesAnVYb4hJlU0v+jjhUg5TwYBcTbKNWZWs6JEMxf/k/tKdGoVqIPZQaoMNpVa7GN0lvP+cl/UI6vPjmVHKyFdbXejErJeuk3KojJb7Dc34o6L5GDOZVnZ/FXKaJ9b9nlfW1rHOfqwZ//6Otiz3BM2GspGCXnR+1Cff9/94cWloFbVWemzCpRuTK7Ip8YOe/WzVAtCwVX8/lLG7A8PIkCe4vegSqbC+oySvO0RvRc1yuACIeSkGZwMs8L24UgDi71q/NZxm+nZztHBecVc+qzNra/nJDZ2HqhLY0PL/ubqAaEFuFBv6BczgVhl
    • smithi039.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCooXqjiqGVMnpmnFC5PYziKGvPsfpyJgiWvqwbaamv/PgXQiB6UgBtBRwfQpi25mMhEU+CU66SUv9zvZKeoX0DMrHkU6q/9gfmgyRluLm5rhwUStg+eu1ZGZvcLkFYZNe7hmo4kfc05/szcnYsLN22yX9LQ7GEX/oKHyvSnIscrAG9relT9fpxwlAH2+aASPbIkfRvSxGuVDDUTZqY++yGUMhycs7q89GtdgXFfqTw+/DZYpVnqwbKzBI/m4Z7aUOC8rjyV7++gAsk2Ggc4jLbyr5gyXbFse/bYl8tR/hCv+zqE4skgY9euHV4oI6lna0Y08bWjZwBPrdcfz4ozBQh
    • smithi001.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+M/iEXsQ2U8/kw8tXJW1nE5wreNdGNTMjM+eCtmwELaXYmGAoFLKR2Cw/IqrlmNsJKhYfS+gjBvy27VoaOLsB5DnERe892JOiOO8VXqfVxFV4EnSTy8Bvb/1FDzPo/ZtPfubO+1z9uXM7xnu7GPV/DK4w7yf7rkG1mG4BVjytY551Emoj8fSB0PwZGflynIjEC2Oa2yTnzr/1p5qRm3fH5FlmMi3mDyoTIWTtWcYvbwHbuiwQIAmm/EovsROXtITIY+MtKIpujdv+1Pxq8hV9ULNN/5QnpwhrzaKQDZ9QfGWrMj8w3c1DBypLhIGjAwrqIBfK+4272q+WQLsjTq5h
    • smithi040.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCto6ibfbgwhc+QuKJ4vrGXclc21wWfvs0dGiulDbf7tB+/Mowz9fr8r+zCJF+OTToZxDAgoE6PDf3yjVRKTbFqQq2fgO+5+SlbeTMkOXVU/lzcydlU5GmpIO9J6DCQOcK2QAzIjSrqt6zzLfm1bVshdzxHrTqGu629iz5xgw3iPDzXfhOtkHj5LgTI1SygiVVCKQZIWfeDxt+tDAblFUJEUXyvQJvT+Nk7FDJdkOEks1y8vgVd7b43PeZPSxOFECtM9K3jcoydNpExm26Sjs5ifF+xSjikuARs6jazWEAu338L0+a+plCPukYADZnCEeXc41z+UYj+Lxli3zFMegl3
    • smithi061.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCob54U3RDmPZ8IMkWzYeZB9edOdnnNWa8YB8ynPIHD+gpzvcvcbTDObt9W/n2gidELp4aaTgzoQHq2ybOsdei7eUXGqUCRLud/v/ZlbkrJVGWVnB8GrY+H9gDo01bGG5T9whjN3QOkg6LZDcrxXUIWUWww7nb8RYWUsj34Jjh+0mk1BoiNmMNzOL1YV+d8DJuF21XvVjZZCbfR5HkJAhKlWFxKYGXQ45f34Hho312WmSI5oAjJ0PBCWSz7HwBS/AEHmFu/PSyeiWffu+WI13aLmYjqxTSlqCJmRmQhIoytUnApSIufWfAe4gnaih3jCFRAOdNWplVNInwnMsYrTI5z
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 6
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • flavor: basic
      • sha1: 242498b140e4d41c85c82067e1e2681e158f773e
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • kclient:
    • cephfs_test_runner:
      • modules:
        • tasks.cephfs.test_config_commands
      • fail_on_skip: False
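
Stripped of the internal.* framework plumbing, the task list reduces to a short pipeline: install packages, bring up the cluster, mount the kernel client on each client node, then run a single Python test module. A minimal sketch of the equivalent job YAML (assuming the standard kcephfs suite layout):

    # Sketch of the user-facing tasks; the scheduled job also carries the
    # internal.* bookkeeping tasks listed above.
    tasks:
    - install:
    - ceph:
    - kclient:
    - cephfs_test_runner:
        fail_on_skip: false
        modules:
          - tasks.cephfs.test_config_commands

fail_on_skip: False means skipped test cases do not fail the run, and the module name tasks.cephfs.test_config_commands corresponds to tasks/config-commands.yaml in the job description.
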
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2019-06-01 05:23:01
  • started: 2019-06-05 21:37:07
  • updated: 2019-06-06 03:25:12
  • status_class: success
  • runtime: 5:48:05
  • wait_time: 5:33:56
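
A sanity check on the timing fields: runtime equals updated minus started (2019-06-06 03:25:12 - 2019-06-05 21:37:07 = 5:48:05), and wait_time equals runtime minus duration (5:48:05 - 0:14:09 = 5:33:56). In other words, the test body itself ran for about 14 minutes; the remaining roughly 5.5 hours of the run window were presumably spent on machine locking, setup, and teardown rather than on the test.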