Description: rados/multimon/{clusters/21.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados/{op_queue/pool.yaml rados.yaml} tasks/mon_clock_with_skews.yaml}

Log: http://qa-proxy.ceph.com/teuthology/ivancich-2018-02-21_21:00:05-rados-wip-pool-unit-dmclock_bf-patch---basic-smithi/2212855/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/ivancich-2018-02-21_21:00:05-rados-wip-pool-unit-dmclock_bf-patch---basic-smithi/2212855/teuthology.log
  • archive_path: /home/teuthworker/archive/ivancich-2018-02-21_21:00:05-rados-wip-pool-unit-dmclock_bf-patch---basic-smithi/2212855
  • description: rados/multimon/{clusters/21.yaml mon_kv_backend/leveldb.yaml msgr-failures/few.yaml msgr/random.yaml objectstore/bluestore.yaml rados/{op_queue/pool.yaml rados.yaml} tasks/mon_clock_with_skews.yaml}
  • duration: 0:13:53
  • email: ivancich@redhat.com
  • failure_reason:
  • flavor: basic
  • job_id: 2212855
  • kernel:
  • last_in_suite: False
  • machine_type: smithi
  • name: ivancich-2018-02-21_21:00:05-rados-wip-pool-unit-dmclock_bf-patch---basic-smithi
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph-deploy:
      • fs: xfs
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • mon osd full ratio: 0.9
          • mon osd backfillfull_ratio: 0.85
          • bluestore fsck on mount: True
          • mon osd nearfull ratio: 0.8
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • bluestore block size: 96636764160
          • debug rocksdb: 10
          • osd failsafe full ratio: 0.95
      • bluestore: True
    • workunit:
      • sha1: c7b9cccd40cb72c565407dce463eb1785341fee5
      • branch: eric-qa-dmclock-pool
    • ceph:
      • log-whitelist:
        • slow request
      • fs: xfs
      • conf:
        • client:
          • objecter mclock service tracker: True
        • global:
          • enable experimental unrecoverable data corrupting features: *
          • ms type: random
          • ms inject socket failures: 5000
        • osd:
          • mon osd full ratio: 0.9
          • osd op queue cut off: debug_random
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • osd debug verify missing on start: True
          • osd op queue: mclock_pool
          • mon osd backfillfull_ratio: 0.85
          • osd debug verify cached snaps: True
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon keyvaluedb: leveldb
      • sha1: 1b3d19a46b2777c732e81a038659890843797bb2
    • install:
      • ceph:
        • sha1: 1b3d19a46b2777c732e81a038659890843797bb2
    • admin_socket:
      • branch: wip-pool-unit-dmclock_bf-patch
    • thrashosds:
      • bdev_inject_crash_probability: 0.5
      • bdev_inject_crash: 2
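
These overrides are what make this job a dmclock/mclock_pool run: the OSDs are configured with "osd op queue: mclock_pool" and "osd op queue cut off: debug_random", and clients enable the objecter mclock service tracker. As a hedged sketch (not part of this job; the osd id and admin-socket access are assumptions), one way to confirm a live OSD actually picked up these settings is to query its admin socket:

    # Query a running OSD's admin socket for the scheduler settings.
    ceph daemon osd.0 config get osd_op_queue          # expect "mclock_pool" on this branch
    ceph daemon osd.0 config get osd_op_queue_cut_off  # "debug_random" in this run
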
  • owner: scheduled_ivancich@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.d', 'mon.g', 'mon.j', 'mon.m', 'mon.p', 'mon.s', 'osd.0']
    • ['mon.b', 'mon.e', 'mon.h', 'mon.k', 'mon.n', 'mon.q', 'mon.t', 'mgr.x']
    • ['mon.c', 'mon.f', 'mon.i', 'mon.l', 'mon.o', 'mon.r', 'mon.u', 'osd.1']
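
The three role lists place twenty-one monitors (mon.a through mon.u), two OSDs, and one mgr across the three smithi nodes, matching the clusters/21.yaml facet in the description. A hedged sketch (ordinary ceph CLI, not part of this job) of how quorum across that many monitors could be checked by hand:

    # List the monitors and show which of them are currently in quorum.
    ceph mon stat
    ceph quorum_status --format json-pretty
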
  • sentry_event:
  • status: pass
  • success: True
  • branch: wip-pool-unit-dmclock_bf-patch
  • seed:
  • sha1: 1b3d19a46b2777c732e81a038659890843797bb2
  • subset:
  • suite:
  • suite_branch: eric-qa-dmclock-pool
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: c7b9cccd40cb72c565407dce463eb1785341fee5
  • targets:
    • smithi198.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSiSH7sYGd2lSoQJl+4OEi0md6fVwwmIB8EL8MEsNT0EDabvPCreke4joBTDwPPo19b6KRbeXEH49PYK7/A7dEV2EC0frOI7nUDLbUew+/nEJQbEiLWrGyRIx9d++sQqVXv57Xtk6S8AXwRKdppJHvJ6XgCDCFxcdXECz9R6PNvZt8hVA82L+PVtL2Q4ghXtGsPi41fa+SS6HI5DqdSPySZZT69o6e1O+0Nw39Z3AyCjJCXiS/EqOEDNNE0AQpSkY8aBG99HeqDxJNJi5pNO4Dexz9kO/3nN0zlhHAE3TYVujpDVczvgoyBbBtaZV1yuhvREpAUMFj1K+24BCjuD8j
    • smithi095.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYbiDnX34AtaMpykVUm4wK8ydmY5mQW/BLpbb2QGiDDvAJnpImF0yN447Q03ORhZuSdN04Gm6yR7EaS+yTIDLl4LYMsvrLiByV0MIfYd9eIgM9s1jeS7vwWGfuhVJF9cu6UbrGd8qM0qo9kvGhu3PKBA8MEOJFBO6MtPrUuv1Ka5RXfPs+/1MFqvtZt+KdFuL5GHUVvzONrsh4lzPvsnPlvaiTYC8MHdeBURytlDIPzbTsKUg6n/OhXj4YplZ5GhRcchdvriCN1LYntyiZmJopBAb2peg+P7NqF/5piezrgS4XiSVS/aTZwqS9gTNI1ccy5py7YgC+9B37ScpWcFDF
    • smithi173.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCSENHIdPUWlZho2c0p1pH3Q4e7YejcWH/5RX1AAv7fOYtwnPZ1n+Ua53YmCRZyi9yRjhQgNurcpuVAF6a9+q2KaRmgSEOUVkP9eSQfhLrDd5+32fBXeMzNdK/oLWBnXc6iScTU7f44vDeSzXemvuCc9XirmGvN5zG0aYamK9hzZ18FSZfNKJcg24vCClgnjcveJHU3VIAZ0gGoyHbHf7AQPDFokv6KVNmG52P8i+jhuYa85VRd0pe3OT8wSsEhy5n5+HAwzUPFNw/kyWkq6f2Yyk2UUbcBFd1GuT9T41c4PuJ0nz8BqviI+5Bp7BKK9CHAU7jpEi4c4t9q6MlyL54n
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 3
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • exec:
      • mon.b:
        • date -u -s @$(expr $(date -u +%s) + 10)
    • ceph:
      • wait-for-healthy: False
      • log-whitelist:
        • slow request
        • .*clock.*skew.*
        • clocks not synchronized
        • overall HEALTH_
        • \(MON_CLOCK_SKEW\)
        • \(MGR_DOWN\)
        • \(PG_
        • No standby daemons available
        • slow request
      • sha1: 1b3d19a46b2777c732e81a038659890843797bb2
      • cluster: ceph
      • fs: xfs
      • conf:
        • client:
          • objecter mclock service tracker: True
        • global:
          • enable experimental unrecoverable data corrupting features: *
          • ms type: random
          • ms inject socket failures: 5000
        • osd:
          • mon osd full ratio: 0.9
          • osd op queue cut off: debug_random
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • debug bluestore: 20
          • debug bluefs: 20
          • osd objectstore: bluestore
          • osd debug verify missing on start: True
          • osd op queue: mclock_pool
          • mon osd backfillfull_ratio: 0.85
          • osd debug verify cached snaps: True
          • bluestore block size: 96636764160
          • debug filestore: 20
          • debug rocksdb: 10
          • mon osd nearfull ratio: 0.8
          • osd failsafe full ratio: 0.95
          • bluestore fsck on mount: True
        • mon:
          • debug ms: 1
          • debug mon: 20
          • debug paxos: 20
          • mon keyvaluedb: leveldb
    • mon_clock_skew_check:
      • expect-skew: True
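
Read together, the task list tells the story of this job: the exec step pushes mon.b's clock ten seconds ahead with date -u -s @$(expr $(date -u +%s) + 10), ceph is then started with wait-for-healthy: False so the skewed quorum is tolerated, and mon_clock_skew_check runs with expect-skew: True, i.e. the job passes only if the monitors actually report the injected skew (hence MON_CLOCK_SKEW and the other clock-related entries in the log-whitelist). A hedged sketch of how the same condition could be inspected manually on a cluster (standard ceph commands; output format varies by release):

    # Ask the monitor cluster how far each mon's clock is from the leader's.
    ceph time-sync-status
    # While the skew is in place, health should show a MON_CLOCK_SKEW warning.
    ceph health detail
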
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2018-02-21 21:02:36
  • started: 2018-02-21 21:45:14
  • updated: 2018-02-21 22:05:16
  • status_class: success
  • runtime: 0:20:02
  • wait_time: 0:06:09