Nodes: ovh036 ovh069

Description: fs/traceless/{begin.yaml clusters/fixed-2-ucephfs.yaml fs/xfs.yaml mount/fuse.yaml objectstore/filestore.yaml overrides/{debug.yaml frag_enable.yaml whitelist_wrongly_marked_down.yaml} tasks/cfuse_workunit_suites_fsstress.yaml traceless/50pc.yaml}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2017-07-29_05:10:02-fs-kraken-distro-basic-ovh/1459645/teuthology.log

Failure Reason:

"2017-07-30 04:32:07.870147 osd.1 158.69.94.130:6804/9662 4 : cluster [ERR] 1.3 shard 2: soid 1:f83d65d8:::200.00000004:head data_digest 0x5023ec2c != data_digest 0xbce64cbf from shard 1, data_digest 0x5023ec2c != data_digest 0xbce64cbf from auth oi 1:f83d65d8:::200.00000004:head(7'2022 mds.0.7:3025 dirty|data_digest|omap_digest s 4194304 uv 2022 dd bce64cbf od ffffffff alloc_hint [0 0 0])" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2017-07-29_05:10:02-fs-kraken-distro-basic-ovh/1459645/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2017-07-29_05:10:02-fs-kraken-distro-basic-ovh/1459645
  • description: fs/traceless/{begin.yaml clusters/fixed-2-ucephfs.yaml fs/xfs.yaml mount/fuse.yaml objectstore/filestore.yaml overrides/{debug.yaml frag_enable.yaml whitelist_wrongly_marked_down.yaml} tasks/cfuse_workunit_suites_fsstress.yaml traceless/50pc.yaml}
  • duration: 0:24:04
  • email: ceph-qa@ceph.com
  • failure_reason: "2017-07-30 04:32:07.870147 osd.1 158.69.94.130:6804/9662 4 : cluster [ERR] 1.3 shard 2: soid 1:f83d65d8:::200.00000004:head data_digest 0x5023ec2c != data_digest 0xbce64cbf from shard 1, data_digest 0x5023ec2c != data_digest 0xbce64cbf from auth oi 1:f83d65d8:::200.00000004:head(7'2022 mds.0.7:3025 dirty|data_digest|omap_digest s 4194304 uv 2022 dd bce64cbf od ffffffff alloc_hint [0 0 0])" in cluster log
  • flavor: basic
  • job_id: 1459645
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: ovh
  • name: teuthology-2017-07-29_05:10:02-fs-kraken-distro-basic-ovh
  • nuke_on_error: True
  • os_type:
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • wrongly marked me down
      • fs: xfs
      • conf:
        • mds:
          • mds bal split bits: 3
          • mds bal split size: 100
          • mds inject traceless reply probability: 0.5
          • debug mds: 20
          • mds bal merge size: 5
          • debug ms: 1
          • mds bal frag: True
          • mds bal fragment size max: 10000
        • client:
          • debug ms: 1
          • debug client: 10
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 25
          • osd objectstore: filestore
          • osd sloppy crc: True
          • debug filestore: 20
      • sha1: e56d4c481dead2f8ccb7baaae80db2f4acfc7bf0
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • workunit:
      • sha1: e56d4c481dead2f8ccb7baaae80db2f4acfc7bf0
      • branch: kraken
    • install:
      • ceph:
        • sha1: e56d4c481dead2f8ccb7baaae80db2f4acfc7bf0
    • admin_socket:
      • branch: kraken
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mds.a', 'osd.0', 'osd.1', 'client.0']
    • ['mon.b', 'mds.a-s', 'mon.c', 'osd.2', 'osd.3']
  • sentry_event:
  • status: fail
  • success: False
  • branch: kraken
  • seed:
  • sha1: e56d4c481dead2f8ccb7baaae80db2f4acfc7bf0
  • subset:
  • suite:
  • suite_branch: kraken
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: e56d4c481dead2f8ccb7baaae80db2f4acfc7bf0
  • targets:
    • ovh036.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGXz0w7n2x71DrhsZo8Ex9WtnC/o/zYlNPfhmMT5PjM91pr3uDyILqUIRh2WrL7Qkx9guU+Il9/lW1j65wMxZPrVTg3m52tAUnqYrn64n3H9f6YbwQTg0cHokvYv5SbAzNLk3uqgOXMgpqX557LdnRF7n4Ulky8suVioe5UePeUvlkZFgK+x74J4f7lo/ljAYQMhywHxtsQScTH/N7Cm3bb+jsIrrTDMqcNWDM0iK1Xb4paZJASZ97VTNLwrev4OzrewXC0MBCX/X+uiisGvdrvmj62YFQCnxLXzXEJn19gUNrWdvH6DP6BN/k/niwFUBZt7PBG1g+GzeVSMQeeaNv
    • ovh069.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Uqcq0J0dbOc8aaRitvfBIEwAvZCNtCWbLXNey+O2YdVW1U/PL2hED66GiPdzqwBa2ZA03dLrIuDQjVrjW3lWIyktVGUG8I9myP0xqcge5XfPudwEzU4FV8SuemL4X18fuFD/sRpp9paao5eLDrv4s1+qgVJv0sKnOQx4DZVqfcusp2389LGiFPhMyRxAb5YO7qoWAYcRZ40q4YVbyaQ6Uhh92Lza1esf1jtszXcawR9Hpqsp+EA9WEx1zdy3OKigJuRZrORx/sEVV0AiHlf0WJLIL5swgVl6dxD6yCFMEGRShx/tcGd9EHFKeDcKl4RzsFaV+3XjMZVB4DkIiIiJ
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 2
      • ovh
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • ceph-fuse:
    • workunit:
      • clients:
        • all:
          • suites/fsstress.sh
      • branch: kraken
      • sha1: e56d4c481dead2f8ccb7baaae80db2f4acfc7bf0
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2017-07-29 05:11:03
  • started: 2017-07-30 04:03:33
  • updated: 2017-07-30 04:37:33
  • status_class: danger
  • runtime: 0:34:00
  • wait_time: 0:09:56
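
The timing fields at the end of the record are internally consistent: runtime is the span from started to updated, and wait_time is runtime minus the 0:24:04 test duration, i.e. the setup and teardown overhead around the workload itself. A quick check of that arithmetic, assuming those conventions:

    from datetime import datetime, timedelta

    fmt = "%Y-%m-%d %H:%M:%S"
    started = datetime.strptime("2017-07-30 04:03:33", fmt)
    updated = datetime.strptime("2017-07-30 04:37:33", fmt)
    duration = timedelta(minutes=24, seconds=4)

    runtime = updated - started     # 0:34:00, matching the record
    wait_time = runtime - duration  # 0:09:56, matching the record
    assert runtime == timedelta(minutes=34)
    assert wait_time == timedelta(minutes=9, seconds=56)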