Description: rbd/librbd/{cache/none.yaml cachepool/small.yaml clusters/fixed-3.yaml fs/btrfs.yaml msgr-failures/few.yaml workloads/qemu_tiobench.yaml}

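The facet names above map directly onto settings in the dump below: cache/none.yaml disables the client-side cache (rbd cache: False), cachepool/small.yaml drives the cache-tier exec step, clusters/fixed-3.yaml gives the three-node roles layout, fs/btrfs.yaml selects fs: btrfs, msgr-failures/few.yaml turns on messenger fault injection (ms inject socket failures: 5000, i.e. roughly one injected failure per 5000 messenger operations), and workloads/qemu_tiobench.yaml runs tiobench.sh inside a qemu guest.
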
Log: http://qa-proxy.ceph.com/teuthology/loic-2015-01-08_10:36:47-rbd-giant-backports-testing-basic-vps/690659/teuthology.log

Failure Reason:

Command failed on vpm128 with status 8: "wget -nv -O /home/ubuntu/cephtest/qemu/client.0.test.sh 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/tiobench.sh' && chmod 755 /home/ubuntu/cephtest/qemu/client.0.test.sh"

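wget exits with status 8 when the server issues an error response (typically an HTTP 4xx/5xx), so the job died before running any workload: the tiobench.sh workunit could not be fetched from the ceph.com gitweb gateway. That reading is consistent with the short 0:06:43 duration below. A minimal sketch to reproduce the check by hand, outside teuthology (the /tmp path is illustrative):

    # Retry the failing fetch and surface the HTTP status behind exit code 8.
    URL='https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/tiobench.sh'
    wget -nv -O /tmp/client.0.test.sh "$URL"
    echo "wget exit status: $?"                      # 8 = server issued an error response
    curl -s -o /dev/null -w '%{http_code}\n' "$URL"  # show the actual HTTP code returned
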
  • log_href: http://qa-proxy.ceph.com/teuthology/loic-2015-01-08_10:36:47-rbd-giant-backports-testing-basic-vps/690659/teuthology.log
  • archive_path: /var/lib/teuthworker/archive/loic-2015-01-08_10:36:47-rbd-giant-backports-testing-basic-vps/690659
  • description: rbd/librbd/{cache/none.yaml cachepool/small.yaml clusters/fixed-3.yaml fs/btrfs.yaml msgr-failures/few.yaml workloads/qemu_tiobench.yaml}
  • duration: 0:06:43
  • email: loic@dachary.org
  • failure_reason: Command failed on vpm128 with status 8: "wget -nv -O /home/ubuntu/cephtest/qemu/client.0.test.sh 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/tiobench.sh' && chmod 755 /home/ubuntu/cephtest/qemu/client.0.test.sh"
  • flavor: basic
  • job_id: 690659
  • kernel:
    • sha1: c191413b4b4979b0607fd92ff1097351e48c6bef
    • kdb: True
  • last_in_suite: False
  • machine_type: vps
  • name: loic-2015-01-08_10:36:47-rbd-giant-backports-testing-basic-vps
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version:
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
        • wrongly marked me down
      • fs: btrfs
      • conf:
        • global:
          • ms inject socket failures: 5000
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 20
          • osd sloppy crc: True
          • osd op thread timeout: 60
          • debug filestore: 20
      • sha1: 0ea20e6c51208d6710f469454ab3f964bfa7c9d2
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
          • debug mon: 1
          • debug paxos: 20
          • debug ms: 20
      • branch:
        • dev: giant-backports
    • workunit:
      • sha1: 0ea20e6c51208d6710f469454ab3f964bfa7c9d2
    • install:
      • ceph:
        • sha1: 0ea20e6c51208d6710f469454ab3f964bfa7c9d2
    • admin_socket:
      • branch: giant-backports
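For reference, teuthology writes the ceph: conf: overrides above into each test node's ceph.conf; the block should render roughly as the following INI fragment (a sketch of the generated file, not a verbatim copy):

    [global]
            ms inject socket failures = 5000
    [mon]
            debug mon = 20
            debug paxos = 20
            debug ms = 1
    [osd]
            debug ms = 1
            debug journal = 20
            debug osd = 20
            osd sloppy crc = true
            osd op thread timeout = 60
            debug filestore = 20
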
  • owner: loic@dachary.org
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'osd.0', 'osd.1', 'osd.2']
    • ['mon.b', 'osd.3', 'osd.4', 'osd.5']
    • ['client.0']
  • sentry_event:
  • status: fail
  • success: False
  • branch:
  • seed:
  • sha1:
  • subset:
  • suite:
  • suite_branch: giant
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1:
  • targets:
    • ubuntu@vpm103.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCh8yV76ni+b09ElQlre0eC94LmyIOuYULXhg1aFMWNr0kPvuMG/KrzZ87g/VWy2wyZ4tVfZMK5bkvuXn8tWP7ns88Z9QZ448JgYiwuDEU41FrVu2dCju/0seHc/e86zp9C37UdUuvns/kK/Hjk+rWHzICD2wQ0TvtmijiRC5N0e0za6AYXpeY242PFXkmlSvvIUwZSMLUn8VExPL/4MyzhBIJZDVectYYBQ/Aio2H9lnxUiloIH65DBY4C4aEwUvr/Uk6AgRgX817LtOC/zu3cDA8CUXIHdK41db+Se2DXlkKfWod++Aopg/qwzsAiXFLvRpY2O9ln0A6SpYE5rkzB
    • ubuntu@vpm059.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0WrzLvP3o8/SFuGos3zmsbDUAUTl539y7d4gRDs5Wt5gKogeEDZ/MnZShSz5YfplvS9xbeENK/OzMEZbPagivESAu76vjbs4jQ1AdbI/37I6PVaRT846hiHcin8cnvqkrN2DkrU5daooqrStCLXC7itJwyaLXcm9guRuBRq+25b7/O/M2FjFClzRP27lwtXUpPImoJVDEEk19yhbau4oGPTeOTGSE5Dkbvlj6SskxuTxlWEE+PnIZcIJIb12dAq2MpN/Fm0lI4uVPMKGVdyz5hZ0bBDA3J5yVhUG0ZAC9Cw1coO5tMHZDUUPlqBhxuIbwdZztyxxV4zgOaf5Aq92L
    • ubuntu@vpm128.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYl9Wv0qfFHXlCAyKS4GOYNkw0GIog6bzXRQNeGevmG1tGDvdyTh3114jyK//EfjB64vXNSVjyWh1lTnvLoUGShY5qLCT18pkksf0EC+kHCtpPvyn4N4I00QluJuMM++bTIUSLEsmbDWR8jzVt6+eY6MxoQHqjlpJWXG80rYNgnk3uOe7xMiudnp/8TxR6Cny5MtSg8MQpFhNvCUW8iP1xOXb5Jyl1umihYeE39Gc3GIJ1h2/Ub60M4YAk7+fsDAxCTg+4uYUqgF73ZO/XHiQa//j4itYuWmyYPT+fN84bWvn4Cp/IKc55/RkuAKDwcTAyBfr3Di460U+HN6I7P9C5
  • tasks:
    • internal.lock_machines:
      • 3
      • vps
    • internal.save_config:
    • internal.check_lock:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: c191413b4b4979b0607fd92ff1097351e48c6bef
      • kdb: True
    • internal.base:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • chef:
    • clock.check:
    • install:
    • ceph:
      • log-whitelist:
        • slow request
        • wrongly marked me down
      • conf:
        • global:
          • ms inject socket failures: 5000
        • osd:
          • debug ms: 1
          • debug journal: 20
          • debug osd: 20
          • osd sloppy crc: True
          • osd op thread timeout: 60
          • debug filestore: 20
        • mon:
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • client:
          • rbd cache: False
      • fs: btrfs
      • sha1: 0ea20e6c51208d6710f469454ab3f964bfa7c9d2
    • exec:
      • client.0:
        • ceph osd pool create cache 4
        • ceph osd tier add rbd cache
        • ceph osd tier cache-mode cache writeback
        • ceph osd tier set-overlay rbd cache
        • ceph osd pool set cache hit_set_type bloom
        • ceph osd pool set cache hit_set_count 8
        • ceph osd pool set cache hit_set_period 60
        • ceph osd pool set cache target_max_objects 250
    • qemu:
      • all:
        • test: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/tiobench.sh
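The exec step above assembles the small writeback cache tier from cachepool/small.yaml (a 4-PG pool named cache tiered over rbd, bloom hit sets, capped at 250 objects), and the qemu step then fetches tiobench.sh from the same gitweb URL that produced the status-8 failure in the failure_reason. A hedged sketch for confirming on a live cluster that the tier actually took effect, before suspecting the workload itself:

    # Run as client.0; output formats vary slightly across Ceph releases.
    ceph osd dump | grep 'pool'                   # 'cache' should list tier_of <rbd pool id>, cache_mode writeback
    ceph osd pool get cache hit_set_type          # expect: hit_set_type: bloom
    ceph osd pool get cache target_max_objects    # expect: target_max_objects: 250
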
  • teuthology_branch: giant
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2015-01-08 09:37:52
  • started: 2015-01-08 10:25:00
  • updated: 2015-01-08 11:49:05
  • status_class: danger
  • runtime: 1:24:05
  • wait_time: 1:17:22
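
The timing fields are mutually consistent: runtime is the wall-clock span from started to updated, and it splits into queue wait plus actual test time:

    1:17:22 (wait_time) + 0:06:43 (duration) = 1:24:05 (runtime)
    10:25:00 (started)  + 1:24:05 (runtime)  = 11:49:05 (updated)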