Description: upgrade:hammer-jewel-x:parallel/{0-cluster/start.yaml 1-hammer-jewel-install/hammer-jewel.yaml 2-workload/sleep.yaml 3-upgrade-sequence/upgrade-all.yaml 3.5-finish.yaml 4-jewel.yaml 5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml 6-workload/sleep.yaml 7-upgrade-sequence/upgrade-by-daemon.yaml 8-kraken.yaml 9-final-workload/{sleep.yaml} distros/centos_7.2.yaml}

Log: http://qa-proxy.ceph.com/teuthology/sage-2016-12-06_18:46:02-upgrade:hammer-jewel-x:parallel-master---basic-smithi/612080/teuthology.log

Failure Reason:

"2016-12-06 19:08:54.238195 osd.0 172.21.15.39:6800/11000 1 : cluster [WRN] map e18 wrongly marked me down" in cluster log

  • log_href: http://qa-proxy.ceph.com/teuthology/sage-2016-12-06_18:46:02-upgrade:hammer-jewel-x:parallel-master---basic-smithi/612080/teuthology.log
  • archive_path: /home/teuthworker/archive/sage-2016-12-06_18:46:02-upgrade:hammer-jewel-x:parallel-master---basic-smithi/612080
  • description: upgrade:hammer-jewel-x:parallel/{0-cluster/start.yaml 1-hammer-jewel-install/hammer-jewel.yaml 2-workload/sleep.yaml 3-upgrade-sequence/upgrade-all.yaml 3.5-finish.yaml 4-jewel.yaml 5-hammer-jewel-x-upgrade/hammer-jewel-x.yaml 6-workload/sleep.yaml 7-upgrade-sequence/upgrade-by-daemon.yaml 8-kraken.yaml 9-final-workload/{sleep.yaml} distros/centos_7.2.yaml}
  • duration: 0:28:43
  • email:
  • failure_reason: "2016-12-06 19:08:54.238195 osd.0 172.21.15.39:6800/11000 1 : cluster [WRN] map e18 wrongly marked me down" in cluster log
  • flavor: basic
  • job_id: 612080
  • kernel:
  • last_in_suite: False
  • machine_type: smithi
  • name: sage-2016-12-06_18:46:02-upgrade:hammer-jewel-x:parallel-master---basic-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 7.2
  • overrides:
    • ceph:
      • log-whitelist:
        • slow request
      • conf:
        • mon:
          • mon debug unsafe allow tier with nonempty snaps: True
          • mon warn on legacy crush tunables: False
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug osd: 25
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 1
      • sha1: ebb9f0942c8dd1eb66c2dc93d26e4a210c66c94e
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
          • debug mon: 1
          • debug paxos: 20
          • debug ms: 20
    • workunit:
      • sha1: ebb9f0942c8dd1eb66c2dc93d26e4a210c66c94e
    • install:
      • ceph:
        • sha1: ebb9f0942c8dd1eb66c2dc93d26e4a210c66c94e
    • admin_socket:
      • branch: master
  • owner: scheduled_sage@teuthology
  • pid:
  • roles:
    • ['mon.a', 'osd.0', 'osd.1']
    • ['mon.b', 'mon.c', 'osd.2', 'osd.3']
    • ['client.0', 'client.1']
  • sentry_event:
  • status: fail
  • success: False
  • branch: master
  • seed:
  • sha1: ebb9f0942c8dd1eb66c2dc93d26e4a210c66c94e
  • subset:
  • suite:
  • suite_branch: wip-15753-master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 48062c0a9c5aea2ecd2060edc20bc625e2be69a6
  • targets:
    • ubuntu@smithi039.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvm25UtPlk84pQTkLlGlbq6M9MVG4ApT3BI4lq12476GwEeVogXeOmXc3Fb+Z0W/0hmTR+Zzfp0EsZaDipfPprnczmS0rOoHpF3f/ltlZdgVEOaz97urUfqAidZsQQU24uEyPIme51EAZ4qAoh0X8N3ATa2Tvk4IrnSG2TNzgjXuUjNKDius3zO0N4FLqJZfscbNVQpZCT4ECQsugeLCH8A7RcFp9P6HSDQgRX/olRay2N1M7by96+Y17bgr8O/LqP3r2cYZBtV+7YWoub8ul74zo6s0jcTXoICyBol78+z6APXsiU9RnaH0qzZihheW/KXVdtSOTXd1QydA11Jf6p
    • ubuntu@smithi018.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUgXLIH608eXc5ym/2MgW7w08y6xi9dZym3Qb0hQuEe5Vw741OwBfUKmFWKk8GlAzlSq6EU0g0EEzCQ2jj+RuqkVbq5jdmGv1icXW+f1N0LsjqI5zuQ6WfA0YAgI8Fo8PVN0rdduRDaxwXcWi8I6WvHb1cnYHrNmDRaj0cNZkeUsFWKgoExsQ3de5CV7nx4C/b/VLo/mrZnWtLFu/FgZdj5s6Vx4a4Ceukwp032CiP1KWpc33aVxV7z0Z04z3ytQAcQ2+6iYm/3TWcLSvom5zp62tayvsHVSOHy9mYI6Q8xr9A0t5iUEClGtDxFY64JzxDJV7BdwbL4siujDrqAcc5
    • ubuntu@smithi007.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpXoSGFLp3CeUNagAokfneznaJ9WX78hfOcxv5QnvqdfSm98M4aOrvyCD+GDN5PaZtapB9yWTcD1Z29dTZSBRH9x99uaxjxOqTXv3blg9lpXSLpZf7kQDdUs7RG618k7whdWhccxT8epQ7f8m7QAvT0HSZdTDT5TlYyfCe47TLpGEaf/P5XJX8ED8okJIWeyrvSwjwUSqQKDzE3WG34e49peZCaNx8PoJBGbzKbHgjx/Y1EM/nkrXv8OghS1acazZ2Zl8eTXc5IUmzPPAYVQPzhuDp+44ox1QJVs99okAYjo8nTPVcAXq4q5ph3hOo/ASCvJsy7EYM0+Mt6enNsB65
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.lock_machines:
      • 3
      • smithi
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock.check:
    • install:
      • sha1: ebb9f0942c8dd1eb66c2dc93d26e4a210c66c94e
      • exclude_packages:
        • ceph-mgr
        • libcephfs2
        • libcephfs-devel
        • libcephfs-dev
      • branch: hammer
    • print: **** done hammer
    • ceph:
      • log-whitelist:
        • slow request
      • cluster: ceph
      • fs: xfs
      • conf:
        • mon:
          • mon debug unsafe allow tier with nonempty snaps: True
          • mon warn on legacy crush tunables: False
          • debug mon: 20
          • debug paxos: 20
          • debug ms: 1
        • osd:
          • debug osd: 25
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 1
      • sha1: ebb9f0942c8dd1eb66c2dc93d26e4a210c66c94e
    • install.upgrade:
      • osd.0:
        • project: ceph
        • branch: jewel
      • osd.2:
        • project: ceph
        • branch: jewel
      • exclude_packages:
        • ceph-mgr
        • libcephfs2
        • libcephfs-devel
        • libcephfs-dev
    • print: **** done install.upgrade osd.0 and osd.2 to jewel
    • parallel:
      • workload
      • upgrade-sequence
    • print: **** done parallel
    • install.upgrade:
      • client.0:
        • project: ceph
        • branch: jewel
      • exclude_packages:
        • ceph-mgr
        • libcephfs2
        • libcephfs-devel
        • libcephfs-dev
    • exec:
      • osd.0:
        • ceph osd set sortbitwise
        • ceph osd set require_jewel_osds
        • for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
    • install.upgrade:
      • client.0:
        • project: ceph
        • branch: jewel
      • exclude_packages:
        • ceph-mgr
        • libcephfs2
        • libcephfs-devel
        • libcephfs-dev
    • print: **** done install.upgrade client.0 to jewel
    • install.upgrade:
      • osd.0:
      • osd.2:
    • print: **** done install.upgrade daemons to x
    • parallel:
      • workload2
      • upgrade-sequence2
    • print: **** done parallel workload2 and upgrade-sequence2
    • exec:
      • osd.0:
        • ceph osd set require_kraken_osds
    • sleep:
      • duration: 120
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2016-12-06 18:46:23
  • started: 2016-12-06 18:47:07
  • updated: 2016-12-06 19:33:07
  • status_class: danger
  • runtime: 0:46:00
  • wait_time: 0:17:17
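
Two triage notes. First, the feature flags set by the exec steps in the task list above can be confirmed on a live cluster with the standard CLI; this mirrors the job's own commands rather than introducing anything new (assuming use_gmt_hitset is exposed via "osd pool get" on this build):

    # cluster-wide flags expected after the jewel phase
    ceph osd dump | grep flags
    # per-pool hitset setting applied by the job's exec step
    for p in $(ceph osd pool ls); do ceph osd pool get $p use_gmt_hitset; done

Second, once a fix lands on the suite branch, a rerun can be scheduled with teuthology-suite; roughly as follows, though the exact invocation depends on the local teuthology checkout and the --filter value here is only an illustration:

    teuthology-suite -s upgrade:hammer-jewel-x:parallel \
        -c master --suite-branch wip-15753-master \
        -m smithi --distro centos \
        --filter 'hammer-jewel-install'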