Description: upgrade:pacific-p2p/pacific-p2p-stress-split/{0-cluster/{openstack start} 1-ceph-install/pacific. 1.1.short_pg_log 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{fsx radosbench rbd-cls rbd-import-export rbd_api readwrite snaps-few-objects} 5-finish-upgrade 6-final-workload/{rbd-python snaps-many-objects} objectstore/filestore-xfs supported-all-distro/ubuntu_latest thrashosds-health}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2021-07-04_19:18:15-upgrade:pacific-p2p-pacific-distro-basic-smithi/6252471/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=fd447f7dc65b4f06823d969ab975962a

Failure Reason:

reached maximum tries (500) after waiting for 3000 seconds
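
The message above is the signature of a bounded polling loop giving up: the wording matches the kind of retry helper teuthology uses while waiting for the cluster to settle (for example, waiting for PGs to return to active+clean after the OSD restarts and thrashing in this job), and 500 tries over 3000 seconds is consistent with a 6-second poll interval (500 * 6 s = 3000 s). The sketch below is only a minimal illustration of that pattern, not the actual teuthology helper; the poll interval and the cluster_is_clean check are assumptions.

    import time

    class MaxWhileTries(Exception):
        """Raised when a bounded poll loop exhausts its attempts."""

    def wait_until(check, tries=500, sleep=6):
        """Poll check() until it returns True, or give up.

        With these defaults, 500 tries at a 6-second interval is the
        3000 seconds quoted in the failure reason (500 * 6 = 3000).
        """
        for attempt in range(1, tries + 1):
            if check():
                return attempt
            time.sleep(sleep)
        raise MaxWhileTries(
            "reached maximum tries (%d) after waiting for %d seconds"
            % (tries, tries * sleep)
        )

    def cluster_is_clean():
        # Placeholder check; a real one would parse `ceph pg stat` or
        # `ceph status --format=json` and confirm all PGs are active+clean.
        return False

    # wait_until(cluster_is_clean)  # would raise MaxWhileTries after ~3000 s
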

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2021-07-04_19:18:15-upgrade:pacific-p2p-pacific-distro-basic-smithi/6252471/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2021-07-04_19:18:15-upgrade:pacific-p2p-pacific-distro-basic-smithi/6252471
  • description: upgrade:pacific-p2p/pacific-p2p-stress-split/{0-cluster/{openstack start} 1-ceph-install/pacific. 1.1.short_pg_log 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{fsx radosbench rbd-cls rbd-import-export rbd_api readwrite snaps-few-objects} 5-finish-upgrade 6-final-workload/{rbd-python snaps-many-objects} objectstore/filestore-xfs supported-all-distro/ubuntu_latest thrashosds-health}
  • duration: 2:13:13
  • email:
  • failure_reason: reached maximum tries (500) after waiting for 3000 seconds
  • flavor: basic
  • job_id: 6252471
  • kernel:
    • sha1: distro
    • kdb: True
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2021-07-04_19:18:15-upgrade:pacific-p2p-pacific-distro-basic-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 20.04
  • overrides:
    • ceph:
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 283df1071a4c03b56eba700b1fcc721dbaf6bc3d
      • fs: xfs
      • conf:
        • mgr:
          • debug ms: 1
          • debug mgr: 20
        • global:
          • enable experimental unrecoverable data corrupting features: *
          • osd_max_pg_log_entries: 2
          • osd_min_pg_log_entries: 1
        • mon:
          • debug paxos: 20
          • debug mon: 20
          • debug ms: 1
          • mon warn on osd down out interval zero: False
        • osd:
          • debug osd: 20
          • debug ms: 1
          • osd objectstore: filestore
          • osd sloppy crc: True
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
        • overall HEALTH_
        • \(MON_DOWN\)
        • \(MGR_DOWN\)
        • but it is still running
        • wrongly marked me down
        • objects unfound and apparently lost
        • log bound mismatch
        • failed to encode map
        • overall HEALTH_
        • \(OSDMAP_FLAGS\)
        • \(OSD_
        • \(PG_
        • \(POOL_
        • \(CACHE_POOL_
        • \(SMALLER_PGP_NUM\)
        • \(OBJECT_
        • \(SLOW_OPS\)
        • \(REQUEST_SLOW\)
        • \(TOO_FEW_PGS\)
        • slow request
    • ceph-deploy:
      • fs: xfs
      • filestore: True
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd sloppy crc: True
          • osd objectstore: filestore
    • workunit:
      • sha1: 283df1071a4c03b56eba700b1fcc721dbaf6bc3d
      • branch: pacific
    • install:
      • ceph:
        • sha1: 283df1071a4c03b56eba700b1fcc721dbaf6bc3d
    • admin_socket:
      • branch: pacific
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.b', 'mon.c', 'mgr.x', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=fd447f7dc65b4f06823d969ab975962a
  • status: fail
  • success: False
  • branch: pacific
  • seed:
  • sha1: 283df1071a4c03b56eba700b1fcc721dbaf6bc3d
  • subset:
  • suite:
  • suite_branch: pacific
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 283df1071a4c03b56eba700b1fcc721dbaf6bc3d
  • targets:
    • smithi201.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDZmd+w/FQCFkCRMfyOkPOGNP1Dhx7dlK0GXfmRu4Rtv3FxMKGo/rBS6yqOQnQIGNR/XcXvb18PrfOXcayzQDE/WcoetL+TAWcAH9Kn7TaL24UpBl0UJ5s0tXx6rXVImW7/s/sBlWP4GN4H5n/a1PVURCqhP+69xD+KSDw9X7Vt1riuUbt2Xt/UpOPCBBKcjvQXYdXrTutvReVIkcs0opUWAM8BcxZdJGwCpTHSaJNr5ojjDkTg2/O1ueZWrlPnPhhvMa4MqeVRXQtOLnwdqMMq7I93sz33tAoar2eNH9VqKysxCz3e4CtMfkk418DX/gVpQAjK2fXncElSC4Z6J+7OfrRstzpK6P0Ak2Orxj5060H3x6/BbQBnCxJhkstJ1o7OhOguJnnKPdmTaPnNrl1pC+Q/bdAQ//Z9KPk9stJhR8I89qe72AFnw5UR2bw6ZBon8u+U52Q2rBh+r8ftBJCzI4P+QtlPggcPLfLzH1vjNVGDvvK1Lk6Y7oSzEkk1Qdk=
    • smithi027.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDWQS5JJkAWy4YaU9fUwKJdz01V0MJMfvltqLkLyBTA6objWaDDblf8e9DNmlFIaXMdS0xQzA3yxoI7bGCblB3aGg7zSST9fi5QqUVxX5HtfSA/oSsCK6MEzvrqER8ScMe3Le3ckXqpgVPL6oOzJqG5B/ToTEOvIa2NmYkvDit+qhZW7mBKy21uzYT6d5dYqIq8/HBbG0YJGcA/xteSwGr/cyAZz7WwMlU0p4EYuDsPEWRPOYgNlE7A54XatDChegXqMkHn5O8kalUeOKg/M0ykeY0cPDFpaUWMp7mJVIHXKyfHAxZ2F1MVousb8CfZbH2wwoxlstJg0Y76PRYtXoQEgNpYPt/3y1Ydv4ji7Gx0MpuqDPFZ+eGKUag8fSE8LfV/gp5tJRPAm3G1daYPnv9olH8KA4wxb4J78T9lSNBODVzbqozemmDA/AOCQvbEwd9SJ940O07QosO20o6FzQ3xswjF3wyn9mRXxkSDiBrAaYz4BQkr0G3HPpYuQoX67UE=
    • smithi115.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDATA6V+C9T1ZQhU6aJPAdkynZUIZg1ZYLAPjOQTVbmvXKaU12h90yBZpBaO37YDe1/KnzSIOraCXA6WoCUlzutcBzq5JX3H7AEh7qaH0Q1qRfERYpioGaS4KjniKkhSi5G3xubmE/VZba07dRMXmrAQB6uofvhDxH8VQJBsJQE4dAnLvL54GLKOCieQs/I1GbEVSOgtn3rJzR1wetILJ4CBAcD8i2kIfs2T6cRc7mrrdOFH9MSR80UjBGsBpob823Ipb5QMp6VtWqGe3czLDbUpTZlrCutwagwZs6F0mloAcOnWKA0pA9+y3u7N3W18BqUsvULc4Jkbcj5L2PYL4NBcs/TJNt59AXjKw4rBmFdVhF2pM3oLo8an+Wxd3evgptcJi/88T3n0jlpMUitfW5xPVEVwGCPozxYD5HZ5dv5n0Os9hoyr87U9fOdWhB122pSy4l00snI40XOM/2i2o+mAxNxeO7h5jXKgaRKvs2iRGgDf6HoNLof26vKON1+0dU=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • sha1: distro
      • kdb: True
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • tag: v16.2.3
      • extra_packages:
        • librados2
      • exclude_packages:
        • librados3
      • sha1: 283df1071a4c03b56eba700b1fcc721dbaf6bc3d
    • print: **** done install pacific v16.2.3
    • ceph:
    • exec:
      • osd.0:
        • ceph osd require-osd-release pacific
        • ceph osd set-require-min-compat-client pacific
    • print: **** done ceph
    • install.upgrade:
      • osd.0:
    • print: **** done install.upgrade osd.0
    • ceph.restart:
      • daemons:
        • mon.a
        • mon.b
        • mon.c
        • mgr.x
        • osd.0
        • osd.1
        • osd.2
        • osd.3
      • mon-health-to-clog: False
    • print: **** done ceph.restart 1st half
    • parallel:
      • stress-tasks
    • install.upgrade:
      • client.0:
      • osd.4:
    • ceph.restart:
      • daemons:
        • osd.4
        • osd.5
        • osd.6
        • osd.7
      • wait-for-healthy: False
      • wait-for-osds-up: True
    • workunit:
      • clients:
        • client.0:
          • rbd/test_librbd_python.sh
      • tag: v16.2.0
    • print: **** done rbd/test_librbd_python.sh 7-workload
    • rados:
      • op_weights: (see the weighted-sampling sketch at the end of this report)
        • snap_remove: 50
        • write: 100
        • rollback: 50
        • read: 100
        • snap_create: 50
        • delete: 50
      • clients:
        • client.0
      • write_append_excl: False
      • objects: 500
      • ops: 4000
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2021-07-04 19:18:19
  • started: 2021-07-04 19:18:20
  • updated: 2021-07-04 21:43:54
  • status_class: danger
  • runtime: 2:25:34
  • wait_time: 0:12:21
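
For reference, the timing fields above are self-consistent: runtime is the span from started to updated (21:43:54 - 19:18:20 = 2:25:34), and it splits into the reported duration plus wait_time (2:13:13 + 0:12:21 = 2:25:34). A quick, purely illustrative check with Python's timedelta:

    from datetime import timedelta

    duration = timedelta(hours=2, minutes=13, seconds=13)   # time the job actually ran
    wait_time = timedelta(minutes=12, seconds=21)           # time spent before the run started
    runtime = duration + wait_time

    print(runtime)  # 2:25:34, matching the reported runtime
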
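The rados task near the end of the task list weights its random operation mix: read and write carry a weight of 100 each, while snap_create, snap_remove, rollback, and delete carry 50 each, so reads and writes are each drawn about twice as often as the snapshot and delete operations (100 out of a total weight of 400, i.e. roughly a quarter of the 4000 ops each). The sketch below illustrates that weighting with a simple random.choices picker; it is an assumption-level stand-in, not the actual ceph_test_rados workload generator.

    import random
    from collections import Counter

    # Weights copied from the job's rados task.
    op_weights = {
        "read": 100,
        "write": 100,
        "snap_create": 50,
        "snap_remove": 50,
        "rollback": 50,
        "delete": 50,
    }

    def pick_ops(weights, n_ops):
        """Draw n_ops operation names with probability proportional to weight."""
        ops, w = zip(*weights.items())
        return random.choices(ops, weights=w, k=n_ops)

    # The job schedules ops: 4000 against objects: 500; on average about a
    # quarter of the draws are reads and a quarter writes (100/400 each).
    print(Counter(pick_ops(op_weights, 4000)))
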