Description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/filestore-xfs 4-supported-random-distro$/{centos_8} 5-pool/ec-data-pool 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}

Log: http://qa-proxy.ceph.com/teuthology/yuriw-2023-08-21_23:09:58-rbd-pacific-release-distro-default-smithi/7374888/teuthology.log

  • log_href: http://qa-proxy.ceph.com/teuthology/yuriw-2023-08-21_23:09:58-rbd-pacific-release-distro-default-smithi/7374888/teuthology.log
  • archive_path: /home/teuthworker/archive/yuriw-2023-08-21_23:09:58-rbd-pacific-release-distro-default-smithi/7374888
  • description: rbd/migration/{1-base/install 2-clusters/{fixed-3 openstack} 3-objectstore/filestore-xfs 4-supported-random-distro$/{centos_8} 5-pool/ec-data-pool 6-prepare/qcow2-http 7-io-workloads/qemu_xfstests 8-migrate-workloads/execute 9-cleanup/cleanup}
  • duration: 1:30:06
  • email: yweinste@redhat.com
  • failure_reason:
  • flavor: default
  • job_id: 7374888
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuriw-2023-08-21_23:09:58-rbd-pacific-release-distro-default-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: pacific-release
    • ceph:
      • conf:
        • client:
          • rbd default data pool: datapool
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • bluestore block size: 96636764160
          • debug ms: 1
          • debug osd: 20
          • enable experimental unrecoverable data corrupting features: *
          • osd debug randomize hobject sort order: False
          • osd objectstore: bluestore
          • osd shutdown pgref assert: True
          • osd sloppy crc: True
      • flavor: default
      • fs: xfs
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 21b2d401852937440c3da8ce2b19224181760caa
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
        • osd:
          • osd objectstore: filestore
          • osd sloppy crc: True
      • filestore: True
      • fs: xfs
    • install:
      • ceph:
        • flavor: default
        • sha1: 21b2d401852937440c3da8ce2b19224181760caa
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • thrashosds:
      • bdev_inject_crash: 2
      • bdev_inject_crash_probability: 0.5
    • workunit:
      • branch: pacific-release
      • sha1: 21b2d401852937440c3da8ce2b19224181760caa
  • owner: scheduled_yuriw@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.x', 'osd.0', 'osd.1', 'osd.2', 'osd.3']
    • ['mon.b', 'mgr.y', 'osd.4', 'osd.5', 'osd.6', 'osd.7']
    • ['client.0']
  • sentry_event:
  • status: pass
  • success: True
  • branch: pacific-release
  • seed:
  • sha1: 21b2d401852937440c3da8ce2b19224181760caa
  • subset:
  • suite:
  • suite_branch: pacific-release
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: 21b2d401852937440c3da8ce2b19224181760caa
  • targets:
    • smithi177.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCtreT+SqofWJ1tffUua0Su76yNqUlB/aOG4m5Fy3wlbSeaq12+Hzjm6HU2eLyw0atdy5ZLP2yyhyC5OYZsb1ITER/PgM4gx++JZEZB4/0lpC5Dsi+Jv9+fU77aPouvU2WccpJBjWKGDt2oS7EP9OnOWRsTZl0Vb98MsUU2NWVtZKcGPAaY6wB+f2rBypc5V4OeSKx9P1TYQHSs+Tl2+RiieVjGU1qYDcXASbXZSEhsaKH+T7X4h+m9dO81WrlZyXxAFQBx5p4x5v3IfGlWRCbR+6cCIu4W/yOgPBZZGj+z1WOxjx7CM2ENxt2QVxNoh57a+Qg/vhMuGZzHczsYAZsRt4IwwXFIepxpPkNEvYt3pMVpt49HElu084HhlILkRHES7Ji9D7r90t0U6IcF4B9HeF8X54LF6tLqXXLqzXGdRYYTVd7WCKR0t9+3raTckHVQI8Gz454JPS4ghMkydhngTo4x8W3a/UJ3qPgYVTNgz7JVGp/aeGqIuU74NjOSFsE=
    • smithi183.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDF5aPN5rnK3l/AJrJMpv6W/OMEbmdsIw6teTi+y3QLqUOnUTckeURdVD+1ANCh+EqfYdrP0PGoYbBzSewf6dRbav+lSdBWs4i4reViM3ujG95IVaTcGoQQkWEpjdQ2Lw//nWOKeWGH2+oQGoCAzSbDXGNQDsCzSDxXCkhJoMruGEx+vYIs7D6MN0FhGgliwpFr4crWy9HLgTkJpinH6XN3xLup3z8ynCCI0b8hdhnkLzJqHFbM0W0fC/wruxm+LZw2u6vjuWlh6RlLnPMDYk+u1WC2I9AmoC9oRXU0oRRCyLB3mTrelvIRwz4tyjuUad4gOKHxzw3DS7r7bemSjXGoIBOQ/jm4AccLCQAfusVA4RJ/4UH7HS7T8SEZQvv6QMC6efzF+ytHXolkoaisFeWvecIcieyBS5Lo+xn6GsqFcjr+ftXGygw7hqo3BoVHJ34WPFCBiB7/LBuWhPY3nnx0lM9YD6UJba880Xbk3sv9VOsgYFZgkf6klEl90IiIke8=
    • smithi194.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7wuIniFbXd7iJ0Lz8VTZ50SsoLfi12KkhjmjuiZKenjblgiP9r/gTVuNdv7Wbg7Gaj8HMGUzQk/qkOcEM7pEm2uVhIPOLRpF5ZGjb4q3CYuC050569E9w6leffO1e6Z5awa7k1u36e1Ghz8Yb+G7PU9fszYxcGn7Mt5PbxzcTK11xC+6oP6g1zimeEmYwvXk+UQB3cpZE96Z9aXPnmybShUB6M00lNV5CCM6T3kVACQ3UmRtE2Rs2DaE1a2WKELRbZYkcapJpULE9kQMfebbV0IwMp1v071tdU2RyVj4QmbOkkkDJvRGjDHv8IFnJDV1nuZvfBr6WvQoBgQU7W5gI8DaPQaX9QJcgqe3Wbfe+OYaFhdL2CoFImuyITQjjtiPAETq6qbFkskYkOKPbA/qPA/W1ZxWm8RZEz06M7A8uZWt2ushIR9qNdzQiFRNmkdEHXjyIsLm3unkqzY7rZQLOSn3TWyoHaREJsY15CLODzW/BeopO3eBXc9vkfM/2u7U=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
    • ceph:
    • exec:
      • client.0:
        • sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
        • sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
        • sudo ceph osd pool set datapool allow_ec_overwrites true
        • rbd pool init datapool
    • exec:
      • client.0:
        • mkdir /home/ubuntu/cephtest/migration
        • qemu-img create -f qcow2 /home/ubuntu/cephtest/migration/empty.qcow2 1G
        • echo '{"type":"qcow","stream":{"type":"http","url":"http://download.ceph.com/qa/ubuntu-12.04.qcow2"}}' | rbd migration prepare --import-only --source-spec-path - client.0.0
        • rbd migration prepare --import-only --source-spec '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.qcow2"}}' client.0.1
        • rbd migration prepare --import-only --source-spec '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.qcow2"}}' client.0.2
    • parallel:
      • io_workload
      • migrate_workload
    • exec:
      • client.0:
        • rm -rf /home/ubuntu/cephtest/migration
  • teuthology_branch: main
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2023-08-21 23:11:09
  • started: 2023-08-22 01:29:53
  • updated: 2023-08-22 03:09:03
  • status_class: success
  • runtime: 1:39:10
  • wait_time: 0:09:04
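
Notes: the first exec task builds the erasure-coded data pool for the 5-pool/ec-data-pool fragment. The profile teuthologyprofile (k=2, m=1, crush-failure-domain=osd) stripes each object across two data chunks plus one coding chunk on distinct OSDs, so the pool survives the loss of any single OSD. RBD keeps image metadata in a replicated pool and places only object data in the EC pool, which is why clients are pointed at it via the `rbd default data pool: datapool` conf override above; allow_ec_overwrites also requires BlueStore, which is why the ec-data-pool fragment forces `osd objectstore: bluestore` even though the job description names filestore-xfs. A minimal sketch of the same setup with sanity checks; the image name testimage is hypothetical, everything else mirrors the job:

    # Create the EC profile and pool exactly as the exec task does.
    sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
    sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
    sudo ceph osd pool set datapool allow_ec_overwrites true
    rbd pool init datapool
    # Sanity checks (not part of the job): confirm the profile and overwrite flag.
    ceph osd erasure-code-profile get teuthologyprofile
    ceph osd pool get datapool allow_ec_overwrites
    # Image metadata stays in the replicated rbd pool; only data lands in datapool.
    rbd create --size 1G --data-pool datapool rbd/testimage   # hypothetical image
    rbd info rbd/testimage                                    # reports data_pool: datapool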
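The second exec task stages three import-only live migrations for the 6-prepare/qcow2-http fragment: client.0.0 is prepared from a qcow2 image streamed over HTTP, while client.0.1 and client.0.2 are prepared from a locally created empty qcow2 file. `rbd migration prepare --import-only` creates the target image and links it to the external source described by the JSON source-spec without copying any data; reads are served through the source until the migration is executed and committed. A sketch of the full lifecycle for one image, reusing the job's own spec (the execute/commit steps belong to the migrate workload below, not this task):

    # Stage the migration: client.0.0 is created and linked to the remote
    # qcow2; no data is copied yet.
    echo '{"type":"qcow","stream":{"type":"http","url":"http://download.ceph.com/qa/ubuntu-12.04.qcow2"}}' \
        | rbd migration prepare --import-only --source-spec-path - client.0.0
    rbd status client.0.0              # reports the migration as prepared
    rbd migration execute client.0.0   # copy all blocks from the source
    rbd migration commit client.0.0    # detach the source; the image is now native
    # rbd migration abort client.0.0   # would roll back the prepare instead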
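The parallel task then runs the two workloads concurrently: io_workload (7-io-workloads/qemu_xfstests) boots a QEMU guest that runs xfstests against the prepared images, while migrate_workload (8-migrate-workloads/execute) drives the staged migrations to completion underneath that live I/O, which is the behavior under test. A plausible expansion of the migrate side, assuming the suite fragment simply executes and commits each image in turn (the exact commands and timings live in the fragment itself):

    # Illustrative only: finish each staged migration while the QEMU guest
    # keeps issuing I/O against the same images.
    for img in client.0.0 client.0.1 client.0.2; do
        rbd migration execute "$img"   # bulk copy races the live guest I/O
        rbd migration commit "$img"    # safe once execute has completed
    done

The final exec task removes /home/ubuntu/cephtest/migration once both workloads finish.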