Description: rgw:verify/{0-install centos_latest clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability msgr-failures/few objectstore/filestore-xfs overrides proto/https rgw_pool_type/replicated sharding$/{single} striping$/{stripe-equals-chunk} tasks/{cls ragweed reshard s3tests-java s3tests} validater/valgrind}

Log: http://qa-proxy.ceph.com/teuthology/yuvalif-2022-03-09_15:03:47-rgw:verify-wip-arrow-submodule-ext-distro-basic-smithi/6728213/teuthology.log

Sentry event: https://sentry.ceph.com/organizations/ceph/?query=a191576caf4e4a6f910e370809f81ace

Failure Reason:

Command failed (workunit test rgw/run-reshard.sh) on smithi087 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=ccffbb339571237c0a75ec058c40a6b2244cbbf2 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rgw/run-reshard.sh'
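For context, rgw/run-reshard.sh exercises RGW bucket resharding through radosgw-admin against the job's client.0 gateway. A minimal manual sketch of that kind of sequence is below; the bucket name, object count, and target shard count are illustrative assumptions, not values taken from the workunit, and s3cmd is assumed to already be configured for an S3 user (the job installs s3cmd as an extra system package):

    #!/usr/bin/env bash
    # Hypothetical reproduction sketch, not the workunit itself.
    set -ex
    BUCKET=reshard-test-bucket        # illustrative name, not from the test

    # Create a bucket and write a handful of objects through S3.
    s3cmd mb "s3://${BUCKET}"
    for i in $(seq 1 20); do
        echo "object ${i}" > /tmp/obj.${i}
        s3cmd put "/tmp/obj.${i}" "s3://${BUCKET}/obj.${i}"
    done

    # Queue the bucket for resharding, run the reshard worker, and
    # check that the bucket index now reports the new shard count.
    radosgw-admin reshard add --bucket="${BUCKET}" --num-shards=2
    radosgw-admin reshard list
    radosgw-admin reshard process
    radosgw-admin reshard status --bucket="${BUCKET}"
    radosgw-admin bucket stats --bucket="${BUCKET}"

Note that in this job the reshard workunit ran under the rgw:verify overrides (beast frontend over https, valgrind memcheck wrapping radosgw, and "rgw override bucket index max shards: 1"), so a local run without those overrides may not reproduce the same failure.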

  • log_href: http://qa-proxy.ceph.com/teuthology/yuvalif-2022-03-09_15:03:47-rgw:verify-wip-arrow-submodule-ext-distro-basic-smithi/6728213/teuthology.log
  • archive_path: /home/teuthworker/archive/yuvalif-2022-03-09_15:03:47-rgw:verify-wip-arrow-submodule-ext-distro-basic-smithi/6728213
  • description: rgw:verify/{0-install centos_latest clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability msgr-failures/few objectstore/filestore-xfs overrides proto/https rgw_pool_type/replicated sharding$/{single} striping$/{stripe-equals-chunk} tasks/{cls ragweed reshard s3tests-java s3tests} validater/valgrind}
  • duration: 0:23:03
  • email: gsalomon@redhat.com
  • failure_reason: Command failed (workunit test rgw/run-reshard.sh) on smithi087 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=ccffbb339571237c0a75ec058c40a6b2244cbbf2 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rgw/run-reshard.sh'
  • flavor: default
  • job_id: 6728213
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: yuvalif-2022-03-09_15:03:47-rgw:verify-wip-arrow-submodule-ext-distro-basic-smithi
  • nuke_on_error: True
  • os_type: centos
  • os_version: 8.stream
  • overrides:
    • admin_socket:
      • branch: wip-arrow-submodule-ext
    • ceph:
      • conf:
        • client:
          • debug rgw: 20
          • rgw crypt require ssl: False
          • rgw crypt s3 kms backend: testing
          • rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
          • rgw d3n l1 datacache persistent path: /tmp/rgw_datacache/
          • rgw d3n l1 datacache size: 10737418240
          • rgw d3n l1 local datacache enabled: True
          • rgw enable ops log: True
          • rgw lc debug interval: 10
          • setgroup: ceph
          • setuser: ceph
        • global:
          • mon client directed command retry: 5
          • ms inject socket failures: 5000
          • osd heartbeat grace: 40
          • osd_max_pg_log_entries: 10
          • osd_min_pg_log_entries: 10
          • rgw override bucket index max shards: 1
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
          • mon osd crush smoke test: False
        • osd:
          • debug filestore: 20
          • debug journal: 20
          • debug ms: 20
          • debug osd: 25
          • osd fast shutdown: False
          • osd objectstore: filestore
          • osd shutdown pgref assert: True
          • osd sloppy crc: True
      • fs: xfs
      • log-ignorelist:
        • \(PG_AVAILABILITY\)
        • \(PG_DEGRADED\)
        • \(OSD_SLOW_PING_TIME
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: d3346521cf7832dd707d9a358851a95ee2c59790
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
        • osd:
          • osd objectstore: filestore
          • osd sloppy crc: True
      • filestore: True
      • fs: xfs
    • install:
      • ceph:
        • sha1: d3346521cf7832dd707d9a358851a95ee2c59790
    • openssl_keys:
      • rgw.client.0:
        • ca: root
        • client: client.0
        • embed-key: True
      • root:
        • client: client.0
        • cn: teuthology
        • install:
          • client.0
        • key-type: rsa:4096
    • rgw:
      • client.0:
        • ssl certificate: rgw.client.0
        • valgrind:
          • --tool=memcheck
          • --max-threads=1024
      • compression type: random
      • datacache: True
      • datacache_path: /tmp/rgw_datacache
      • ec-data-pool: False
      • frontend: beast
      • storage classes: LUKEWARM, FROZEN
    • selinux:
      • whitelist:
        • scontext=system_u:system_r:logrotate_t:s0
    • workunit:
      • branch: parquet_s3tests
      • sha1: ccffbb339571237c0a75ec058c40a6b2244cbbf2
  • owner: scheduled_yuvalif@teuthology
  • pid:
  • roles:
    • ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a']
    • ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'node-exporter.b']
  • sentry_event: https://sentry.ceph.com/organizations/ceph/?query=a191576caf4e4a6f910e370809f81ace
  • status: fail
  • success: False
  • branch: wip-arrow-submodule-ext
  • seed:
  • sha1: d3346521cf7832dd707d9a358851a95ee2c59790
  • subset:
  • suite:
  • suite_branch: parquet_s3tests
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: ccffbb339571237c0a75ec058c40a6b2244cbbf2
  • targets:
    • smithi087.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDePaPeyEFAUGZT67TwiQ4bV5Yojwab4kTEvKpj8ACCMyrY6OLgI4tPfAHTb3A5cXsdibd5lM/pqZAtAUDHxo309rm8gZLHihknrXTkxK+nwHhdJ5uvgaBsN/3Ws3DQcGFMWa+FrEOOO+Qm7Eqd2+LNbn5hXhP+U//SPs1KbR61cR7COGJWpZuGIMbBQ+tucFg7P5p+FqsZIRXX//nObB/NFb5I28HY2g7a2gyQHOONPP/bLJfF4+6tHgvpIFtonlQruUnDE4C8QBHbExpPUWMjFdcA6vTpzPQ7nW3TNhEyiACa4OyjI0l7wMUXyHtkr2FZdqX6/yYEDsK9mkDRan/MU3XhVX0PWmhmJpuK5ZdXDHV9rPKxT1YaNGrWn2g25j7YGkf9eAIpnTj7Wto5F8MZ5biDQXZXbY7pFUr68ON9otR7urUDGXpF+GkeNu/TbHXDJXvw/lImezpThT2BfU5nSITgOcaJTUuBHESAX18/X0TawW9kNTH+NtUhbFNid5M=
    • smithi176.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDJ8Agyb9qci4GcFbqBG+d3LwspOlS3Bven3Sv/UIFb6MKKSISidgIpDpEU/Q7AuBbU7PTS4Dbca4eNbA+tn5meE/ItMk5kaD5Bsw1KRFn7DBKRgZwNIP0XbZlOFv7+R4bRKvR2JVIJnpiaTRgniP4HEbeR8b39RbecUsCBchIafwDk2UqzU5AnPYDgPIEZvOiq27UfLaazPi3kjNaPTsFe2r2BQTJPsWMHYNc0kwwjMPTiWd2b0SJsYivGdNfq8fCbYQplvYrVLfYgEYnzjyV0MtkrEaD+Ud/vbpZLebZzAa4cfirmEVBI7Q4MIh6ViE1fwXfMld3RH7M2JfnZRYHWIHNkQcxBqoryjy4MGDjSh7ZE+mSjBlzrtnbbzxgnuxp72hs8kwjXwhgB++lCAHehjuVHLiDGP9hYacwWSI2lpkcqusR0P/SwWbpn6VV7wX41M8OSfxPd4YzlH/RM86ycmpzSn4SEQhfDHuNwe66k2DQObDF+amWjwMFeZ7RLlhs=
  • tasks:
    • internal.check_packages:
    • internal.buildpackages_prep:
    • internal.save_config:
    • internal.check_lock:
    • internal.add_remotes:
    • console_log:
    • internal.connect:
    • internal.push_inventory:
    • internal.serialize_remote_roles:
    • internal.check_conflict:
    • internal.check_ceph_data:
    • internal.vm_setup:
    • kernel:
      • kdb: True
      • sha1: distro
    • internal.base:
    • internal.archive_upload:
    • internal.archive:
    • internal.coredump:
    • internal.sudo:
    • internal.syslog:
    • internal.timer:
    • pcp:
    • selinux:
    • ansible.cephlab:
    • clock:
    • install:
      • extra_system_packages:
        • deb:
          • s3cmd
        • rpm:
          • s3cmd
      • sha1: d3346521cf7832dd707d9a358851a95ee2c59790
    • ceph:
    • openssl_keys:
    • rgw:
      • client.0:
        • ssl certificate: rgw.client.0
        • valgrind:
          • --tool=memcheck
          • --max-threads=1024
    • workunit:
      • clients:
        • client.0:
          • rgw/run-datacache.sh
      • env:
        • RGW_DATACACHE_PATH: /tmp/rgw_datacache
      • branch: parquet_s3tests
      • sha1: ccffbb339571237c0a75ec058c40a6b2244cbbf2
    • workunit:
      • clients:
        • client.0:
          • cls/test_cls_lock.sh
          • cls/test_cls_log.sh
          • cls/test_cls_refcount.sh
          • cls/test_cls_rgw.sh
          • cls/test_cls_rgw_gc.sh
          • cls/test_cls_rgw_stats.sh
          • cls/test_cls_cmpomap.sh
          • cls/test_cls_2pc_queue.sh
          • rgw/test_rgw_gc_log.sh
          • rgw/test_rgw_obj.sh
          • rgw/test_rgw_throttle.sh
          • rgw/test_librgw_file.sh
      • branch: parquet_s3tests
      • sha1: ccffbb339571237c0a75ec058c40a6b2244cbbf2
    • ragweed:
      • client.0:
        • default-branch: ceph-master
        • rgw_server: client.0
        • stages: prepare,check
    • workunit:
      • clients:
        • client.0:
          • rgw/run-reshard.sh
      • branch: parquet_s3tests
      • sha1: ccffbb339571237c0a75ec058c40a6b2244cbbf2
    • s3tests-java:
      • client.0:
        • force-branch: ceph-master
        • force-repo: https://github.com/ceph/java_s3tests.git
    • s3tests:
      • client.0:
        • force-branch: parquet_s3tests
        • git_remote: https://github.com/galsalomon66/
        • rgw_server: client.0
  • teuthology_branch: master
  • verbose: False
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2022-03-09 15:04:05
  • started: 2022-03-09 15:10:07
  • updated: 2022-03-09 15:43:11
  • status_class: danger
  • runtime: 0:33:04
  • wait_time: 0:10:01