Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                rhel 8.4
Description:       fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/client-readahead}
Failure Reason:    Test failure: test_flush (tasks.cephfs.test_readahead.TestReadahead)

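This test case can be rerun by itself outside teuthology. A minimal sketch, assuming a built Ceph source tree with a running vstart cluster and the test dependencies from the Ceph developer guide installed:

    # From the build directory of a Ceph checkout, with a vstart
    # cluster already up (e.g. ../src/vstart.sh -d -n). Runs only the
    # test case that failed in this job.
    python3 ../qa/tasks/vstart_runner.py \
        tasks.cephfs.test_readahead.TestReadahead.test_flush
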
Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                centos 8.3
Description:       fs/cephadm/multivolume/{0-start 1-mount 2-workload/dbench distro/centos_latest}
Failure Reason:    Command failed on smithi064 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:c9f46754c17c9bc381091a4ebba4aff4ddaf306f pull'

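This pull failure can usually be separated from cephadm itself by fetching the image by hand on the affected node. A sketch, assuming podman is the container runtime cephadm drives on this CentOS host:

    # Attempt the same image fetch cephadm tried; a registry or
    # network problem will reproduce here without cephadm involved.
    sudo podman pull quay.ceph.io/ceph-ci/ceph:c9f46754c17c9bc381091a4ebba4aff4ddaf306f
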
Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                centos 8.3
Description:       fs/valgrind/{begin centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/whitelist_health tasks/mirror}}

Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                ubuntu 20.04
Description:       fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} ms_mode/{secure} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/yes standby-replay tasks/{0-check-counter workunit/kernel_untar_build} wsync/{yes}}
Failure Reason:    "2021-12-17T09:19:55.273669+0000 mds.g (mds.1) 33 : cluster [WRN] Scrub error on inode 0x10000003f43 (/client.0/tmp/t/linux-5.4/drivers/dca/Kconfig) see mds.g log and `damage ls` output for details" in cluster log

Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                centos 8.stream
Description:       fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{pg-warn syntax whitelist_health whitelist_wrongly_marked_down} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-verify} 2-client 3-upgrade-with-workload 4-verify}}

Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                centos 8.stream
Description:       fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} overrides/{pg-warn syntax whitelist_health whitelist_wrongly_marked_down} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-verify} 2-client 3-upgrade-with-workload 4-verify}}
Failure Reason:    Command failed on smithi122 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image docker.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 01dccc3a-5f19-11ec-8c31-001a4aab830c -- bash -c \'ceph --format=json mds versions | jq -e ". | add == 4"\''

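The failed check sums the per-version daemon counts reported by `ceph mds versions` and asserts that all four expected MDS daemons are reporting. A standalone sketch of the same jq expression, with hypothetical output for illustration:

    # `ceph --format=json mds versions` returns a map from version
    # string to daemon count, e.g. (hypothetical):
    #   {"ceph version 16.2.4 (...) pacific (stable)": 3}
    # `add` sums the object's values, and `-e` makes jq's exit status
    # follow the boolean, so the command fails whenever the daemon
    # count is not exactly 4.
    echo '{"ceph version 16.2.4 (...) pacific (stable)": 3}' \
        | jq -e '. | add == 4'    # false -> exit status 1, as in this job
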
Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                rhel 8.4
Description:       fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp overrides/{frag prefetch_dirfrags/yes races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/1 tasks/{1-thrash/mon 2-workunit/fs/snaps}}

Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                centos 8.stream
Description:       fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8.stream} mount/fuse ms_mode/{legacy} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/yes standby-replay tasks/{0-check-counter workunit/kernel_untar_build} wsync/{no}}
Failure Reason:    Command failed (workunit test kernel_untar_build.sh) on smithi043 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=c9f46754c17c9bc381091a4ebba4aff4ddaf306f TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/kernel_untar_build.sh'

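Most of that command line is teuthology's wrapper (environment setup, adjust-ulimits, ceph-coverage, a 3h timeout); the failing piece is the workunit script itself, which can be run directly against any CephFS mount. A minimal sketch with illustrative paths:

    # Run the workunit by hand on a client mount; both paths here are
    # placeholders, not the teuthology ones.
    cd /mnt/cephfs
    /path/to/ceph/qa/workunits/kernel_untar_build.sh
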
Ceph Branch:       wip-vshankar-testing-20211216-114012
Suite Branch:      wip-vshankar-testing-20211216-114012
Teuthology Branch: master
Machine:           smithi
OS:                rhel 8.4
Description:       fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag prefetch_dirfrags/yes races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/1 tasks/{1-thrash/mon 2-workunit/fs/snaps}}