Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: rhel 8.4
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/journal-repair}
Failure Reason: "2022-03-23T17:40:58.356763+0000 mds.c (mds.1) 1 : cluster [ERR] error reading sessionmap 'mds1_sessionmap' -2 ((2) No such file or directory)" in cluster log

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: centos 8.stream
Description: fs/volumes/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/volumes/{overrides test/misc}}
Failure Reason: Test failure: test_subvolume_no_upgrade_v1_sanity (tasks.cephfs.test_volumes.TestMisc)

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: rhel 8.4
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/metrics}
Failure Reason: Test failure: test_perf_stats_stale_metrics (tasks.cephfs.test_mds_metrics.TestMDSMetrics)

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: centos 8.stream
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/multimds_misc}

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: rhel 8.4
Description: fs/multifs/{begin clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{mon-debug whitelist_health whitelist_wrongly_marked_down} tasks/multifs-auth}

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: centos 8.stream
Description: fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-comp omap_limit/10 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/5 scrub/yes standby-replay tasks/{0-check-counter workunit/fs/misc} wsync/{no}}
Failure Reason: hit max job timeout

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: rhel 8.4
Description: fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{frag prefetch_dirfrags/no races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/1 tasks/{1-thrash/mon 2-workunit/fs/snaps}}

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: ubuntu 20.04
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/snap-schedule}
Failure Reason: "2022-03-23T17:49:36.894616+0000 mon.a (mon.0) 357 : cluster [WRN] Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)" in cluster log

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: centos 8.stream
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/strays}
Failure Reason: Test failure: test_dirfrag_limit (tasks.cephfs.test_strays.TestStrays)

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: centos 8.stream
Description: fs/thrash/workloads/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag prefetch_dirfrags/no races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/5 tasks/{1-thrash/mds 2-workunit/suites/fsstress}}

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: ubuntu 18.04
Description: fs/upgrade/volumes/import-legacy/{bluestore-bitmap clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{pg-warn whitelist_health whitelist_wrongly_marked_down} tasks/{0-nautilus 1-client 2-upgrade 3-verify} ubuntu_18.04}
Failure Reason: Command failed on smithi100 with status 1: "sudo nsenter --net=/var/run/netns/ceph-ns-mnt.0 sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage daemon-helper kill ceph-fuse -f --admin-socket '/var/run/ceph/$cluster-$name.$pid.asok' --id vol_data_isolated --client_mountpoint=/volumes/_nogroup/vol_isolated mnt.0"

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: ubuntu 20.04
Description: fs/workload/{begin clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore-ec/bluestore-ec-root omap_limit/10000 overrides/{frag osd-asserts session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/3 scrub/no standby-replay tasks/{0-check-counter workunit/fs/norstats} wsync/{yes}}

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: ubuntu 20.04
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/client-readahead}
Failure Reason: Test failure: test_flush (tasks.cephfs.test_readahead.TestReadahead)

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: centos 8.stream
Description: fs/thrash/multifs/{begin clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount overrides/{distro/testing/{flavor/centos_latest k-testing} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{frag multifs session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_snaptests}}
Failure Reason: Command failed (workunit test fs/snaps/snaptest-multiple-capsnaps.sh) on smithi151 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e16c8a595ca0eb496325a6f9b5296e8c663cd8c7 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh'

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: ubuntu 20.04
Description: fs/volumes/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount overrides/{distro/testing/{flavor/ubuntu_latest k-testing} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/volumes/{overrides test/misc}}
Failure Reason: Test failure: test_subvolume_no_upgrade_v1_sanity (tasks.cephfs.test_volumes.TestMisc)

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: ubuntu 20.04
Description: fs/functional/{begin clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{no_client_pidfile whitelist_health whitelist_wrongly_marked_down} tasks/forward-scrub}

Ceph Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Suite Branch: wip-yuri4-testing-2022-03-21-1648-pacific
Teuthology Branch: master
Machine: smithi
OS: rhel 8.4
Description: fs/verify/{begin clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{mon-debug session_timeout whitelist_health whitelist_wrongly_marked_down} ranks/1 tasks/fsstress validater/valgrind}
Failure Reason: SSH connection to smithi002 was lost: 'sudo rm -rf -- /home/ubuntu/cephtest/workunits.list.client.0 /home/ubuntu/cephtest/clone.client.0'
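
Several of the failure reasons above are single CephFS test-case failures (for example tasks.cephfs.test_volumes.TestMisc.test_subvolume_no_upgrade_v1_sanity and tasks.cephfs.test_readahead.TestReadahead.test_flush). As a minimal sketch only, such cases can usually be exercised outside teuthology with the qa/tasks/vstart_runner.py helper from the Ceph source tree, assuming a local build of the branch under test, teuthology's Python dependencies installed in the environment, and a vstart cluster with enough daemons for the test; the daemon counts below are illustrative assumptions, not taken from this run.

    # Run from the build directory of a Ceph checkout of the branch under test.
    # Assumes vstart.sh works locally and teuthology's Python deps are importable.
    cd build
    MDS=3 MON=3 OSD=3 MGR=1 ../src/vstart.sh -n -d    # daemon counts are an assumption; adjust per test
    python3 ../qa/tasks/vstart_runner.py \
        tasks.cephfs.test_volumes.TestMisc.test_subvolume_no_upgrade_v1_sanity

The same invocation pattern applies to the other dotted test names listed above, e.g. tasks.cephfs.test_mds_metrics.TestMDSMetrics.test_perf_stats_stale_metrics.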