Each job record below lists the following fields in order: Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Description, and Failure Reason (omitted for jobs with no failure reason reported). The ID, Status, and Nodes columns are not included in this listing.

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/blogbench}}
Failure Reason: Command failed on smithi149 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description:
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cephfs_misc_tests}
"2024-03-26T07:35:02.765256+0000 mon.a (mon.0) 1555 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log
rishabh-yuri-RunCephCmd-reef
rishabh-yuri-RunCephCmd-reef
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/dbench}}
Failure Reason: Command failed on smithi144 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/ffsb}}
Failure Reason: Command failed on smithi073 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Failure Reason: reached maximum tries (51) after waiting for 300 seconds

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/dbench validater/valgrind}
Failure Reason: saw valgrind issues

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/fs/norstats}}
Failure Reason: Command failed on smithi052 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
Failure Reason: Test failure: test_mount_mon_and_osd_caps_present_mds_caps_absent (tasks.cephfs.test_multifs_auth.TestClientsWithoutAuth), test_mount_mon_and_osd_caps_present_mds_caps_absent (tasks.cephfs.test_multifs_auth.TestClientsWithoutAuth)

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} subvol_versions/create_subvol_version_v1 tasks/sessionmap}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/fsstress}}
Failure Reason: Command failed on smithi120 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 20.04
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_20.04} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} subvol_versions/create_subvol_version_v2 tasks/snapshots}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/reef/{reef} 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Failure Reason: reached maximum tries (51) after waiting for 300 seconds

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/fsx}}
Failure Reason: Command failed on smithi006 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/iozone}}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/fsync-tester}}
Failure Reason: Command failed on smithi043 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/fs/test_o_trunc}}
Failure Reason: Command failed on smithi059 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} subvol_versions/create_subvol_version_v1 tasks/admin}
Failure Reason: Test failure: test_multiple_path_r (tasks.cephfs.test_admin.TestFsAuthorize), test_multiple_path_r (tasks.cephfs.test_admin.TestFsAuthorize)

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/iogen}}
Failure Reason: Command failed on smithi066 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/suites/ffsb}}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/iozone}}
Failure Reason: Command failed on smithi053 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description:
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-common}
Failure Reason: Test failure: test_fscrypt_dummy_encryption_with_quick_group (tasks.cephfs.test_fscrypt.TestFscrypt)

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: ubuntu 22.04
Description:
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
Failure Reason: Test failure: test_mount_mon_and_osd_caps_present_mds_caps_absent (tasks.cephfs.test_multifs_auth.TestClientsWithoutAuth), test_mount_mon_and_osd_caps_present_mds_caps_absent (tasks.cephfs.test_multifs_auth.TestClientsWithoutAuth)

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/valgrind/{begin/{0-install 1-ceph 2-logrotate} centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/ignorelist_health tasks/mirror}}
"2024-03-26T08:13:55.378107+0000 mon.a (mon.0) 2159 : cluster [WRN] Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)" in cluster log
rishabh-yuri-RunCephCmd-reef
rishabh-yuri-RunCephCmd-reef
main
smithi
ubuntu 22.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/osd 2-workunit/suites/fsstress}}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/pjd}}
Failure Reason: Command failed on smithi088 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 8.stream
Description:
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/quincy 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client/fuse 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Failure Reason: reached maximum tries (51) after waiting for 300 seconds

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/direct_io}}
Failure Reason: Command failed on smithi033 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_latest} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/fs/misc}}
Failure Reason: Command failed on smithi143 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-dbench}

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/kernel_untar_build}}
Failure Reason: Command failed on smithi113 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description:
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} subvol_versions/create_subvol_version_v1 tasks/forward-scrub}
"2024-03-26T08:13:38.323223+0000 mds.c (mds.0) 1 : cluster [ERR] dir 0x10000000000 object missing on disk; some files may be lost (/dir)" in cluster log
rishabh-yuri-RunCephCmd-reef
rishabh-yuri-RunCephCmd-reef
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/postgres}}
Failure Reason: Command failed on smithi073 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
Failure Reason: Test failure: test_mount_mon_and_osd_caps_present_mds_caps_absent (tasks.cephfs.test_multifs_auth.TestClientsWithoutAuth), test_mount_mon_and_osd_caps_present_mds_caps_absent (tasks.cephfs.test_multifs_auth.TestClientsWithoutAuth)

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/blogbench}}
Failure Reason: Command failed on smithi026 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'

Ceph Branch: rishabh-yuri-RunCephCmd-reef
Suite Branch: rishabh-yuri-RunCephCmd-reef
Teuthology Branch: main
Machine: smithi
OS: rhel 8.6
Description:
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/dbench}}
Failure Reason: Command failed on smithi053 with status 2: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume getpath cephfs --group_name qa sv_0'