ID
Status
Ceph Branch
Suite Branch
Teuthology Branch
Machine
OS
Nodes
Description
Failure Reason
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/fs/norstats}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap-schedule}
Command failed on smithi106 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_pjd}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/upgraded_client/from_nautilus/{bluestore-bitmap centos_latest clusters/{1-mds-1-client-micro} conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-nautilus 1-client-upgrade 2-client-sanity}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-iozone}
Command failed on smithi158 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap_schedule_snapdir}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/ffsb}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsstress}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snapshots}
Test failure: test_allow_new_snaps_config (tasks.cephfs.test_snapshots.TestSnapshots), test_allow_new_snaps_config (tasks.cephfs.test_snapshots.TestSnapshots)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_dbench traceless/50pc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_trivial_sync}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.1 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.1 && cd /home/ubuntu/cephtest/clone.client.1 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays}
Command failed on smithi027 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/ior-shared-file}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
Test failure: test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps), test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/misc}}
Test failure: test_binary_metafile_on_legacy_to_v1_upgrade (tasks.cephfs.test_volumes.TestMisc), test_binary_metafile_on_legacy_to_v1_upgrade (tasks.cephfs.test_volumes.TestMisc)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}
"2023-01-01T17:55:31.513561+0000 mgr.x (mgr.14100) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/test_journal_migration}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/fsstress}}
Command failed on smithi150 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsx}}
Command failed on smithi079 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/volumes/_nogroup/sv_1/084104ec-f837-4b7b-b95b-3d21984a55b9 /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs,ms_mode=legacy,nowsync'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/truncate_delay}
Command crashed: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'cd $TESTDIR/mnt.0 && dd if=/dev/zero of=./foo count=100'"
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mds 2-workunit/fs/trivial_sync}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi005 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5338699e-89fd-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/dir-max-entries}
Command failed on smithi093 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsync-tester}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/ffsb}}
Command failed on smithi087 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/quota}
Command failed (workunit test fs/quota/quota.sh) on smithi154 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 CEPH_MNT=/home/ubuntu/cephtest/mnt.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/fs/quota/quota.sh'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{rhel_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/libcephfs_python}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/fsstress validater/lockdep}
Command failed (workunit test suites/fsstress.sh) on smithi073 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/iozone}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/acls}
Test failure: test_acls (tasks.cephfs.test_acls.TestACLs), test_acls (tasks.cephfs.test_acls.TestACLs)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/fs/test_o_trunc}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/iozone}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/admin}
Command failed on smithi086 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-pjd}
Command failed on smithi175 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/pjd}}
Command failed on smithi063 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/alternate-pool}
Test failure: test_rebuild_simple (tasks.cephfs.test_recovery_pool.TestRecoveryPool)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}
Command failed on smithi064 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
Command failed on smithi016 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/snapshot}}
Command failed on smithi100 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
 
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench}
{'smithi153.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:04.696723', 'end': '2023-01-01 17:51:22.512273', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 17:51:17.815550', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. 
Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}, 'smithi044.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:04.754114', 'end': '2023-01-01 17:51:22.824218', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 17:51:18.070104', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. 
Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. 
Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_ffsb traceless/50pc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/iogen}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/asok_dump_tree}
Test failure: test_basic (tasks.cephfs.test_dump_tree.TestDumpTree)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_snaptests}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}
"2023-01-01T18:07:10.319805+0000 mgr.y (mgr.14105) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/auto-repair}
Command failed on smithi008 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/osd 2-workunit/fs/snaps}}
Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi101 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/backtrace}
Test failure: test_backtrace (tasks.cephfs.test_backtrace.TestBacktrace)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/iozone}}
Command failed on smithi092 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/volumes/_nogroup/sv_1/4847eda0-24d2-4eb1-8f45-23a5e42559b8 /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,ms_mode=legacy,nowsync,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi043 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4aca6b60-8a00-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_fsstress}}
Command failed on smithi111 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.a=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/cap-flush}
{'smithi120.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi106.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mds 2-workunit/suites/ffsb}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-limits}
Command failed on smithi044 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/pjd}}
Error reimaging machines: 'NoneType' object has no attribute '_fields'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/dbench validater/valgrind}
Command failed on smithi139 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/suites/fsstress}}
Command failed on smithi105 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-readahead}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_suites_pjd}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-recovery}
Test failure: test_basic (tasks.cephfs.test_client_recovery.TestClientRecovery), test_basic (tasks.cephfs.test_client_recovery.TestClientRecovery)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/bugs/client_trim_caps/{begin/{0-install 1-ceph 2-logrotate} centos_latest clusters/small-cluster conf/{client mds mon osd} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/trim-i24137}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/cephadm/multivolume/{0-start 1-mount 2-workload/dbench distro/single-container-host}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-common}
Command failed on smithi129 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/full/{begin/{0-install 1-ceph 2-logrotate} clusters/1-node-1-mds-1-osd conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mgr-osd-full}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{centos_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/mirror/{begin/{0-install 1-ceph 2-logrotate} cephfs-mirror/one-per-cluster clients/{mirror} cluster/{1-node} mount/fuse objectstore/bluestore-bitmap overrides/{whitelist_health} supported-random-distros$/{rhel_8} tasks/mirror}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/mirror-ha/{begin/{0-install 1-ceph 2-logrotate} cephfs-mirror/three-per-cluster clients/{mirror} cluster/{1-node} objectstore/bluestore-bitmap overrides/{whitelist_health} supported-random-distro$/{ubuntu_latest} workloads/cephfs-mirror-ha-workunit}
reached maximum tries (50) after waiting for 300 seconds
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cephfs_misc_tests}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}
Test failure: test_join_fs_vanilla (tasks.cephfs.test_failover.TestClusterAffinity)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/shell/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/cephfs-shell}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/top/{begin/{0-install 1-ceph 2-logrotate} cluster/{1-node} mount/fuse objectstore/bluestore-bitmap overrides/ignorelist_health supported-random-distros$/{rhel_8} tasks/fstop}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/valgrind/{begin/{0-install 1-ceph 2-logrotate} centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/whitelist_health tasks/mirror}}
Test failure: test_cephfs_mirror_restart_sync_on_blocklist (tasks.cephfs.test_mirroring.TestMirroring)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/basic}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/direct_io}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/damage}
Command failed on smithi063 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/osd 2-workunit/fs/trivial_sync}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress traceless/50pc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/data-scan}
Test failure: test_parallel_execution (tasks.cephfs.test_data_scan.TestDataScan)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_trivial_sync}}
{'smithi157.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}}
"2023-01-01T18:28:29.879620+0000 mgr.x (mgr.4109) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/fs/misc}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/exports}
{'smithi016.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi018.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mds 2-workunit/suites/iozone}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/forward-scrub}
Command failed on smithi110 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/upgraded_client/from_nautilus/{bluestore-bitmap centos_latest clusters/{1-mds-1-client-micro} conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-nautilus 1-client-upgrade 2-client-sanity}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/ffsb}}
Command failed on smithi090 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.a=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/fragment}
Test failure: test_deep_split (tasks.cephfs.test_fragment.TestFragmentation)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/kernel_untar_build}}
Command failed on smithi079 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs,ms_mode=legacy,nowsync'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/suites/pjd}}
Command failed on smithi073 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi116 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 96eade46-8a03-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/journal-repair}
{'smithi085.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi060.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/iozone}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/1 tasks/fsstress validater/lockdep}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds-flush}
Command failed on smithi035 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}
"2023-01-01T18:44:28.400328+0000 mgr.y (mgr.14122) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/postgres}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/ior-shared-file}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
Test failure: test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps), test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/clone}}
Test failure: test_clone_failure_status_failed (tasks.cephfs.test_volumes.TestSubvolumeSnapshotClones), test_clone_failure_status_failed (tasks.cephfs.test_volumes.TestSubvolumeSnapshotClones)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mon 2-workunit/fs/snaps}}
Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi105 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds-full}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-dbench}
Command failed on smithi098 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds_creation_retry}
{'smithi085.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi060.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/osd 2-workunit/suites/ffsb}}
{'smithi043.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_blogbench traceless/50pc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/blogbench}}
error during scrub thrashing: Command failed on smithi088 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph tell mds.1:1 damage ls'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/metrics}
Command failed on smithi063 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_snaptests}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/multimds_misc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{rhel_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/ino_release_cb}
{'smithi017.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mds 2-workunit/suites/fsstress}}
Command failed on smithi012 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/dbench}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/openfiletable}
{'smithi060.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_suites_fsstress}}
{'smithi139.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/pool-perm}
Command failed on smithi064 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mon 2-workunit/fs/trivial_sync}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/quota}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/ffsb}}
{'smithi043.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}
Command failed on smithi153 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
Command failed on smithi005 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/misc}}
Command failed on smithi035 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_pjd}}
Command failed on smithi099 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/recovery-fs}
Test failure: test_recover_fs_after_fsmap_removal (tasks.cephfs.test_recovery_fs.TestFSRecovery), test_recover_fs_after_fsmap_removal (tasks.cephfs.test_recovery_fs.TestFSRecovery)
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/dbench validater/valgrind}
Command failed on smithi040 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 32ba6f54-8a08-11ed-90c7-001a4aab830c -- ceph mon dump -f json'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/scrub}
Command failed on smithi136 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/osd 2-workunit/suites/iozone}}
{'smithi114.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/fs/norstats}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}
"2023-01-01T19:15:52.771715+0000 mgr.x (mgr.14098) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/sessionmap}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mds 2-workunit/suites/pjd}}
Command failed on smithi116 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
 
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench}
{'smithi150.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:04.896027', 'end': '2023-01-01 19:04:18.080024', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 19:04:13.183997', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}, 'smithi007.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:05.044847', 'end': '2023-01-01 19:04:18.199213', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 19:04:13.154366', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_dbench traceless/50pc}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-ffsb}
Command failed on smithi203 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap-schedule}
{'smithi098.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_trivial_sync}}
{'smithi017.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsstress}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap_schedule_snapdir}
Command failed on smithi100 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,snapdirname=.customsnapkernel,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mds 2-workunit/fs/snaps}}
Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi063 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snapshots}
Found coredumps on ubuntu@smithi079.front.sepia.ceph.com
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/ffsb}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsx}}
hit max job timeout
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays}
{'smithi182.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi093 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 989464ae-8a0a-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/cephadm/renamevolume/{0-start 1-rename distro/single-container-host overrides/ignorelist_health}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{rhel_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/libcephfs/{test}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cephfs_misc_tests}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/snapshot}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/ffsb}}
Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/test_journal_migration}
Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsync-tester}}
Command failed on smithi153 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/volumes/_nogroup/sv_1/baa5fa5f-e40d-4556-9afa-d230462ea0dd /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs,ms_mode=legacy,nowsync'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/truncate_delay}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}}
timeout expired in wait_until_healthy
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/iozone}}
Command failed on smithi110 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=a'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/fsstress validater/lockdep}
Command failed (workunit test suites/fsstress.sh) on smithi099 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/dir-max-entries}
{'smithi102.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/fsstress}}
Command failed on smithi092 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'
wip-vshankar-testing-20221221.044733
wip-vshankar-testing-20221221.044733
main
smithi
centos 8.stream
fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}}
"2023-01-01T19:39:04.808476+0000 mgr.y (mgr.4099) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log