Ceph Branch
Suite Branch
Teuthology Branch
Machine
OS
Description
Failure Reason

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mds 2-workunit/suites/iozone}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/fs/misc}}
Command failed on smithi035 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume create cephfs sv_0 ''"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi049 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 0d548764-4dd5-11ed-8437-001a4aab830c -e sha1=dc59eefe0db3fc40f87705ded1946cb8248b48c4 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/kernel_untar_build}}
Command failed on smithi006 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume create cephfs sv_0 ''"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/sessionmap}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/postgres}}
Command failed on smithi132 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume create cephfs sv_0 ''"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/pjd}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/blogbench}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap-schedule}
"1665982492.0701203 mon.a (mon.0) 481 : cluster [WRN] Health check failed: Reduced data availability: 2 pgs peering (PG_AVAILABILITY)" in cluster log

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}
Command failed on smithi149 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd dump --format=json'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/dbench}}
Command failed (workunit test suites/dbench.sh) on smithi006 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=dc59eefe0db3fc40f87705ded1946cb8248b48c4 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap_schedule_snapdir}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_trivial_sync}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/ffsb}}
Command failed on smithi035 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume create cephfs sv_0 ''"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_blogbench traceless/50pc}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/fs/norstats}}
Command failed on smithi055 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume create cephfs sv_0 ''"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi130 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5b165f64-4dd9-11ed-8437-001a4aab830c -e sha1=dc59eefe0db3fc40f87705ded1946cb8248b48c4 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/cephadm/renamevolume/{0-start 1-rename distro/single-container-host overrides/ignorelist_health}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{ubuntu_latest} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/libcephfs/{frag test}}
Command failed (workunit test libcephfs/test.sh) on smithi040 with status 139: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=dc59eefe0db3fc40f87705ded1946cb8248b48c4 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/libcephfs/test.sh'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cephfs_misc_tests}
Test failure: test_file_filesystem_sync_crash (tasks.cephfs.test_misc.TestMisc)

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}
Test failure: test_rw_with_no_fsname_and_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps)

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
rhel 8.6
fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd}
Command failed on smithi132 with status 1: 'sudo yum -y install ceph-radosgw ceph-test ceph ceph-base cephadm ceph-immutable-object-cache ceph-mgr ceph-mgr-dashboard ceph-mgr-diskprediction-local ceph-mgr-rook ceph-mgr-cephadm ceph-fuse ceph-volume librados-devel libcephfs2 libcephfs-devel librados2 librbd1 python3-rados python3-rgw python3-cephfs python3-rbd rbd-fuse rbd-mirror rbd-nbd python3-cephfs cephfs-top cephfs-mirror bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}
Cannot connect to remote host smithi087

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/clone}}
Test failure: test_subvolume_clone_retain_snapshot_with_snapshots (tasks.cephfs.test_volumes.TestSubvolumeSnapshotClones)

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snapshots}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/fsstress}}
Command failed on smithi046 with status 124: "sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph fs subvolume create cephfs sv_0 ''"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/fs/snaps}}
Command crashed: 'cd -- /home/ubuntu/cephtest/mnt.0 && sudo install -d -m 0755 --owner=ubuntu -- client.0'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/fsx}}
Cannot connect to remote host smithi035

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays}
Test failure: test_open_inode (tasks.cephfs.test_strays.TestStrays)

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
rhel 8.6
fs/verify/{begin/{0-install 1-ceph 2-logrotate} centos_8 clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/fsstress validater/lockdep}
Command failed on smithi153 with status 1: 'sudo yum -y install ceph-radosgw ceph-test ceph ceph-base cephadm ceph-immutable-object-cache ceph-mgr ceph-mgr-dashboard ceph-mgr-diskprediction-local ceph-mgr-rook ceph-mgr-cephadm ceph-fuse ceph-volume librados-devel libcephfs2 libcephfs-devel librados2 librbd1 python3-rados python3-rgw python3-cephfs python3-rbd rbd-fuse rbd-mirror rbd-nbd python3-cephfs cephfs-top cephfs-mirror bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib bison flex elfutils-libelf-devel openssl-devel NetworkManager iproute util-linux libacl-devel libaio-devel libattr-devel libtool libuuid-devel xfsdump xfsprogs xfsprogs-devel libaio-devel libtool libuuid-devel xfsprogs-devel postgresql postgresql-server postgresql-contrib'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}}
Command failed on smithi037 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph osd dump --format=json'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/fsync-tester}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/fs/test_o_trunc}}
SSH connection to smithi012 was lost: 'sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid fdb63c58-4dde-11ed-8437-001a4aab830c --force'

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/test_journal_migration}
Test failure: test_journal_migration (tasks.cephfs.test_journal_migration.TestJournalMigration)

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/osd 2-workunit/suites/ffsb}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/iogen}}
Cannot connect to remote host smithi006

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}}
SSH connection to smithi191 was lost: "if test -f /etc/yum.repos.d/ceph.repo ; then sudo sed -i -e ':a;N;$!ba;s/enabled=1\\ngpg/enabled=1\\npriority=1\\ngpg/g' -e 's;ref/[a-zA-Z0-9_-]*/;sha1/dc59eefe0db3fc40f87705ded1946cb8248b48c4/;g' /etc/yum.repos.d/ceph.repo ; fi"

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/truncate_delay}
Cannot connect to remote host smithi057

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-workunit/suites/iozone}}
{'smithi201.front.sepia.ceph.com': {'changed': False, 'msg': 'Failed to connect to the host via ssh: ssh: connect to host smithi201.front.sepia.ceph.com port 22: No route to host', 'unreachable': True}, 'smithi080.front.sepia.ceph.com': {'_ansible_no_log': False, 'msg': "Failed to connect to the host via ssh: Warning: Permanently added 'smithi080.front.sepia.ceph.com,172.21.15.80' (ECDSA) to the list of known hosts.\r\nubuntu@smithi080.front.sepia.ceph.com: Permission denied (publickey,password,keyboard-interactive)."}, 'smithi003.front.sepia.ceph.com': {'changed': False, 'msg': "Failed to connect to the host via ssh: Warning: Permanently added 'smithi003.front.sepia.ceph.com,172.21.15.3' (ECDSA) to the list of known hosts.\r\nubuntu@smithi003.front.sepia.ceph.com: Permission denied (publickey,password,keyboard-interactive).", 'unreachable': True}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mds 2-workunit/suites/fsstress}}
{'smithi033.front.sepia.ceph.com': {'_ansible_no_log': False, 'msg': "Failed to connect to the host via ssh: Warning: Permanently added 'smithi033.front.sepia.ceph.com,172.21.15.33' (ECDSA) to the list of known hosts.\r\nubuntu@smithi033.front.sepia.ceph.com: Permission denied (publickey,password,keyboard-interactive)."}, 'smithi153.front.sepia.ceph.com': {'changed': False, 'msg': 'Data could not be sent to remote host "smithi153.front.sepia.ceph.com". Make sure this host can be reached over ssh: ssh: connect to host smithi153.front.sepia.ceph.com port 22: No route to host\r\n', 'unreachable': True}}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_latest k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-workunit/suites/pjd}}
machine smithi055.front.sepia.ceph.com is locked by scheduled_teuthology@teuthology, not scheduled_vshankar@teuthology

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/dir-max-entries}
machine smithi088.front.sepia.ceph.com is locked by scheduled_teuthology@teuthology, not scheduled_vshankar@teuthology

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/direct_io}}
machine smithi046.front.sepia.ceph.com is locked by scheduled_teuthology@teuthology, not scheduled_vshankar@teuthology

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
ubuntu 20.04
fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps}

wip-vshankar-testing1-20221016-220257
wip-vshankar-testing1-20221016-220257
main
smithi
centos 8.stream
fs/workload/{0-centos_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/iogen}}