Columns: ID, Status, Ceph Branch, Suite Branch, Teuthology Branch, Machine, OS, Nodes, Description, Failure Reason

Ceph Branch: wip-pdonnell-testing-20240916.200549-debug
Suite Branch: wip-pdonnell-testing-20240916.200549-debug
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile pg_health} tasks/admin}

Ceph Branch: wip-pdonnell-testing-20240916.200549-debug
Suite Branch: wip-pdonnell-testing-20240916.200549-debug
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mgr mon osd} distro/{centos_latest} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{client-shutdown frag ignorelist_health ignorelist_wrongly_marked_down multifs pg_health session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_suites_pjd}}
Failure Reason: Command failed (workunit test suites/pjd.sh) on smithi122 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=ce1b342bf8c6349a8c90cd607f83bff2443f21cf TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

Ceph Branch: wip-pdonnell-testing-20240916.200549-debug
Suite Branch: wip-pdonnell-testing-20240916.200549-debug
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/fuse objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/postgres}}
Failure Reason: Command failed on smithi192 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:ce1b342bf8c6349a8c90cd607f83bff2443f21cf shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5bfc4f56-7483-11ef-bceb-c7b262605968 -- ceph mon dump -f json'

Ceph Branch: wip-pdonnell-testing-20240916.200549-debug
Suite Branch: wip-pdonnell-testing-20240916.200549-debug
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/no 5-quiesce/no 6-workunit/suites/blogbench}}
Failure Reason: Command failed on smithi088 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:ce1b342bf8c6349a8c90cd607f83bff2443f21cf shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid df24462c-7483-11ef-bceb-c7b262605968 -- ceph mon dump -f json'

Ceph Branch: wip-pdonnell-testing-20240916.200549-debug
Suite Branch: wip-pdonnell-testing-20240916.200549-debug
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_9.stream conf/{client mds mgr mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_upgrade ignorelist_wrongly_marked_down pg-warn pg_health syntax} roles tasks/{0-from/squid 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client/kclient 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Failure Reason: "2024-09-16T23:50:00.000278+0000 mon.smithi164 (mon.0) 131 : cluster [WRN] osd.0 (root=default,host=smithi164) is down" in cluster log

Ceph Branch: wip-pdonnell-testing-20240916.200549-debug
Suite Branch: wip-pdonnell-testing-20240916.200549-debug
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
Description: fs/workload/{0-centos_9.stream begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mgr mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts pg_health session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/yes 5-quiesce/with-quiesce 6-workunit/suites/dbench}}
Failure Reason: Command failed on smithi174 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay-quay-quay.apps.os.sepia.ceph.com/ceph-ci/ceph:ce1b342bf8c6349a8c90cd607f83bff2443f21cf shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid c6dba6a4-7484-11ef-bceb-c7b262605968 -- ceph mon dump -f json'