Each job entry below lists its Ceph Branch, Suite Branch, Teuthology Branch, Machine, and OS, followed by the job Description and, for failed jobs, the Failure Reason; the ID, Status, and Nodes columns are not populated.

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/direct_io}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/fs/misc}}
Command failed (workunit test fs/misc/multiple_rsync.sh) on smithi086 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/misc/multiple_rsync.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/kernel_untar_build}}
Command failed (workunit test kernel_untar_build.sh) on smithi069 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/kernel_untar_build.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/postgres}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/blogbench}}
Command failed on smithi019 with status 22: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 0 ceph --cluster ceph --admin-daemon /var/run/ceph/ceph-mds.d.asok perf dump'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/suites/dbench}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/ffsb}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/fs/norstats}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/fsstress}}
Command failed (workunit test suites/fsstress.sh) on smithi038 with status 135: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/suites/fsx}}
Command failed (workunit test suites/fsx.sh) on smithi090 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsx.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/fsync-tester}}
SELinux denials found on ubuntu@smithi006.front.sepia.ceph.com: ['type=AVC msg=audit(1708500935.538:191): avc: denied { checkpoint_restore } for pid=1160 comm="agetty" capability=40 scontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tcontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tclass=capability2 permissive=1']

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/fs/test_o_trunc}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/iogen}}
SELinux denials found on ubuntu@smithi100.front.sepia.ceph.com: ['type=AVC msg=audit(1708500953.706:193): avc: denied { checkpoint_restore } for pid=1191 comm="agetty" capability=40 scontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tcontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tclass=capability2 permissive=1']

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/suites/iozone}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/pjd}}
Command failed (workunit test suites/pjd.sh) on smithi132 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/direct_io}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/fs/misc}}
Error reimaging machines: reached maximum tries (101) after waiting for 600 seconds

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/kernel_untar_build}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/postgres}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/suites/blogbench}}
Command failed (workunit test suites/blogbench.sh) on smithi026 with status 135: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/blogbench.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/suites/dbench}}
Command failed (workunit test suites/dbench.sh) on smithi136 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/dbench.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/suites/ffsb}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/fs/norstats}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/suites/fsstress}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/suites/fsx}}
Command failed (workunit test suites/fsx.sh) on smithi022 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsx.sh'

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/suites/fsync-tester}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/fs/test_o_trunc}}
SELinux denials found on ubuntu@smithi089.front.sepia.ceph.com: ['type=AVC msg=audit(1708501816.023:194): avc: denied { checkpoint_restore } for pid=1193 comm="agetty" capability=40 scontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tcontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tclass=capability2 permissive=1']

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/suites/iogen}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/suites/iozone}}
SELinux denials found on ubuntu@smithi032.front.sepia.ceph.com: ['type=AVC msg=audit(1708501668.238:193): avc: denied { checkpoint_restore } for pid=1244 comm="agetty" capability=40 scontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tcontext=system_u:system_r:getty_t:s0-s0:c0.c1023 tclass=capability2 permissive=1']

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/random export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/suites/pjd}}

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/direct_io}}
['Failed to manage policy for boolean nagios_run_sudo: [Errno 11] Resource temporarily unavailable']

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{centos_9.stream k-stock} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/no 5-workunit/fs/misc}}
['Data could not be sent to remote host "smithi162.front.sepia.ceph.com". Make sure this host can be reached over ssh: ssh: connect to host smithi162.front.sepia.ceph.com port 22: No route to host', 'Data could not be sent to remote host "smithi181.front.sepia.ceph.com". Make sure this host can be reached over ssh: ssh: connect to host smithi181.front.sepia.ceph.com port 22: No route to host']

Ceph Branch: wip-vshankar-testing-20240216.060239
Suite Branch: wip-vshankar-testing-20240216.060239
Teuthology Branch: main
Machine: smithi
OS: centos 9.stream
fs:workload/{0-centos_9.stream begin/{0-install 1-ceph 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{balancer/automatic export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/yes 5-workunit/kernel_untar_build}}
Command failed (workunit test kernel_untar_build.sh) on smithi088 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=e96e3d6862ee9161eb8aa9ba1dc0988359a5c4cb TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/kernel_untar_build.sh'