Name:          smithi135.front.sepia.ceph.com
Machine Type:  smithi
Up:            True
Locked:        True
Locked Since:  2022-08-11 12:42:52.382535
Locked By:     scheduled_amathuri@teuthology
OS Type:       rhel
OS Version:    8.4
Arch:          x86_64
Description:   /home/amathuri/teuthology/archive/amathuri-2022-08-11_12:31:50-rados-main-distro-default-smithi/35
Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 6967233 2022-08-11 08:21:38 2022-08-11 08:22:39 2022-08-11 08:52:02 0:29:23 0:19:34 0:09:49 smithi main ubuntu fs:mgr-failover/mgr-failover 1
Failure Reason:

Command failed on smithi135 with status 1: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'declare -a sleeps=( 60 30 30 30 30 60 30 30 30 60 120 ) sleep 300 for ((i = 0; i < ${#sleeps[*]}; i++)); do sleep ${sleeps[$i]} active_mgr=$(ceph mgr dump | jq --raw-output .active_name) ceph mgr fail $active_mgr sleep 5 sudo ceph-mgr -i $active_mgr done sleep 60'"
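The embedded script reads more clearly with its statement boundaries restored. A sketch of the same failover loop, reconstructed from the failure reason above (sleep values and commands are copied verbatim; the line breaks are assumed where the one-line quote ran statements together):

    # Fail the active mgr repeatedly, restarting it each time, with varying pauses.
    declare -a sleeps=( 60 30 30 30 30 60 30 30 30 60 120 )
    sleep 300
    for ((i = 0; i < ${#sleeps[*]}; i++)); do
        sleep ${sleeps[$i]}
        active_mgr=$(ceph mgr dump | jq --raw-output .active_name)
        ceph mgr fail $active_mgr
        sleep 5
        sudo ceph-mgr -i $active_mgr
    done
    sleep 60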

pass 6966754 2022-08-11 02:25:43 2022-08-11 12:13:31 2022-08-11 12:42:51 0:29:20 0:23:20 0:06:00 smithi main rhel 8.4 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag prefetch_dirfrags/no races session_timeout thrashosds-health whitelist_health whitelist_wrongly_marked_down} ranks/5 tasks/{1-thrash/mon 2-workunit/suites/fsstress}} 2
pass 6966634 2022-08-11 02:23:12 2022-08-11 10:02:18 2022-08-11 12:13:53 2:11:35 2:05:05 0:06:30 smithi main centos 8.stream fs/valgrind/{begin/{0-install 1-ceph 2-logrotate} centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/whitelist_health tasks/mirror}} 1
pass 6966573 2022-08-11 02:21:12 2022-08-11 09:14:13 2022-08-11 10:02:20 0:48:07 0:38:18 0:09:49 smithi main ubuntu 20.04 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-dispatch-delay rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/ec-pool-snaps-few-objects-overwrites} 2
pass 6966542 2022-08-11 02:20:37 2022-08-11 08:51:03 2022-08-11 09:14:52 0:23:49 0:12:09 0:11:40 smithi main ubuntu 20.04 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-stupid rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 6966463 2022-08-11 02:19:05 2022-08-11 07:58:25 2022-08-11 08:22:01 0:23:36 0:16:26 0:07:10 smithi main centos 8.stream rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rmdir-reactivate} 2
pass 6966422 2022-08-11 02:18:16 2022-08-11 07:31:25 2022-08-11 07:58:42 0:27:17 0:16:44 0:10:33 smithi main ubuntu 20.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/cache} 2
pass 6966389 2022-08-11 02:17:39 2022-08-11 07:08:46 2022-08-11 07:33:04 0:24:18 0:17:40 0:06:38 smithi main rhel 8.4 orch/cephadm/orchestrator_cli/{0-random-distro$/{rhel_8.4_container_tools_3.0} 2-node-mgr agent/on orchestrator_cli} 2
pass 6966331 2022-08-11 02:17:04 2022-08-11 06:22:39 2022-08-11 07:09:41 0:47:02 0:38:51 0:08:11 smithi main rhel 8.4 orch/cephadm/thrash/{0-distro/rhel_8.4_container_tools_3.0 1-start 2-thrash 3-tasks/rados_api_tests fixed-2 msgr/async-v1only root} 2
pass 6966311 2022-08-11 02:16:52 2022-08-11 06:02:29 2022-08-11 06:24:26 0:21:57 0:15:07 0:06:50 smithi main centos 8.stream orch/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-flag} 2
fail 6966120 2022-08-10 20:38:28 2022-08-10 21:04:24 2022-08-10 21:45:27 0:41:03 0:27:05 0:13:58 smithi main ubuntu 18.04 rados/rook/smoke/{0-distro/ubuntu_18.04 0-kubeadm 1-rook 2-workload/radosbench 3-final cluster/3-node k8s/1.21 net/calico rook/1.6.2} 3
Failure Reason:

'check osd count' reached maximum tries (90) after waiting for 900 seconds
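The numbers in that message describe the polling loop: 90 tries at roughly 10-second intervals account for the 900 seconds of waiting before the rook smoke test gave up. A minimal sketch of that kind of OSD-count check, assuming an illustrative expected count and poll command (not the exact check teuthology runs):

    # Poll until the expected number of OSDs is up; give up after 90 tries (~900s).
    expected=3
    for try in $(seq 1 90); do
        up=$(ceph osd stat --format=json | jq .num_up_osds)
        [ "$up" -ge "$expected" ] && exit 0
        sleep 10
    done
    echo "'check osd count' reached maximum tries (90)" >&2
    exit 1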

pass 6966083 2022-08-10 19:13:49 2022-08-10 20:08:40 2022-08-10 20:26:12 0:17:32 0:08:46 0:08:46 smithi main ubuntu 20.04 rgw/multifs/{clusters/fixed-2 frontend/beast ignore-pg-availability objectstore/filestore-xfs overrides rgw_pool_type/ec-profile s3tests-branch tasks/rgw_ragweed ubuntu_latest} 2
fail 6965809 2022-08-10 13:43:38 2022-08-10 20:25:54 2022-08-10 21:07:16 0:41:22 0:34:18 0:07:04 smithi main centos 8.stream rgw:verify/{0-install centos_latest clusters/fixed-2 datacache/rgw-datacache frontend/beast ignore-pg-availability msgr-failures/few objectstore/filestore-xfs overrides proto/https rgw_pool_type/ec s3tests-branch sharding$/{default} striping$/{stripe-greater-than-chunk} tasks/{cls ragweed reshard s3tests-java s3tests} validater/lockdep} 2
Failure Reason:

Command failed (s3 tests against rgw) on smithi135 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto-client.0.cfg REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt /home/ubuntu/cephtest/s3-tests-client.0/virtualenv/bin/python -m nose -w /home/ubuntu/cephtest/s3-tests-client.0 -v -a '!fails_on_rgw,!lifecycle_expiration,!fails_strict_rfc2616,!test_of_sts,!webidentity_test,!fails_with_subdomain,!sse-s3'"
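The same s3-tests invocation, reflowed for readability (environment variables, paths, and the excluded-attribute list are copied from the failure reason above; this is a sketch of how the suite drives nose, not a standalone reproducer):

    S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.0.conf \
    BOTO_CONFIG=/home/ubuntu/cephtest/boto-client.0.cfg \
    REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt \
    /home/ubuntu/cephtest/s3-tests-client.0/virtualenv/bin/python -m nose \
        -w /home/ubuntu/cephtest/s3-tests-client.0 -v \
        -a '!fails_on_rgw,!lifecycle_expiration,!fails_strict_rfc2616,!test_of_sts,!webidentity_test,!fails_with_subdomain,!sse-s3'

The -a filter tells nose to skip tests tagged with attributes this run cannot satisfy (e.g. sse-s3, webidentity_test).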

fail 6965514 2022-08-10 04:12:41 2022-08-10 23:08:49 2022-08-11 06:03:22 6:54:33 6:44:55 0:09:38 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 scrub/yes standby-replay subvolume/{with-namespace-isolated} tasks/{0-check-counter workunit/suites/fsstress}} 3
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi007 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6c92256aa8f3896b308f2d3342c1d6de5a3cdd1c TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'
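Exit status 124 is what GNU timeout returns when the command it wraps exceeds its limit, so this fsstress failure, like the identical one in job 6965468 further down, is a 6-hour timeout rather than a test assertion. A minimal illustration of the convention:

    # GNU timeout exits with 124 when the wrapped command runs past the limit.
    timeout 2 sleep 5
    echo $?    # prints 124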

pass 6965491 2022-08-10 04:12:16 2022-08-10 22:27:44 2022-08-10 23:11:32 0:43:48 0:33:55 0:09:53 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10000 overrides/{frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} scrub/no standby-replay subvolume/{with-namespace-isolated-and-quota} tasks/{0-check-counter workunit/direct_io}} 3
pass 6965473 2022-08-10 04:11:57 2022-08-10 21:56:42 2022-08-10 22:28:19 0:31:37 0:25:44 0:05:53 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10 overrides/{frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} scrub/yes standby-replay subvolume/{with-namespace-isolated-and-quota} tasks/{0-check-counter workunit/suites/fsync-tester}} 3
fail 6965468 2022-08-10 04:11:52 2022-08-10 13:31:47 2022-08-10 20:08:43 6:36:56 6:29:58 0:06:58 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10 overrides/{frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} scrub/yes standby-replay subvolume/{with-no-extra-options} tasks/{0-check-counter workunit/suites/fsstress}} 3
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi114 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6c92256aa8f3896b308f2d3342c1d6de5a3cdd1c TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'

pass 6965456 2022-08-10 04:11:40 2022-08-10 12:57:52 2022-08-10 13:32:58 0:35:06 0:21:36 0:13:30 smithi main centos 8.stream fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mon 2-workunit/suites/pjd}} 2
fail 6965442 2022-08-10 04:11:26 2022-08-10 12:25:10 2022-08-10 12:59:55 0:34:45 0:27:23 0:07:22 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/no} objectstore-ec/bluestore-comp omap_limit/10 overrides/{frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} scrub/yes standby-replay subvolume/{with-namespace-isolated-and-quota} tasks/{0-check-counter workunit/suites/pjd}} 3
Failure Reason:

Command failed (workunit test suites/pjd.sh) on smithi094 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=6c92256aa8f3896b308f2d3342c1d6de5a3cdd1c TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

pass 6965433 2022-08-10 04:11:17 2022-08-10 12:05:50 2022-08-10 12:26:47 0:20:57 0:13:07 0:07:50 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/alternate-pool} 2