Name:         smithi131.front.sepia.ceph.com
Machine Type: smithi
Up:           True
Locked:       True
Locked Since: 2023-01-31 08:17:58.551513
Locked By:    scheduled_yuriw@teuthology
OS Type:      rhel
OS Version:   8.6
Arch:         x86_64
Description:  /home/teuthworker/archive/yuriw-2023-01-30_23:06:08-rbd-wip-yuri6-testing-2023-01-26-0941-distro-default-smithi/7144213
Status | Job ID | Posted | Started | Updated | Runtime | Duration | In Waiting | Machine | Teuthology Branch | OS Type | OS Version | Description | Nodes
pass 7145164 2023-01-31 05:02:07 2023-01-31 06:38:36 2023-01-31 07:14:52 0:36:16 0:18:53 0:17:23 smithi main centos 8.stream smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{centos_8} tasks/{0-install test/rbd_workunit_suites_iozone}} 3
running 7144213 2023-01-30 23:07:27 2023-01-31 08:17:58 2023-01-31 19:19:57 15:34:02 smithi main rhel 8.6 rbd/cli/{base/install clusters/{fixed-1 openstack} features/layering msgr-failures/few objectstore/bluestore-comp-zstd pool/small-cache-pool supported-random-distro$/{rhel_8} workloads/rbd_cli_migration} 1
pass 7144201 2023-01-30 23:07:16 2023-01-31 07:24:57 2023-01-31 08:13:42 0:48:45 0:20:42 0:28:03 smithi main ubuntu 20.04 rbd/librbd/{cache/writethrough clusters/{fixed-3 openstack} config/permit-partial-discard min-compat-client/default msgr-failures/few objectstore/bluestore-comp-snappy pool/small-cache-pool supported-random-distro$/{ubuntu_latest} workloads/python_api_tests_with_journaling} 3
pass 7144189 2023-01-30 23:07:04 2023-01-31 07:12:32 2023-01-31 07:34:50 0:22:18 0:09:25 0:12:53 smithi main ubuntu 20.04 rbd/basic/{base/install cachepool/small clusters/{fixed-1 openstack} msgr-failures/few objectstore/bluestore-comp-zstd supported-random-distro$/{ubuntu_latest} tasks/rbd_lock_and_fence} 1
pass 7144173 2023-01-30 23:06:56 2023-01-31 04:23:14 2023-01-31 05:49:48 1:26:34 1:12:10 0:14:24 smithi main ubuntu 20.04 rbd/pwl-cache/tmpfs/{1-base/install 2-cluster/{fix-2 openstack} 3-supported-random-distro$/{ubuntu_latest} 4-cache-path 5-cache-mode/ssd 6-cache-size/1G 7-workloads/qemu_xfstests} 2
pass 7144166 2023-01-30 23:06:53 2023-01-31 04:01:49 2023-01-31 04:27:30 0:25:41 0:11:23 0:14:18 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/secure msgr-failures/many tasks/rbd_image_read} 3
pass 7144158 2023-01-30 23:06:49 2023-01-31 03:38:04 2023-01-31 04:05:44 0:27:40 0:11:25 0:16:15 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/legacy$/{legacy} msgr-failures/few tasks/rbd_huge_tickets} 3
pass 7144146 2023-01-30 23:06:43 2023-01-31 03:18:38 2023-01-31 03:42:02 0:23:24 0:12:00 0:11:24 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/secure msgr-failures/few tasks/krbd_udev_symlinks} 3
pass 7144140 2023-01-30 23:06:38 2023-01-31 02:52:52 2023-01-31 03:20:17 0:27:25 0:11:36 0:15:49 smithi main krbd/rbd/{bluestore-bitmap clusters/fixed-3 conf ms_mode/crc$/{crc} msgr-failures/many tasks/rbd_workunit_suites_fsstress} 3
pass 7144133 2023-01-30 23:06:31 2023-01-31 02:35:18 2023-01-31 02:58:24 0:23:06 0:11:54 0:11:12 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/legacy$/{legacy} msgr-failures/few tasks/krbd_namespaces} 3
pass 7144123 2023-01-30 23:06:21 2023-01-31 02:11:32 2023-01-31 02:36:42 0:25:10 0:12:14 0:12:56 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/secure msgr-failures/few tasks/krbd_huge_osdmap} 3
pass 7144108 2023-01-30 23:06:07 2023-01-31 01:40:53 2023-01-31 02:12:53 0:32:00 0:16:19 0:15:41 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/crc$/{crc-rxbounce} msgr-failures/many tasks/rbd_map_unmap} 3
pass 7144096 2023-01-30 23:06:01 2023-01-31 01:19:47 2023-01-31 01:42:36 0:22:49 0:11:19 0:11:30 smithi main krbd/rbd-nomount/{bluestore-bitmap clusters/fixed-3 conf install/ceph ms_mode/legacy$/{legacy} msgr-failures/many tasks/rbd_kernel} 3
pass 7144069 2023-01-30 23:05:37 2023-01-31 00:44:22 2023-01-31 01:20:21 0:35:59 0:25:40 0:10:19 smithi main rhel 8.6 smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{rhel_8} tasks/{0-install test/rgw_s3tests}} 3
pass 7144055 2023-01-30 23:05:30 2023-01-31 00:14:14 2023-01-31 00:48:37 0:34:23 0:27:19 0:07:04 smithi main rhel 8.6 smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{rhel_8} tasks/{0-install test/rados_workunit_loadgen_mix}} 3
pass 7144048 2023-01-30 23:05:26 2023-01-30 23:36:34 2023-01-31 00:14:33 0:37:59 0:27:25 0:10:34 smithi main centos 8.stream smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{centos_8} tasks/{0-install test/rados_cache_snaps}} 3
pass 7144036 2023-01-30 23:05:13 2023-01-30 23:05:56 2023-01-30 23:36:51 0:30:55 0:20:24 0:10:31 smithi main centos 8.stream smoke/basic/{clusters/{fixed-3-cephfs openstack} objectstore/bluestore-bitmap supported-random-distro$/{centos_8} tasks/{0-install test/cfuse_workunit_suites_blogbench}} 3
fail 7143881 2023-01-30 12:36:06 2023-01-30 13:58:15 2023-01-30 20:58:45 7:00:30 6:44:24 0:16:06 smithi main ubuntu 20.04 fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/fsstress validater/valgrind} 2
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi131 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=860d3ac8d012f04a6667eb411e085656deb62181 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'
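Status 124 here does not come from fsstress itself: it is the exit code GNU coreutils timeout returns when it kills a command that outlives its limit, in this case the 'timeout 6h' wrapper visible in the invocation above (a valgrind-validated run, which commonly inflates runtimes well past the cap). A minimal sketch of that convention, using a hypothetical 2-second limit:

    # timeout passes through the wrapped command's exit status when it finishes in time...
    timeout 2s sleep 1; echo "exit=$?"    # prints exit=0
    # ...and exits with 124 when it has to kill the command at the deadline.
    timeout 2s sleep 10; echo "exit=$?"   # prints exit=124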

fail 7143871 2023-01-30 12:35:57 2023-01-30 13:31:38 2023-01-30 14:03:52 0:32:14 0:19:24 0:12:50 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi131 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 967de6b6-a0a5-11ed-9e56-001a4aab830c -e sha1=860d3ac8d012f04a6667eb411e085656deb62181 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"
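Status 22 maps to EINVAL ("Invalid argument") in errno terms, and the ceph CLI generally propagates errno-style codes from the mon/mgr. A plausible reading, not verified from this log alone, is that the v16.2.4 orchestrator being upgraded from predates staggered-upgrade support and so rejects the --daemon-types argument. A quick way to decode such codes, assuming a stock Python 3 on the node:

    # errno 22 is EINVAL; os.strerror gives the human-readable message.
    python3 -c 'import errno, os; print(errno.errorcode[22], "-", os.strerror(22))'
    # prints: EINVAL - Invalid argument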

fail 7143759 2023-01-30 09:50:05 2023-01-30 09:56:36 2023-01-30 13:32:46 3:36:10 3:23:17 0:12:53 smithi main centos 8.stream rados:thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/rados_api_tests} 2
Failure Reason:

Command failed (workunit test rados/test.sh) on smithi131 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=002a15140b0bd67076b61714a97eeeb08b616526 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
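Same failure mode as job 7143881 above: exit 124 from the GNU timeout wrapper, here a 3-hour cap on rados/test.sh. The table's timings are consistent with that reading, since the job's Duration of 3:23:17 exceeds the cap by roughly the install and teardown overhead surrounding the capped workunit; a quick sanity check of the arithmetic:

    # Duration 3:23:17 minus the 3h workunit cap leaves the setup/teardown overhead.
    python3 -c 'print((3*3600 + 23*60 + 17) - 3*3600, "seconds outside the 3h cap")'
    # prints: 1397 seconds outside the 3h cap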