Status Job ID Links Posted Started Updated Runtime Duration In Waiting Machine Teuthology Branch OS Type OS Version Description Nodes
pass 7130173 2023-01-01 15:14:26 2023-01-01 17:20:00 2023-01-01 18:14:15 0:54:15 0:42:45 0:11:30 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/fs/norstats}} 3
fail 7130174 2023-01-01 15:14:27 2023-01-01 17:20:00 2023-01-01 17:50:41 0:30:41 0:18:53 0:11:48 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap-schedule} 2
Failure Reason:

Command failed on smithi106 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

pass 7130175 2023-01-01 15:14:28 2023-01-01 17:20:00 2023-01-01 17:57:27 0:37:27 0:22:42 0:14:45 smithi main ubuntu 20.04 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_pjd}} 2
pass 7130176 2023-01-01 15:14:29 2023-01-01 17:20:01 2023-01-01 17:50:23 0:30:22 0:18:24 0:11:58 smithi main centos 8.stream fs/upgrade/upgraded_client/from_nautilus/{bluestore-bitmap centos_latest clusters/{1-mds-1-client-micro} conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-nautilus 1-client-upgrade 2-client-sanity}} 2
fail 7130177 2023-01-01 15:14:29 2023-01-01 17:20:01 2023-01-01 17:51:32 0:31:31 0:18:01 0:13:30 smithi main centos 8.stream fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-iozone} 3
Failure Reason:

Command failed on smithi158 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

pass 7130178 2023-01-01 15:14:30 2023-01-01 17:20:31 2023-01-01 17:50:28 0:29:57 0:15:48 0:14:09 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap_schedule_snapdir} 2
fail 7130179 2023-01-01 15:14:31 2023-01-01 17:21:02 2023-01-01 17:45:12 0:24:10 0:17:19 0:06:51 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/ffsb}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

pass 7130180 2023-01-01 15:14:31 2023-01-01 17:21:12 2023-01-01 18:00:55 0:39:43 0:25:39 0:14:04 smithi main centos 8.stream fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress} 2
pass 7130181 2023-01-01 15:14:32 2023-01-01 17:21:12 2023-01-01 17:55:58 0:34:46 0:23:31 0:11:15 smithi main centos 8.stream fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc} 2
dead 7130182 2023-01-01 15:14:33 2023-01-01 17:21:23 2023-01-02 05:32:28 12:11:05 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsstress}} 3
Failure Reason:

hit max job timeout

fail 7130183 2023-01-01 15:14:34 2023-01-01 17:21:54 2023-01-01 17:50:32 0:28:38 0:20:57 0:07:41 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snapshots} 2
Failure Reason:

Test failure: test_allow_new_snaps_config (tasks.cephfs.test_snapshots.TestSnapshots), test_allow_new_snaps_config (tasks.cephfs.test_snapshots.TestSnapshots)

fail 7130184 2023-01-01 15:14:34 2023-01-01 17:21:54 2023-01-01 18:03:18 0:41:24 0:28:59 0:12:25 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

timeout expired in wait_until_healthy

dead 7130185 2023-01-01 15:14:35 2023-01-01 17:23:55 2023-01-02 05:32:35 12:08:40 smithi main rhel 8.6 fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone} 2
Failure Reason:

hit max job timeout

pass 7130186 2023-01-01 15:14:36 2023-01-01 17:24:26 2023-01-01 18:29:24 1:04:58 0:54:08 0:10:50 smithi main rhel 8.6 fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_dbench traceless/50pc} 2
fail 7130187 2023-01-01 15:14:37 2023-01-01 17:24:26 2023-01-01 17:48:06 0:23:40 0:15:43 0:07:57 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_trivial_sync}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.1 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.1 && cd /home/ubuntu/cephtest/clone.client.1 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130188 2023-01-01 15:14:37 2023-01-01 17:25:06 2023-01-01 17:54:10 0:29:04 0:16:08 0:12:56 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays} 2
Failure Reason:

Command failed on smithi027 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

pass 7130189 2023-01-01 15:14:38 2023-01-01 17:25:17 2023-01-01 17:53:45 0:28:28 0:16:20 0:12:08 smithi main ubuntu 20.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/ior-shared-file} 5
fail 7130190 2023-01-01 15:14:39 2023-01-01 17:25:27 2023-01-01 17:55:34 0:30:07 0:23:25 0:06:42 smithi main rhel 8.6 fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth} 2
Failure Reason:

Test failure: test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps), test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps)

fail 7130191 2023-01-01 15:14:40 2023-01-01 17:25:47 2023-01-01 17:53:37 0:27:50 0:20:34 0:07:16 smithi main rhel 8.6 fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130192 2023-01-01 15:14:40 2023-01-01 17:25:48 2023-01-01 17:53:17 0:27:29 0:20:26 0:07:03 smithi main rhel 8.6 fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/misc}} 2
Failure Reason:

Test failure: test_binary_metafile_on_legacy_to_v1_upgrade (tasks.cephfs.test_volumes.TestMisc), test_binary_metafile_on_legacy_to_v1_upgrade (tasks.cephfs.test_volumes.TestMisc)

fail 7130193 2023-01-01 15:14:41 2023-01-01 17:25:48 2023-01-01 18:07:16 0:41:28 0:28:05 0:13:23 smithi main centos 8.stream fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}} 3
Failure Reason:

"2023-01-01T17:55:31.513561+0000 mgr.x (mgr.14100) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log

dead 7130194 2023-01-01 15:14:42 2023-01-01 17:26:58 2023-01-01 17:46:33 0:19:35 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/test_journal_migration} 2
Failure Reason:

Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds

fail 7130195 2023-01-01 15:14:43 2023-01-01 17:26:59 2023-01-01 17:53:28 0:26:29 0:15:39 0:10:50 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/fsstress}} 2
Failure Reason:

Command failed on smithi150 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130196 2023-01-01 15:14:44 2023-01-01 17:26:59 2023-01-01 18:06:25 0:39:26 0:28:34 0:10:52 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsx}} 3
Failure Reason:

Command failed on smithi079 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/volumes/_nogroup/sv_1/084104ec-f837-4b7b-b95b-3d21984a55b9 /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs,ms_mode=legacy,nowsync'

fail 7130197 2023-01-01 15:14:44 2023-01-01 17:27:00 2023-01-01 17:54:43 0:27:43 0:20:29 0:07:14 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/truncate_delay} 2
Failure Reason:

Command crashed: "sudo TESTDIR=/home/ubuntu/cephtest bash -c 'cd $TESTDIR/mnt.0 && dd if=/dev/zero of=./foo count=100'"

pass 7130198 2023-01-01 15:14:45 2023-01-01 17:27:30 2023-01-01 17:56:46 0:29:16 0:16:13 0:13:03 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mds 2-workunit/fs/trivial_sync}} 2
fail 7130199 2023-01-01 15:14:46 2023-01-01 17:27:50 2023-01-01 18:03:52 0:36:02 0:24:22 0:11:40 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi005 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 5338699e-89fd-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

fail 7130200 2023-01-01 15:14:46 2023-01-01 17:27:51 2023-01-01 17:54:58 0:27:07 0:15:38 0:11:29 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/dir-max-entries} 2
Failure Reason:

Command failed on smithi093 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

dead 7130201 2023-01-01 15:14:47 2023-01-01 17:28:01 2023-01-02 05:39:26 12:11:25 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsync-tester}} 3
Failure Reason:

hit max job timeout

fail 7130202 2023-01-01 15:14:48 2023-01-01 17:28:22 2023-01-01 17:57:28 0:29:06 0:16:11 0:12:55 smithi main ubuntu 20.04 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/ffsb}} 2
Failure Reason:

Command failed on smithi087 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=a'

fail 7130203 2023-01-01 15:14:49 2023-01-01 17:28:22 2023-01-01 18:15:51 0:47:29 0:33:25 0:14:04 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}} 3
Failure Reason:

timeout expired in wait_until_healthy

fail 7130204 2023-01-01 15:14:49 2023-01-01 17:30:23 2023-01-01 18:01:01 0:30:38 0:20:55 0:09:43 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/quota} 2
Failure Reason:

Command failed (workunit test fs/quota/quota.sh) on smithi154 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && cd -- /home/ubuntu/cephtest/mnt.1/client.1/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="1" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.1 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.1 CEPH_MNT=/home/ubuntu/cephtest/mnt.1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.1/qa/workunits/fs/quota/quota.sh'

pass 7130205 2023-01-01 15:14:50 2023-01-01 17:30:23 2023-01-01 18:03:03 0:32:40 0:19:39 0:13:01 smithi main ubuntu 20.04 fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
pass 7130206 2023-01-01 15:14:51 2023-01-01 17:30:53 2023-01-01 17:59:46 0:28:53 0:18:41 0:10:12 smithi main rhel 8.6 fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{rhel_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/libcephfs_python} 2
pass 7130207 2023-01-01 15:14:52 2023-01-01 17:30:53 2023-01-01 18:02:26 0:31:33 0:21:20 0:10:13 smithi main centos 8.stream fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
fail 7130208 2023-01-01 15:14:52 2023-01-01 17:30:54 2023-01-01 18:11:43 0:40:49 0:26:49 0:14:00 smithi main ubuntu 20.04 fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/fsstress validater/lockdep} 2
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi073 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'

fail 7130209 2023-01-01 15:14:53 2023-01-01 17:31:24 2023-01-01 17:56:25 0:25:01 0:18:35 0:06:26 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/iozone}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130210 2023-01-01 15:14:54 2023-01-01 17:31:24 2023-01-01 18:00:51 0:29:27 0:22:22 0:07:05 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/acls} 2
Failure Reason:

Test failure: test_acls (tasks.cephfs.test_acls.TestACLs), test_acls (tasks.cephfs.test_acls.TestACLs)

fail 7130211 2023-01-01 15:14:55 2023-01-01 17:31:25 2023-01-01 18:13:28 0:42:03 0:29:34 0:12:29 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

timeout expired in wait_until_healthy

pass 7130212 2023-01-01 15:14:55 2023-01-01 17:31:55 2023-01-01 18:25:23 0:53:28 0:41:49 0:11:39 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/fs/test_o_trunc}} 3
pass 7130213 2023-01-01 15:14:56 2023-01-01 17:31:55 2023-01-01 18:07:55 0:36:00 0:23:43 0:12:17 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/iozone}} 2
fail 7130214 2023-01-01 15:14:57 2023-01-01 17:32:16 2023-01-01 18:01:10 0:28:54 0:16:07 0:12:47 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/admin} 2
Failure Reason:

Command failed on smithi086 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130215 2023-01-01 15:14:58 2023-01-01 17:32:16 2023-01-01 18:02:32 0:30:16 0:17:43 0:12:33 smithi main centos 8.stream fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-pjd} 3
Failure Reason:

Command failed on smithi175 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130216 2023-01-01 15:14:58 2023-01-01 17:32:16 2023-01-01 18:02:45 0:30:29 0:18:29 0:12:00 smithi main centos 8.stream fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/pjd}} 2
Failure Reason:

Command failed on smithi063 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130217 2023-01-01 15:14:59 2023-01-01 17:32:17 2023-01-01 18:06:47 0:34:30 0:23:05 0:11:25 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/alternate-pool} 2
Failure Reason:

Test failure: test_rebuild_simple (tasks.cephfs.test_recovery_pool.TestRecoveryPool)

fail 7130218 2023-01-01 15:15:00 2023-01-01 17:32:47 2023-01-01 18:19:39 0:46:52 0:33:52 0:13:00 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}} 3
Failure Reason:

timeout expired in wait_until_healthy

pass 7130219 2023-01-01 15:15:00 2023-01-01 17:32:48 2023-01-01 18:05:12 0:32:24 0:18:27 0:13:57 smithi main ubuntu 20.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest} 4
fail 7130220 2023-01-01 15:15:01 2023-01-01 17:33:58 2023-01-01 18:02:57 0:28:59 0:16:17 0:12:42 smithi main ubuntu 20.04 fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover} 2
Failure Reason:

Command failed on smithi064 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130221 2023-01-01 15:15:02 2023-01-01 17:37:09 2023-01-01 18:06:18 0:29:09 0:18:53 0:10:16 smithi main rhel 8.6 fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
Failure Reason:

Command failed on smithi016 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130222 2023-01-01 15:15:03 2023-01-01 17:37:09 2023-01-01 18:06:52 0:29:43 0:18:09 0:11:34 smithi main centos 8.stream fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/snapshot}} 2
Failure Reason:

Command failed on smithi100 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

dead 7130223 2023-01-01 15:15:03 2023-01-01 17:37:20 2023-01-01 17:53:17 0:15:57 0:04:47 0:11:10 smithi main fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench} 2
Failure Reason:

{'smithi153.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:04.696723', 'end': '2023-01-01 17:51:22.512273', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 17:51:17.815550', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. 
Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}, 'smithi044.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:04.754114', 'end': '2023-01-01 17:51:22.824218', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 17:51:18.070104', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. 
Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. 
Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}}

pass 7130224 2023-01-01 15:15:04 2023-01-01 17:37:20 2023-01-01 18:46:59 1:09:39 0:58:44 0:10:55 smithi main centos 8.stream fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_ffsb traceless/50pc} 2
dead 7130225 2023-01-01 15:15:05 2023-01-01 17:37:20 2023-01-02 05:45:55 12:08:35 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/iogen}} 3
Failure Reason:

hit max job timeout

fail 7130226 2023-01-01 15:15:06 2023-01-01 17:37:21 2023-01-01 18:07:40 0:30:19 0:21:45 0:08:34 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/asok_dump_tree} 2
Failure Reason:

Test failure: test_basic (tasks.cephfs.test_dump_tree.TestDumpTree)

fail 7130227 2023-01-01 15:15:06 2023-01-01 17:37:41 2023-01-01 18:05:37 0:27:56 0:20:45 0:07:11 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_snaptests}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130228 2023-01-01 15:15:07 2023-01-01 17:37:51 2023-01-01 18:18:23 0:40:32 0:28:03 0:12:29 smithi main centos 8.stream fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}} 3
Failure Reason:

"2023-01-01T18:07:10.319805+0000 mgr.y (mgr.14105) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log

fail 7130229 2023-01-01 15:15:08 2023-01-01 17:38:02 2023-01-01 18:07:57 0:29:55 0:18:45 0:11:10 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/auto-repair} 2
Failure Reason:

Command failed on smithi008 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130230 2023-01-01 15:15:09 2023-01-01 17:38:02 2023-01-01 19:06:51 1:28:49 1:17:43 0:11:06 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/osd 2-workunit/fs/snaps}} 2
Failure Reason:

Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi101 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'

pass 7130231 2023-01-01 15:15:09 2023-01-01 17:38:32 2023-01-01 18:17:13 0:38:41 0:22:03 0:16:38 smithi main ubuntu 20.04 fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress} 2
pass 7130232 2023-01-01 15:15:10 2023-01-01 17:45:14 2023-01-01 18:16:44 0:31:30 0:18:56 0:12:34 smithi main ubuntu 20.04 fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc} 2
fail 7130233 2023-01-01 15:15:11 2023-01-01 17:46:44 2023-01-01 18:14:11 0:27:27 0:15:15 0:12:12 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/backtrace} 2
Failure Reason:

Test failure: test_backtrace (tasks.cephfs.test_backtrace.TestBacktrace)

fail 7130234 2023-01-01 15:15:12 2023-01-01 17:48:15 2023-01-01 18:29:27 0:41:12 0:28:34 0:12:38 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/iozone}} 3
Failure Reason:

Command failed on smithi092 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/volumes/_nogroup/sv_1/4847eda0-24d2-4eb1-8f45-23a5e42559b8 /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,ms_mode=legacy,nowsync,nofallback'

fail 7130235 2023-01-01 15:15:13 2023-01-01 17:50:26 2023-01-01 18:25:27 0:35:01 0:24:18 0:10:43 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi043 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 4aca6b60-8a00-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

fail 7130236 2023-01-01 15:15:13 2023-01-01 17:50:36 2023-01-01 18:19:03 0:28:27 0:16:11 0:12:16 smithi main ubuntu 20.04 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_fsstress}} 2
Failure Reason:

Command failed on smithi111 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.a=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

dead 7130237 2023-01-01 15:15:14 2023-01-01 17:50:37 2023-01-01 18:01:32 0:10:55 0:05:16 0:05:39 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/cap-flush} 2
Failure Reason:

{'smithi120.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi106.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130238 2023-01-01 15:15:15 2023-01-01 17:50:47 2023-01-01 18:13:09 0:22:22 0:15:17 0:07:05 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mds 2-workunit/suites/ffsb}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130239 2023-01-01 15:15:16 2023-01-01 17:51:37 2023-01-01 18:40:40 0:49:03 0:37:06 0:11:57 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}} 3
Failure Reason:

timeout expired in wait_until_healthy

fail 7130240 2023-01-01 15:15:16 2023-01-01 17:53:28 2023-01-01 18:21:04 0:27:36 0:16:10 0:11:26 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-limits} 2
Failure Reason:

Command failed on smithi044 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

dead 7130241 2023-01-01 15:15:17 2023-01-01 17:53:28 2023-01-01 17:57:48 0:04:20 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/pjd}} 3
Failure Reason:

Error reimaging machines: 'NoneType' object has no attribute '_fields'

fail 7130242 2023-01-01 15:15:18 2023-01-01 17:53:39 2023-01-01 18:35:17 0:41:38 0:31:16 0:10:22 smithi main rhel 8.6 fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/dbench validater/valgrind} 2
Failure Reason:

Command failed on smithi139 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130243 2023-01-01 15:15:18 2023-01-01 17:53:49 2023-01-01 18:22:23 0:28:34 0:15:27 0:13:07 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/suites/fsstress}} 2
Failure Reason:

Command failed on smithi105 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

pass 7130244 2023-01-01 15:15:19 2023-01-01 17:53:49 2023-01-01 18:22:27 0:28:38 0:15:02 0:13:36 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-readahead} 2
fail 7130245 2023-01-01 15:15:20 2023-01-01 17:53:49 2023-01-01 18:36:44 0:42:55 0:29:23 0:13:32 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

timeout expired in wait_until_healthy

pass 7130246 2023-01-01 15:15:21 2023-01-01 17:54:20 2023-01-01 18:30:11 0:35:51 0:25:24 0:10:27 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_suites_pjd}} 2
fail 7130247 2023-01-01 15:15:21 2023-01-01 17:54:50 2023-01-01 18:24:21 0:29:31 0:21:38 0:07:53 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-recovery} 2
Failure Reason:

Test failure: test_basic (tasks.cephfs.test_client_recovery.TestClientRecovery), test_basic (tasks.cephfs.test_client_recovery.TestClientRecovery)

pass 7130248 2023-01-01 15:15:22 2023-01-01 17:55:01 2023-01-01 18:28:51 0:33:50 0:22:14 0:11:36 smithi main centos 8.stream fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress} 2
pass 7130249 2023-01-01 15:15:23 2023-01-01 17:55:41 2023-01-01 18:24:45 0:29:04 0:18:26 0:10:38 smithi main centos 8.stream fs/bugs/client_trim_caps/{begin/{0-install 1-ceph 2-logrotate} centos_latest clusters/small-cluster conf/{client mds mon osd} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/trim-i24137} 1
fail 7130250 2023-01-01 15:15:24 2023-01-01 17:56:01 2023-01-01 18:37:24 0:41:23 0:29:42 0:11:41 smithi main centos 8.stream fs/cephadm/multivolume/{0-start 1-mount 2-workload/dbench distro/single-container-host} 2
Failure Reason:

timeout expired in wait_until_healthy

fail 7130251 2023-01-01 15:15:24 2023-01-01 17:56:32 2023-01-01 18:23:34 0:27:02 0:14:09 0:12:53 smithi main ubuntu 20.04 fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-common} 3
Failure Reason:

Command failed on smithi129 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

pass 7130252 2023-01-01 15:15:25 2023-01-01 17:56:52 2023-01-01 18:34:12 0:37:20 0:26:55 0:10:25 smithi main rhel 8.6 fs/full/{begin/{0-install 1-ceph 2-logrotate} clusters/1-node-1-mds-1-osd conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mgr-osd-full} 1
pass 7130253 2023-01-01 15:15:26 2023-01-01 17:57:32 2023-01-01 18:27:04 0:29:32 0:18:22 0:11:10 smithi main centos 8.stream fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{centos_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client} 2
pass 7130254 2023-01-01 15:15:26 2023-01-01 17:57:33 2023-01-01 20:29:42 2:32:09 2:22:04 0:10:05 smithi main rhel 8.6 fs/mirror/{begin/{0-install 1-ceph 2-logrotate} cephfs-mirror/one-per-cluster clients/{mirror} cluster/{1-node} mount/fuse objectstore/bluestore-bitmap overrides/{whitelist_health} supported-random-distros$/{rhel_8} tasks/mirror} 1
fail 7130255 2023-01-01 15:15:27 2023-01-01 17:57:33 2023-01-01 18:58:00 1:00:27 0:48:42 0:11:45 smithi main ubuntu 20.04 fs/mirror-ha/{begin/{0-install 1-ceph 2-logrotate} cephfs-mirror/three-per-cluster clients/{mirror} cluster/{1-node} objectstore/bluestore-bitmap overrides/{whitelist_health} supported-random-distro$/{ubuntu_latest} workloads/cephfs-mirror-ha-workunit} 1
Failure Reason:

reached maximum tries (50) after waiting for 300 seconds

pass 7130256 2023-01-01 15:15:28 2023-01-01 17:58:03 2023-01-01 19:15:50 1:17:47 1:04:50 0:12:57 smithi main ubuntu 20.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cephfs_misc_tests} 4
fail 7130257 2023-01-01 15:15:29 2023-01-01 17:59:34 2023-01-01 18:31:47 0:32:13 0:20:21 0:11:52 smithi main ubuntu 20.04 fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover} 2
Failure Reason:

Test failure: test_join_fs_vanilla (tasks.cephfs.test_failover.TestClusterAffinity)

pass 7130258 2023-01-01 15:15:29 2023-01-01 17:59:54 2023-01-01 18:30:37 0:30:43 0:18:18 0:12:25 smithi main ubuntu 20.04 fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc} 2
pass 7130259 2023-01-01 15:15:30 2023-01-01 18:00:54 2023-01-01 19:07:08 1:06:14 0:53:56 0:12:18 smithi main ubuntu 20.04 fs/shell/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/cephfs-shell} 2
pass 7130260 2023-01-01 15:15:31 2023-01-01 18:01:05 2023-01-01 19:31:12 1:30:07 1:16:45 0:13:22 smithi main centos 8.stream fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
pass 7130261 2023-01-01 15:15:32 2023-01-01 18:01:05 2023-01-01 18:28:09 0:27:04 0:17:20 0:09:44 smithi main rhel 8.6 fs/top/{begin/{0-install 1-ceph 2-logrotate} cluster/{1-node} mount/fuse objectstore/bluestore-bitmap overrides/ignorelist_health supported-random-distros$/{rhel_8} tasks/fstop} 1
fail 7130262 2023-01-01 15:15:32 2023-01-01 18:01:05 2023-01-01 19:48:43 1:47:38 1:37:32 0:10:06 smithi main centos 8.stream fs/valgrind/{begin/{0-install 1-ceph 2-logrotate} centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/whitelist_health tasks/mirror}} 1
Failure Reason:

Test failure: test_cephfs_mirror_restart_sync_on_blocklist (tasks.cephfs.test_mirroring.TestMirroring)

pass 7130263 2023-01-01 15:15:33 2023-01-01 18:01:16 2023-01-01 20:31:01 2:29:45 2:19:27 0:10:18 smithi main rhel 8.6 fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/basic}} 2
pass 7130264 2023-01-01 15:15:34 2023-01-01 18:01:36 2023-01-01 18:55:39 0:54:03 0:41:05 0:12:58 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/direct_io}} 3
fail 7130265 2023-01-01 15:15:35 2023-01-01 18:02:36 2023-01-01 18:47:57 0:45:21 0:33:01 0:12:20 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}} 3
Failure Reason:

timeout expired in wait_until_healthy

fail 7130266 2023-01-01 15:15:35 2023-01-01 18:02:37 2023-01-01 18:28:58 0:26:21 0:15:12 0:11:09 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/damage} 2
Failure Reason:

Command failed on smithi063 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

pass 7130267 2023-01-01 15:15:36 2023-01-01 18:02:48 2023-01-01 18:35:34 0:32:46 0:20:27 0:12:19 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/osd 2-workunit/fs/trivial_sync}} 2
dead 7130268 2023-01-01 15:15:37 2023-01-01 18:02:58 2023-01-02 06:11:09 12:08:11 smithi main rhel 8.6 fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone} 2
Failure Reason:

hit max job timeout

pass 7130269 2023-01-01 15:15:38 2023-01-01 18:03:08 2023-01-01 18:36:49 0:33:41 0:22:46 0:10:55 smithi main centos 8.stream fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress traceless/50pc} 2
fail 7130270 2023-01-01 15:15:38 2023-01-01 18:03:28 2023-01-01 18:43:09 0:39:41 0:27:23 0:12:18 smithi main centos 8.stream fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/data-scan} 2
Failure Reason:

Test failure: test_parallel_execution (tasks.cephfs.test_data_scan.TestDataScan)

dead 7130271 2023-01-01 15:15:39 2023-01-01 18:03:59 2023-01-01 18:18:28 0:14:29 0:06:41 0:07:48 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_trivial_sync}} 2
Failure Reason:

{'smithi157.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130272 2023-01-01 15:15:40 2023-01-01 18:05:19 2023-01-01 18:32:26 0:27:07 0:17:19 0:09:48 smithi main centos 8.stream fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}} 1
Failure Reason:

"2023-01-01T18:28:29.879620+0000 mgr.x (mgr.4109) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log

dead 7130273 2023-01-01 15:15:40 2023-01-01 18:05:19 2023-01-02 06:15:13 12:09:54 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/fs/misc}} 3
Failure Reason:

hit max job timeout

dead 7130274 2023-01-01 15:15:41 2023-01-01 18:05:40 2023-01-01 18:16:41 0:11:01 0:05:33 0:05:28 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/exports} 2
Failure Reason:

{'smithi016.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi018.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130275 2023-01-01 15:15:42 2023-01-01 18:06:20 2023-01-01 18:29:06 0:22:46 0:16:08 0:06:38 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mds 2-workunit/suites/iozone}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130276 2023-01-01 15:15:43 2023-01-01 18:06:31 2023-01-01 18:35:14 0:28:43 0:19:05 0:09:38 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/forward-scrub} 2
Failure Reason:

Command failed on smithi110 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

pass 7130277 2023-01-01 15:15:43 2023-01-01 18:06:51 2023-01-01 18:36:01 0:29:10 0:18:11 0:10:59 smithi main centos 8.stream fs/upgrade/upgraded_client/from_nautilus/{bluestore-bitmap centos_latest clusters/{1-mds-1-client-micro} conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-nautilus 1-client-upgrade 2-client-sanity}} 2
fail 7130278 2023-01-01 15:15:44 2023-01-01 18:07:01 2023-01-01 18:38:58 0:31:57 0:20:07 0:11:50 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/ffsb}} 2
Failure Reason:

Command failed on smithi090 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.a=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130279 2023-01-01 15:15:45 2023-01-01 18:07:42 2023-01-01 18:33:55 0:26:13 0:15:21 0:10:52 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/fragment} 2
Failure Reason:

Test failure: test_deep_split (tasks.cephfs.test_fragment.TestFragmentation)

fail 7130280 2023-01-01 15:15:46 2023-01-01 18:08:02 2023-01-01 18:47:12 0:39:10 0:28:18 0:10:52 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/kernel_untar_build}} 3
Failure Reason:

Command failed on smithi079 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs,ms_mode=legacy,nowsync'

fail 7130281 2023-01-01 15:15:46 2023-01-01 18:08:02 2023-01-01 18:38:52 0:30:50 0:18:15 0:12:35 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mon 2-workunit/suites/pjd}} 2
Failure Reason:

Command failed on smithi073 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130282 2023-01-01 15:15:47 2023-01-01 18:11:53 2023-01-01 18:48:40 0:36:47 0:23:58 0:12:49 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi116 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 96eade46-8a03-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

pass 7130283 2023-01-01 15:15:48 2023-01-01 18:13:13 2023-01-01 18:46:39 0:33:26 0:21:48 0:11:38 smithi main centos 8.stream fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
pass 7130284 2023-01-01 15:15:49 2023-01-01 18:13:34 2023-01-01 18:46:06 0:32:32 0:20:00 0:12:32 smithi main ubuntu 20.04 fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
dead 7130285 2023-01-01 15:15:49 2023-01-01 18:14:14 2023-01-01 18:24:57 0:10:43 0:05:24 0:05:19 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/journal-repair} 2
Failure Reason:

{'smithi085.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi060.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

pass 7130286 2023-01-01 15:15:50 2023-01-01 18:14:24 2023-01-01 18:51:05 0:36:41 0:24:21 0:12:20 smithi main centos 8.stream fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/iozone}} 2
pass 7130287 2023-01-01 15:15:51 2023-01-01 18:15:55 2023-01-01 18:49:40 0:33:45 0:22:31 0:11:14 smithi main ubuntu 20.04 fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu/{latest overrides}} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/1 tasks/fsstress validater/lockdep} 2
fail 7130288 2023-01-01 15:15:52 2023-01-01 18:15:55 2023-01-01 18:43:03 0:27:08 0:15:02 0:12:06 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds-flush} 2
Failure Reason:

Command failed on smithi035 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130289 2023-01-01 15:15:52 2023-01-01 18:16:46 2023-01-01 18:56:31 0:39:45 0:27:17 0:12:28 smithi main centos 8.stream fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}} 3
Failure Reason:

"2023-01-01T18:44:28.400328+0000 mgr.y (mgr.14122) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log

dead 7130290 2023-01-01 15:15:53 2023-01-01 18:17:16 2023-01-02 06:26:32 12:09:16 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-no-extra-options} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/postgres}} 3
Failure Reason:

hit max job timeout

pass 7130291 2023-01-01 15:15:54 2023-01-01 18:17:16 2023-01-01 18:47:37 0:30:21 0:16:25 0:13:56 smithi main ubuntu 20.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/ior-shared-file} 5
fail 7130292 2023-01-01 15:15:55 2023-01-01 18:19:07 2023-01-01 18:49:16 0:30:09 0:22:38 0:07:31 smithi main rhel 8.6 fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth} 2
Failure Reason:

Test failure: test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps), test_r_with_fsname_and_no_path_in_cap (tasks.cephfs.test_multifs_auth.TestMDSCaps)

fail 7130293 2023-01-01 15:15:55 2023-01-01 18:19:47 2023-01-01 18:47:08 0:27:21 0:21:06 0:06:15 smithi main rhel 8.6 fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

fail 7130294 2023-01-01 15:15:56 2023-01-01 18:21:08 2023-01-01 18:49:23 0:28:15 0:21:10 0:07:05 smithi main rhel 8.6 fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/clone}} 2
Failure Reason:

Test failure: test_clone_failure_status_failed (tasks.cephfs.test_volumes.TestSubvolumeSnapshotClones), test_clone_failure_status_failed (tasks.cephfs.test_volumes.TestSubvolumeSnapshotClones)

fail 7130295 2023-01-01 15:15:57 2023-01-01 18:22:28 2023-01-01 20:01:38 1:39:10 1:29:05 0:10:05 smithi main centos 8.stream fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mon 2-workunit/fs/snaps}} 2
Failure Reason:

Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi105 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'

pass 7130296 2023-01-01 15:15:57 2023-01-01 18:22:29 2023-01-01 19:02:09 0:39:40 0:25:54 0:13:46 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds-full} 2
fail 7130297 2023-01-01 15:15:58 2023-01-01 18:23:39 2023-01-01 19:02:05 0:38:26 0:29:44 0:08:42 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

timeout expired in wait_until_healthy

fail 7130298 2023-01-01 15:15:59 2023-01-01 18:23:39 2023-01-01 18:51:50 0:28:11 0:17:13 0:10:58 smithi main centos 8.stream fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{centos_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-dbench} 3
Failure Reason:

Command failed on smithi098 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

dead 7130299 2023-01-01 15:16:00 2023-01-01 18:24:50 2023-01-01 18:35:22 0:10:32 0:05:24 0:05:08 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/mds_creation_retry} 2
Failure Reason:

{'smithi085.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}, 'smithi060.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

dead 7130300 2023-01-01 15:16:00 2023-01-01 18:25:00 2023-01-01 18:37:55 0:12:55 0:06:45 0:06:10 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/osd 2-workunit/suites/ffsb}} 2
Failure Reason:

{'smithi043.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

dead 7130301 2023-01-01 15:16:01 2023-01-01 18:25:31 2023-01-02 06:33:03 12:07:32 smithi main rhel 8.6 fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone} 2
Failure Reason:

hit max job timeout

pass 7130302 2023-01-01 15:16:02 2023-01-01 18:25:31 2023-01-01 19:06:44 0:41:13 0:28:23 0:12:50 smithi main centos 8.stream fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_blogbench traceless/50pc} 2
fail 7130303 2023-01-01 15:16:03 2023-01-01 18:27:11 2023-01-01 19:21:46 0:54:35 0:42:08 0:12:27 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/blogbench}} 3
Failure Reason:

error during scrub thrashing: Command failed on smithi088 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph tell mds.1:1 damage ls'

fail 7130304 2023-01-01 15:16:03 2023-01-01 18:28:52 2023-01-01 18:58:02 0:29:10 0:18:43 0:10:27 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/metrics} 2
Failure Reason:

Command failed on smithi063 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

pass 7130305 2023-01-01 15:16:04 2023-01-01 18:29:02 2023-01-01 20:22:27 1:53:25 1:40:40 0:12:45 smithi main centos 8.stream fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_snaptests}} 2
fail 7130306 2023-01-01 15:16:05 2023-01-01 18:29:13 2023-01-01 19:14:59 0:45:46 0:33:00 0:12:46 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}} 3
Failure Reason:

timeout expired in wait_until_healthy

pass 7130307 2023-01-01 15:16:06 2023-01-01 18:29:33 2023-01-01 19:06:57 0:37:24 0:25:55 0:11:29 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/multimds_misc} 2
pass 7130308 2023-01-01 15:16:06 2023-01-01 18:29:33 2023-01-01 19:04:52 0:35:19 0:22:42 0:12:37 smithi main rhel 8.6 fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress} 2
dead 7130309 2023-01-01 15:16:07 2023-01-01 18:30:14 2023-01-01 18:53:32 0:23:18 0:11:10 0:12:08 smithi main rhel 8.6 fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{rhel_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/ino_release_cb} 2
Failure Reason:

{'smithi017.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

pass 7130310 2023-01-01 15:16:08 2023-01-01 18:30:44 2023-01-01 19:02:42 0:31:58 0:21:46 0:10:12 smithi main centos 8.stream fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_misc} 2
fail 7130311 2023-01-01 15:16:09 2023-01-01 18:31:54 2023-01-01 18:58:27 0:26:33 0:15:44 0:10:49 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mds 2-workunit/suites/fsstress}} 2
Failure Reason:

Command failed on smithi012 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

dead 7130312 2023-01-01 15:16:10 2023-01-01 18:32:35 2023-01-02 06:43:28 12:10:53 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/dbench}} 3
Failure Reason:

hit max job timeout

fail 7130313 2023-01-01 15:16:10 2023-01-01 18:34:15 2023-01-01 19:14:07 0:39:52 0:28:55 0:10:57 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

timeout expired in wait_until_healthy

dead 7130314 2023-01-01 15:16:11 2023-01-01 18:35:16 2023-01-01 18:47:48 0:12:32 0:06:34 0:05:58 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/openfiletable} 2
Failure Reason:

{'smithi060.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

dead 7130315 2023-01-01 15:16:12 2023-01-01 18:35:26 2023-01-01 18:48:16 0:12:50 0:06:33 0:06:17 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_suites_fsstress}} 2
Failure Reason:

{'smithi139.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130316 2023-01-01 15:16:13 2023-01-01 18:35:26 2023-01-01 19:04:42 0:29:16 0:18:21 0:10:55 smithi main centos 8.stream fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/pool-perm} 2
Failure Reason:

Command failed on smithi064 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130317 2023-01-01 15:16:13 2023-01-01 18:35:37 2023-01-01 19:30:41 0:55:04 0:40:45 0:14:19 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}} 3
Failure Reason:

timeout expired in wait_until_healthy

pass 7130318 2023-01-01 15:16:14 2023-01-01 18:36:47 2023-01-01 19:06:05 0:29:18 0:19:00 0:10:18 smithi main centos 8.stream fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mon 2-workunit/fs/trivial_sync}} 2
pass 7130319 2023-01-01 15:16:15 2023-01-01 18:36:57 2023-01-01 19:08:29 0:31:32 0:21:27 0:10:05 smithi main centos 8.stream fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/quota} 2
dead 7130320 2023-01-01 15:16:16 2023-01-01 18:37:28 2023-01-01 18:57:06 0:19:38 0:09:00 0:10:38 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/ffsb}} 3
Failure Reason:

{'smithi043.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

pass 7130321 2023-01-01 15:16:16 2023-01-01 18:37:58 2023-01-01 19:08:03 0:30:05 0:17:41 0:12:24 smithi main ubuntu 20.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-2-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest} 4
fail 7130322 2023-01-01 15:16:17 2023-01-01 18:39:09 2023-01-01 19:09:58 0:30:49 0:18:11 0:12:38 smithi main rhel 8.6 fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover} 2
Failure Reason:

Command failed on smithi153 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130323 2023-01-01 15:16:18 2023-01-01 18:40:49 2023-01-01 19:10:15 0:29:26 0:18:22 0:11:04 smithi main rhel 8.6 fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
Failure Reason:

Command failed on smithi005 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130324 2023-01-01 15:16:19 2023-01-01 18:43:10 2023-01-01 19:09:14 0:26:04 0:15:14 0:10:50 smithi main ubuntu 20.04 fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/misc}} 2
Failure Reason:

Command failed on smithi035 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130325 2023-01-01 15:16:19 2023-01-01 18:43:10 2023-01-01 19:14:19 0:31:09 0:16:17 0:14:52 smithi main ubuntu 20.04 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_suites_pjd}} 2
Failure Reason:

Command failed on smithi099 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=a'

fail 7130326 2023-01-01 15:16:20 2023-01-01 18:46:11 2023-01-01 19:13:33 0:27:22 0:21:30 0:05:52 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/recovery-fs} 2
Failure Reason:

Test failure: test_recover_fs_after_fsmap_removal (tasks.cephfs.test_recovery_fs.TestFSRecovery), test_recover_fs_after_fsmap_removal (tasks.cephfs.test_recovery_fs.TestFSRecovery)

fail 7130327 2023-01-01 15:16:21 2023-01-01 18:46:41 2023-01-01 19:28:03 0:41:22 0:30:57 0:10:25 smithi main centos 8.stream fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/dbench validater/valgrind} 2
Failure Reason:

Command failed on smithi040 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

fail 7130328 2023-01-01 15:16:22 2023-01-01 18:47:01 2023-01-01 19:15:45 0:28:44 0:19:16 0:09:28 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi055 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 32ba6f54-8a08-11ed-90c7-001a4aab830c -- ceph mon dump -f json'

fail 7130329 2023-01-01 15:16:22 2023-01-01 18:47:12 2023-01-01 19:16:53 0:29:41 0:19:12 0:10:29 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/scrub} 2
Failure Reason:

Command failed on smithi136 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

dead 7130330 2023-01-01 15:16:23 2023-01-01 18:47:22 2023-01-01 19:00:51 0:13:29 0:06:36 0:06:53 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/osd 2-workunit/suites/iozone}} 2
Failure Reason:

{'smithi114.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

dead 7130331 2023-01-01 15:16:24 2023-01-01 18:47:22 2023-01-02 06:57:44 12:10:22 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/fs/norstats}} 3
Failure Reason:

hit max job timeout

pass 7130332 2023-01-01 15:16:25 2023-01-01 18:47:43 2023-01-01 19:18:35 0:30:52 0:19:56 0:10:56 smithi main ubuntu 20.04 fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
pass 7130333 2023-01-01 15:16:25 2023-01-01 18:47:43 2023-01-01 19:21:50 0:34:07 0:23:02 0:11:05 smithi main rhel 8.6 fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
fail 7130334 2023-01-01 15:16:26 2023-01-01 18:47:53 2023-01-01 19:28:01 0:40:08 0:27:37 0:12:31 smithi main centos 8.stream fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}} 3
Failure Reason:

"2023-01-01T19:15:52.771715+0000 mgr.x (mgr.14098) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log

pass 7130335 2023-01-01 15:16:27 2023-01-01 18:48:04 2023-01-01 19:20:49 0:32:45 0:21:08 0:11:37 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/sessionmap} 2
fail 7130336 2023-01-01 15:16:28 2023-01-01 18:48:24 2023-01-01 19:17:58 0:29:34 0:17:59 0:11:35 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mds 2-workunit/suites/pjd}} 2
Failure Reason:

Command failed on smithi116 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph 0@.cephfs=/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,nofallback'

dead 7130337 2023-01-01 15:16:28 2023-01-01 18:48:44 2023-01-01 19:05:19 0:16:35 0:04:41 0:11:54 smithi main fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench} 2
Failure Reason:

{'smithi150.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:04.896027', 'end': '2023-01-01 19:04:18.080024', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 19:04:13.183997', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. 
Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}, 'smithi007.front.sepia.ceph.com': {'_ansible_no_log': False, 'changed': True, 'cmd': ['cpan', 'Amazon::S3'], 'delta': '0:00:05.044847', 'end': '2023-01-01 19:04:18.199213', 'invocation': {'module_args': {'_raw_params': 'cpan Amazon::S3', '_uses_shell': False, 'argv': None, 'chdir': None, 'creates': None, 'executable': None, 'removes': None, 'stdin': None, 'stdin_add_newline': True, 'strip_empty_ends': True, 'warn': True}}, 'msg': 'non-zero return code', 'rc': 25, 'start': '2023-01-01 19:04:13.154366', 'stderr': '', 'stderr_lines': [], 'stdout': "Loading internal logger. 
Log::Log4perl recommended for better logging\nReading '/home/ubuntu/.cpan/Metadata'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz\nReading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz\nReading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'\n Database was generated on Fri, 12 Feb 2016 02:17:02 GMT\nWarning: This index file is 2515 days old.\n Please check the host you chose as your CPAN mirror for staleness.\n I'll continue but problems seem likely to happen.\x07\n............................................................................DONE\nFetching with LWP:\nhttp://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz\nReading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'\nDONE\nWriting /home/ubuntu/.cpan/Metadata\nRunning install for module 'Amazon::S3'\n\nWarning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.\n\nThe cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.\nProceed nonetheless? [no] no\nAborted.", 'stdout_lines': ['Loading internal logger. 
Log::Log4perl recommended for better logging', "Reading '/home/ubuntu/.cpan/Metadata'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/authors/01mailrc.txt.gz', "Reading '/home/ubuntu/.cpan/sources/authors/01mailrc.txt.gz'", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/02packages.details.txt.gz', "Reading '/home/ubuntu/.cpan/sources/modules/02packages.details.txt.gz'", ' Database was generated on Fri, 12 Feb 2016 02:17:02 GMT', 'Warning: This index file is 2515 days old.', ' Please check the host you chose as your CPAN mirror for staleness.', " I'll continue but problems seem likely to happen.\x07", '............................................................................DONE', 'Fetching with LWP:', 'http://apt-mirror.sepia.ceph.com/CPAN/modules/03modlist.data.gz', "Reading '/home/ubuntu/.cpan/sources/modules/03modlist.data.gz'", 'DONE', 'Writing /home/ubuntu/.cpan/Metadata', "Running install for module 'Amazon::S3'", '', "Warning: checksum file '/home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS' not conforming.", '', "The cksum does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'.", 'Proceed nonetheless? [no] no', 'Aborted.']}}

pass 7130338 2023-01-01 15:16:29 2023-01-01 18:49:25 2023-01-01 19:57:01 1:07:36 0:56:27 0:11:09 smithi main rhel 8.6 fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_dbench traceless/50pc} 2
fail 7130339 2023-01-01 15:16:30 2023-01-01 18:49:25 2023-01-01 19:15:39 0:26:14 0:13:48 0:12:26 smithi main ubuntu 20.04 fs/fscrypt/{begin/{0-install 1-ceph 2-logrotate} bluestore-bitmap clusters/1-mds-1-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/v1 mount overrides/{distro/testing/k-testing}} overrides/{ignorelist_health ignorelist_health_more ignorelist_wrongly_marked_down pg-warn} tasks/fscrypt-ffsb} 3
Failure Reason:

Command failed on smithi203 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

dead 7130340 2023-01-01 15:16:31 2023-01-01 18:51:15 2023-01-01 19:04:20 0:13:05 0:06:30 0:06:35 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap-schedule} 2
Failure Reason:

{'smithi098.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130341 2023-01-01 15:16:31 2023-01-01 18:51:56 2023-01-01 19:30:26 0:38:30 0:29:18 0:09:12 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

timeout expired in wait_until_healthy

dead 7130342 2023-01-01 15:16:32 2023-01-01 18:51:56 2023-01-01 19:14:11 0:22:15 0:11:10 0:11:05 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/cfuse_workunit_trivial_sync}} 2
Failure Reason:

{'smithi017.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

pass 7130343 2023-01-01 15:16:33 2023-01-01 18:53:37 2023-01-01 19:50:48 0:57:11 0:44:05 0:13:06 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/1 standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsstress}} 3
fail 7130344 2023-01-01 15:16:34 2023-01-01 18:55:47 2023-01-01 19:26:03 0:30:16 0:18:26 0:11:50 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap_schedule_snapdir} 2
Failure Reason:

Command failed on smithi100 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,snapdirname=.customsnapkernel,name=0,mds_namespace=cephfs'

fail 7130345 2023-01-01 15:16:34 2023-01-01 18:57:08 2023-01-01 19:54:09 0:57:01 0:46:00 0:11:01 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mds 2-workunit/fs/snaps}} 2
Failure Reason:

Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi063 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'

fail 7130346 2023-01-01 15:16:35 2023-01-01 18:58:08 2023-01-01 19:43:34 0:45:26 0:33:45 0:11:41 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}} 3
Failure Reason:

timeout expired in wait_until_healthy

fail 7130347 2023-01-01 15:16:36 2023-01-01 18:58:28 2023-01-01 19:49:51 0:51:23 0:39:08 0:12:15 smithi main centos 8.stream fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snapshots} 2
Failure Reason:

Found coredumps on ubuntu@smithi079.front.sepia.ceph.com

fail 7130348 2023-01-01 15:16:37 2023-01-01 19:00:59 2023-01-01 19:25:19 0:24:20 0:15:36 0:08:44 smithi main rhel 8.6 fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/ffsb}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

dead 7130349 2023-01-01 15:16:37 2023-01-01 19:02:09 2023-01-02 07:11:09 12:09:00 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/no 3-snaps/no 4-flush/no 5-workunit/suites/fsx}} 3
Failure Reason:

hit max job timeout

dead 7130350 2023-01-01 15:16:38 2023-01-01 19:02:10 2023-01-01 19:15:48 0:13:38 0:06:49 0:06:49 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays} 2
Failure Reason:

{'smithi182.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130351 2023-01-01 15:16:39 2023-01-01 19:02:50 2023-01-01 19:39:27 0:36:37 0:23:55 0:12:42 smithi main centos 8.stream fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}} 2
Failure Reason:

Command failed on smithi093 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 989464ae-8a0a-11ed-90c7-001a4aab830c -e sha1=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

pass 7130352 2023-01-01 15:16:39 2023-01-01 19:04:31 2023-01-01 19:34:31 0:30:00 0:19:10 0:10:50 smithi main ubuntu 20.04 fs/32bits/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{faked-ino ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
fail 7130353 2023-01-01 15:16:40 2023-01-01 19:04:51 2023-01-01 19:44:01 0:39:10 0:28:58 0:10:12 smithi main centos 8.stream fs/cephadm/renamevolume/{0-start 1-rename distro/single-container-host overrides/ignorelist_health} 2
Failure Reason:

timeout expired in wait_until_healthy

pass 7130354 2023-01-01 15:16:41 2023-01-01 19:05:01 2023-01-01 19:38:11 0:33:10 0:21:10 0:12:00 smithi main rhel 8.6 fs/libcephfs/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-1-client-coloc conf/{client mds mon osd} distro/{rhel_8} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/libcephfs/{test}} 2
pass 7130355 2023-01-01 15:16:42 2023-01-01 19:05:22 2023-01-01 20:23:15 1:17:53 1:04:51 0:13:02 smithi main ubuntu 20.04 fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cephfs_misc_tests} 5
pass 7130356 2023-01-01 15:16:42 2023-01-01 19:06:52 2023-01-01 20:03:39 0:56:47 0:47:44 0:09:03 smithi main rhel 8.6 fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth} 2
pass 7130357 2023-01-01 15:16:43 2023-01-01 19:07:03 2023-01-01 19:40:19 0:33:16 0:22:17 0:10:59 smithi main centos 8.stream fs/permission/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_pjd} 2
pass 7130358 2023-01-01 15:16:44 2023-01-01 19:07:13 2023-01-01 21:06:08 1:58:55 1:46:44 0:12:11 smithi main ubuntu 20.04 fs/snaps/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/workunit/snaps} 2
pass 7130359 2023-01-01 15:16:45 2023-01-01 19:08:03 2023-01-01 20:05:40 0:57:37 0:46:30 0:11:07 smithi main rhel 8.6 fs/volumes/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/volumes/{overrides test/snapshot}} 2
fail 7130360 2023-01-01 15:16:45 2023-01-01 19:08:04 2023-01-01 19:33:15 0:25:11 0:17:37 0:07:34 smithi main rhel 8.6 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/1 tasks/{1-thrash/mon 2-workunit/suites/ffsb}} 2
Failure Reason:

Command crashed: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone https://git.ceph.com/ceph-ci.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0 && git checkout 2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a'

dead 7130361 2023-01-01 15:16:46 2023-01-01 19:08:34 2023-01-01 19:28:33 0:19:59 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/test_journal_migration} 2
Failure Reason:

Error reimaging machines: reached maximum tries (100) after waiting for 600 seconds

fail 7130362 2023-01-01 15:16:47 2023-01-01 19:09:24 2023-01-01 19:48:54 0:39:30 0:28:32 0:10:58 smithi main rhel 8.6 fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/yes 4-flush/yes 5-workunit/suites/fsync-tester}} 3
Failure Reason:

Command failed on smithi153 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/volumes/_nogroup/sv_1/baa5fa5f-e40d-4556-9afa-d230462ea0dd /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs,ms_mode=legacy,nowsync'

pass 7130363 2023-01-01 15:16:48 2023-01-01 19:10:05 2023-01-01 19:36:25 0:26:20 0:15:18 0:11:02 smithi main ubuntu 20.04 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/truncate_delay} 2
fail 7130364 2023-01-01 15:16:48 2023-01-01 19:10:25 2023-01-01 20:03:13 0:52:48 0:37:31 0:15:17 smithi main centos 8.stream fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/no}} 3
Failure Reason:

timeout expired in wait_until_healthy

fail 7130365 2023-01-01 15:16:49 2023-01-01 19:14:16 2023-01-01 19:45:01 0:30:45 0:19:31 0:11:14 smithi main centos 8.stream fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mon 2-workunit/iozone}} 2
Failure Reason:

Command failed on smithi110 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=a'

fail 7130366 2023-01-01 15:16:50 2023-01-01 19:14:16 2023-01-01 19:57:34 0:43:18 0:31:29 0:11:49 smithi main rhel 8.6 fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/fsstress validater/lockdep} 2
Failure Reason:

Command failed (workunit test suites/fsstress.sh) on smithi099 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=2d451fdbbd7269d7b4c18c135c79703e6d1c0c1a TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'

dead 7130367 2023-01-01 15:16:51 2023-01-01 19:14:26 2023-01-01 19:28:15 0:13:49 0:06:32 0:07:17 smithi main rhel 8.6 fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/dir-max-entries} 2
Failure Reason:

{'smithi102.front.sepia.ceph.com': {'attempts': 12, 'censored': "the output has been hidden due to the fact that 'no_log: true' was specified for this result", 'changed': True}}

fail 7130368 2023-01-01 15:16:51 2023-01-01 19:15:07 2023-01-01 19:41:16 0:26:09 0:15:02 0:11:07 smithi main ubuntu 20.04 fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/osd 2-workunit/suites/fsstress}} 2
Failure Reason:

Command failed on smithi092 with status 32: 'sudo nsenter --net=/var/run/netns/ceph-ns--home-ubuntu-cephtest-mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage /bin/mount -t ceph :/ /home/ubuntu/cephtest/mnt.0 -v -o norequire_active_mds,conf=/etc/ceph/ceph.conf,norbytes,name=0,mds_namespace=cephfs'

fail 7130369 2023-01-01 15:16:52 2023-01-01 19:15:07 2023-01-01 19:44:11 0:29:04 0:17:50 0:11:14 smithi main centos 8.stream fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}} 1
Failure Reason:

"2023-01-01T19:39:04.808476+0000 mgr.y (mgr.4099) 1 : cluster [ERR] Failed to load ceph-mgr modules: prometheus" in cluster log