ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Each entry below gives Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS on one line, followed by the job Description and, where one was reported, the Failure Reason; ID, Status, and Nodes values are not included in this listing.
main | wip-57864 | main | smithi | centos 8.stream
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/connectivity random-objectstore$/{bluestore-comp-snappy} supported-random-distro$/{centos_8} tasks/workunits}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-comp openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/fio_4M_rand_rw}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/osd-recovery mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/off mon_election/classic task/test_orch_cli_mon}
"/var/log/ceph/f1b6b488-9fb8-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T09:43:48.413+0000 7f7e0f84f700 0 log_channel(cluster) log [WRN] : Health check failed: 2/5 mons down, quorum a,e,c (MON_DOWN)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/none thrashosds-health workloads/radosbench-high-concurrency}
main | wip-57864 | main | smithi | centos 8.stream
rados/objectstore/{backends/objectstore-filestore-memstore supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} thrashers/sync workloads/rados_mon_osdmap_prune}
main | wip-57864 | main | smithi | centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} tasks/rados_api_tests}
main | wip-57864 | main | smithi | centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-stupid rados tasks/mon_recovery validater/valgrind}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/few rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/minsize_recovery thrashosds-health workloads/ec-pool-snaps-few-objects-overwrites}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/pool-access mon_election/classic rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-add}
"/var/log/ceph/41248028-9fb8-11ed-9e56-001a4aab830c/ceph-mon.smithi153.log:2023-01-29T09:45:07.671+0000 7f088a0c7700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton/{all/peer mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-stupid rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/radosbench}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/filestore-xfs rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_adoption}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/recovery-unfound-found mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/pg-autoscaler-progress-off mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay objectstore/bluestore-hybrid rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/redirect}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}
"/var/log/ceph/da1c341e-9fb9-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T09:58:12.882+0000 7f1ad3b58700 0 log_channel(cluster) log [WRN] : Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-low-osd-mem-target openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/fio_4M_rand_write}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/test_rbd_api}
"/var/log/ceph/e6f5b074-9fba-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T09:57:08.298+0000 7f7aabfec700 0 log_channel(cluster) log [WRN] : Health detail: HEALTH_WARN 1/3 mons down, quorum a,c" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton/{all/pg-autoscaler mon_election/classic msgr-failures/none msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/filestore-xfs rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/version-number-sanity mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-bluestore/{all/cephtool mon_election/connectivity msgr-failures/none msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/redirect_promote_tests}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-flag}
"/var/log/ceph/ba783224-9fba-11ed-9e56-001a4aab830c/ceph-mon.smithi040.log:2023-01-29T09:58:37.888+0000 7f5d8c040700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} tasks/rados_cls_all}
main | wip-57864 | main | smithi | rhel 8.6
rados/multimon/{clusters/21 mon_election/connectivity msgr-failures/many msgr/async no_pools objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} tasks/mon_recovery}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/pg-removal-interruption mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-hybrid} tasks/dashboard}
Test failure: test_full_health (tasks.mgr.dashboard.test_health.HealthTest)
main | wip-57864 | main | smithi | ubuntu 20.04
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic random-objectstore$/{bluestore-comp-lz4} supported-random-distro$/{ubuntu_latest} tasks/crash}
main | wip-57864 | main | smithi | rhel 8.6
rados/objectstore/{backends/alloc-hint supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/rest/{mgr-restful supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/calico rook/1.7.2}
Command failed on smithi033 with status 1: 'sudo systemctl enable --now kubelet && sudo kubeadm config images pull'
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton-nomsgr/{all/admin_socket_output mon_election/classic rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/c2c}
main | wip-57864 | main | smithi | centos 8.stream
rados/upgrade/parallel/{0-random-distro$/{centos_8.stream_container_tools_crun} 0-start 1-tasks mon_election/classic upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Command failed on smithi159 with status 128: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone --depth 1 --branch quincy https://github.com/chrisphoffman/ceph.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0'
main | wip-57864 | main | smithi | centos 8.stream
rados/valgrind-leaks/{1-start 2-inject-leak/mon centos_latest}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/off mon_election/classic task/test_cephadm}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/redirect_set_object}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest} thrashers/force-sync-many workloads/rados_mon_workunits}
main | wip-57864 | main | smithi | centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/filestore-xfs rados tasks/rados_api_tests validater/lockdep}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/filestore-xfs rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/radostool mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/off fixed-2 mon_election/classic start}
"/var/log/ceph/a94f5804-9fbc-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T10:09:17.214+0000 7fd12b123700 0 log_channel(cluster) log [WRN] : Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/balancer mon_election/connectivity rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/fastclose objectstore/bluestore-bitmap rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-stupid openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/radosbench_4K_rand_read}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/set-chunks-read}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/random-eio mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}}
Error reimaging machines: 'NoneType' object has no attribute '_fields'
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/smoke-singlehost/{0-random-distro$/{rhel_8.6_container_tools_3.0} 1-start 2-services/basic 3-final}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=jerasure-k=2-m=1}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/ec-small-objects-fast-read-overwrites}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/cache-fs-trunc mon_election/classic rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/small-objects-balanced}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} tasks/rados_python}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rm-zap-wait}
"/var/log/ceph/aeaa794a-9fbd-11ed-9e56-001a4aab830c/ceph-mon.smithi136.log:2023-01-29T10:19:40.137+0000 7f06e543d700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-bitmap rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/rebuild-mondb mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/ceph-kvstore-tool mon_election/connectivity rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async objectstore/filestore-xfs rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/small-objects-localized}
main | wip-57864 | main | smithi | centos 8.stream
rados/objectstore/{backends/ceph_objectstore_tool supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_cephadm_repos}
main | wip-57864 | main | smithi | centos 8.stream
rados/multimon/{clusters/3 mon_election/classic msgr-failures/few msgr/async-v1only no_pools objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8} tasks/mon_clock_no_skews}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton/{all/recovery-preemption mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/radosbench_4K_seq_read}
main | wip-57864 | main | smithi | rhel 8.6
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/connectivity random-objectstore$/{bluestore-low-osd-mem-target} supported-random-distro$/{rhel_8} tasks/failover}
main | wip-57864 | main | smithi | rhel 8.6
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} thrashers/many workloads/rados_mon_workunits}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/ceph-post-file mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | rhel 8.6
rados/standalone/{supported-random-distro$/{rhel_8} workloads/crush}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/small-objects}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/cache-snaps}
"/var/log/ceph/45215cb2-9fbf-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T10:39:59.999+0000 7fb61211a700 0 log_channel(cluster) log [WRN] : Health detail: HEALTH_WARN nodeep-scrub flag(s) set" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-bitmap rados tasks/rados_api_tests validater/valgrind}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"/var/log/ceph/3968de72-9fbf-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T10:28:14.762+0000 7ff2d02d5700 0 log_channel(cluster) log [WRN] : Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-bitmap rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton/{all/resolve_stuck_peering mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/off mon_election/classic task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/snaps-few-objects-balanced}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/crushdiff mon_election/connectivity rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} tasks/rados_stress_watch}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton/{all/test-crash mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest}}
Command failed (workunit test rados/test_crash.sh) on smithi062 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_crash.sh'
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/fastclose objectstore/bluestore-stupid rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=3-m=1}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/osds/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
"/var/log/ceph/b74f2b34-9fbf-11ed-9e56-001a4aab830c/ceph-mon.smithi110.log:2023-01-29T10:34:48.787+0000 7f0f90a77700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/snaps-few-objects-localized}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/export-after-evict mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-bitmap openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/radosbench_4M_rand_read}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/test-noautoscale-flag mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/smoke/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop agent/off fixed-2 mon_election/classic start}
"/var/log/ceph/fb85a9f4-9fbf-11ed-9e56-001a4aab830c/ceph-mon.c.log:2023-01-29T10:35:44.996+0000 7f04c6067700 7 mon.c@2(peon).log v168 update_from_paxos applying incremental log 168 2023-01-29T10:35:44.367461+0000 mon.a (mon.0) 521 : cluster [WRN] Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/objectstore/{backends/filejournal supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/snaps-few-objects}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/multimon/{clusters/6 mon_election/connectivity msgr-failures/many msgr/async-v2only no_pools objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_with_skews}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/full-tiering mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/ec-small-objects-overwrites}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/on mon_election/connectivity task/test_nfs}
"/var/log/ceph/4b15a2ac-9fc1-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T10:43:11.013+0000 7f67cc564700 0 log_channel(cluster) log [WRN] : Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/test_envlibrados_for_rocksdb mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{rhel_8}}
Command failed (workunit test rados/test_envlibrados_for_rocksdb.sh) on smithi027 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_envlibrados_for_rocksdb.sh'
main | wip-57864 | main | smithi | centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-lz4 rados tasks/rados_cls_all validater/lockdep}
main | wip-57864 | main | smithi | centos 8.stream
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8} thrashers/one workloads/snaps-few-objects}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/write_fadvise_dontneed}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic random-objectstore$/{bluestore-comp-zstd} supported-random-distro$/{ubuntu_latest} tasks/insights}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main | wip-57864 | main | smithi | centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} tasks/rados_striper}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/none msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/osds/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-ops/repave-all}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton-nomsgr/{all/health-warnings mon_election/classic rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/erasure-code}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/thrash-backfill-full mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/admin_socket_objecter_requests}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-comp openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/radosbench_4M_seq_read}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/smoke/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"/var/log/ceph/d26e3304-9fc1-11ed-9e56-001a4aab830c/ceph-mon.c.log:2023-01-29T10:47:01.534+0000 7f474bf4b700 7 mon.c@2(peon).log v99 update_from_paxos applying incremental log 99 2023-01-29T10:47:01.496949+0000 mon.a (mon.0) 345 : cluster [WRN] Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-comp-zstd} tasks/e2e}
"/var/log/ceph/ce04ed66-9fc2-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T11:00:46.679+0000 7f21ec716700 0 log_channel(cluster) log [WRN] : Health check failed: 1 host is in maintenance mode (HOST_IN_MAINTENANCE)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/flannel rook/master}
Command failed on smithi033 with status 1: 'sudo systemctl enable --now kubelet && sudo kubeadm config images pull'
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/large-omap-object-warnings mon_election/connectivity rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/few objectstore/filestore-xfs rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/fastread thrashosds-health workloads/ec-radosbench}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton/{all/thrash-eio mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/octopus backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/radosbench}
"/var/log/ceph/57bc1110-9fc3-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T11:09:59.999+0000 7f7b8497e700 0 log_channel(cluster) log [WRN] : Health detail: HEALTH_WARN noscrub,nodeep-scrub flag(s) set; Degraded data redundancy: 15532/9 objects degraded (172577.778%), 6 pgs degraded, 6 pgs undersized" in cluster log
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/off mon_election/classic task/test_orch_cli}
"/var/log/ceph/02b90434-9fc3-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T10:57:14.590+0000 7f11acaf9700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/cache-agent-big}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main | wip-57864 | main | smithi | centos 8.stream
rados/objectstore/{backends/filestore-idempotent-aio-journal supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/thrash-rados/{thrash-rados thrashosds-health} mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton-nomsgr/{all/lazy_omap_stats_output mon_election/classic rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/smoke-singlehost/{0-random-distro$/{ubuntu_20.04} 1-start 2-services/rgw 3-final}
"/var/log/ceph/30c16448-9fc3-11ed-9e56-001a4aab830c/ceph-mon.smithi142.log:2023-01-29T10:58:28.957+0000 7f8e23de0700 0 log_channel(cluster) log [WRN] : Health check failed: 1 slow ops, oldest one blocked for 31 sec, mon.smithi142 has slow ops (SLOW_OPS)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/multimon/{clusters/9 mon_election/classic msgr-failures/few msgr/async no_pools objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} tasks/mon_recovery}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/cache-agent-small}
main | wip-57864 | main | smithi | rhel 8.6
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async objectstore/filestore-xfs rados supported-random-distro$/{rhel_8} tasks/rados_workunit_loadgen_big}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton/{all/thrash_cache_writeback_proxy_none mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/librados_hello_world mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
Command failed (workunit test rados/test_librados_build.sh) on smithi191 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_librados_build.sh'
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-add}
"/var/log/ceph/f824ca9e-9fc7-11ed-9e56-001a4aab830c/ceph-mon.smithi114.log:2023-01-29T11:32:58.882+0000 7f0164b22700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-snappy rados tasks/mon_recovery validater/valgrind}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-low-osd-mem-target openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/radosbench_4M_write}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/sync-many workloads/pool-create-delete}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-comp-snappy rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{default} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async objectstore/filestore-xfs rados supported-random-distro$/{centos_8} thrashers/mapgap thrashosds-health workloads/cache-pool-snaps-readproxy}
main | wip-57864 | main | smithi | rhel 8.6
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/connectivity random-objectstore$/{bluestore-comp-lz4} supported-random-distro$/{rhel_8} tasks/module_selftest}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/watch-notify-same-primary mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/on mon_election/connectivity task/test_orch_cli_mon}
"/var/log/ceph/97dcaf60-9fc9-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T11:42:52.919+0000 7f9459f09700 0 log_channel(cluster) log [WRN] : Health check failed: 1/5 mons down, quorum a,e,c,d (MON_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-zlib rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/msgr mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/mgr}
main | wip-57864 | main | smithi | centos 8.stream
rados/valgrind-leaks/{1-start 2-inject-leak/none centos_latest}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-dispatch-delay rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-pool-snaps-few-objects-overwrites}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/cache-pool-snaps}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/off fixed-2 mon_election/classic start}
"/var/log/ceph/3716fa72-9fca-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T11:54:46.038+0000 7f9d6d00f700 0 log_channel(cluster) log [WRN] : Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY)" in cluster log
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay objectstore/bluestore-bitmap rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/minsize_recovery thrashosds-health workloads/ec-small-objects-balanced}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/admin-socket mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton-nomsgr/{all/multi-backfill-reject mon_election/connectivity rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/objectstore/{backends/filestore-idempotent supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} thrashers/none thrashosds-health workloads/cache-snaps-balanced}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-zlib rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main | wip-57864 | main | smithi | rhel 8.6
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} tasks/rados_workunit_loadgen_mix}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/off mon_election/classic task/test_adoption}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-stupid openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/radosbench_omap_write}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton/{all/backfill-toofull mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton-nomsgr/{all/osd_stale_reads mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/multimon/{clusters/21 mon_election/connectivity msgr-failures/many msgr/async-v1only no_pools objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_no_skews}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/cephadm/osds/{0-distro/ubuntu_20.04 0-nvme-loop 1-start 2-ops/rm-zap-flag}
"/var/log/ceph/6a4c1a24-9fd1-11ed-9e56-001a4aab830c/ceph-mon.smithi158.log:2023-01-29T12:45:14.812+0000 7fb952f01700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/cache-snaps}
main | wip-57864 | main | smithi | centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/pacific backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/rbd_cls}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/singleton/{all/deduptool mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest}}
Command failed (workunit test rados/test_dedup_tool.sh) on smithi116 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_dedup_tool.sh'
main | wip-57864 | main | smithi | centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zlib rados tasks/rados_api_tests validater/lockdep}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-comp-zlib rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main | wip-57864 | main | smithi | rhel 8.6
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8} thrashers/sync workloads/rados_5925}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"/var/log/ceph/1470b040-9fd3-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T12:50:02.487+0000 7fb8ec84e700 0 log_channel(cluster) log [WRN] : Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/pool-access mon_election/connectivity rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/cache}
main | wip-57864 | main | smithi | rhel 8.6
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic random-objectstore$/{bluestore-comp-zlib} supported-random-distro$/{rhel_8} tasks/progress}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/fastclose objectstore/bluestore-comp-zstd rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/divergent_priors mon_election/classic msgr-failures/none msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/on mon_election/connectivity task/test_cephadm}
main | wip-57864 | main | smithi | centos 8.stream
rados/singleton-nomsgr/{all/recovery-unfound-found mon_election/classic rados supported-random-distro$/{centos_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/standalone/{supported-random-distro$/{rhel_8} workloads/misc}
Command failed (workunit test misc/test-ceph-helpers.sh) on smithi106 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/misc/test-ceph-helpers.sh'
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/dedup-io-mixed}
main | wip-57864 | main | smithi | rhel 8.6
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} tasks/rados_workunit_loadgen_mostlyread}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/sample_fio}
main | wip-57864 | main | smithi | ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/ec-small-objects-fast-read}
main | wip-57864 | main | smithi | rhel 8.6
rados/objectstore/{backends/fusestore supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton/{all/divergent_priors2 mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | centos 8.stream
rados/cephadm/osds/{0-distro/centos_8.stream_container_tools 0-nvme-loop 1-start 2-ops/rm-zap-wait}
"/var/log/ceph/04476550-9fd4-11ed-9e56-001a4aab830c/ceph-mon.smithi150.log:2023-01-29T12:59:43.670+0000 7fcda8bef700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main | wip-57864 | main | smithi | rhel 8.6
rados/singleton-bluestore/{all/cephtool mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}}
main | wip-57864 | main | smithi | rhel 8.6
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-comp-zstd rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/dedup-io-snaps}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton-nomsgr/{all/version-number-sanity mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/fastclose rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-small-objects-fast-read-overwrites}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/classic start}
"/var/log/ceph/92e5fe02-9fd4-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T13:00:21.712+0000 7f481527b700 0 log_channel(cluster) log [WRN] : Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/singleton/{all/dump-stuck mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/multimon/{clusters/3 mon_election/classic msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8} tasks/mon_clock_with_skews}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/pool-snaps-few-objects}
main
wip-57864
main
smithi
centos 8.stream
rados/dashboard/{0-single-container-host debug/mgr mon_election/connectivity random-objectstore$/{bluestore-comp-snappy} tasks/dashboard}
Test failure: test_full_health (tasks.mgr.dashboard.test_health.HealthTest)
main
wip-57864
main
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/radosbench cluster/1-node k8s/1.21 net/host rook/1.7.2}
Command failed on smithi099 with status 1: 'sudo systemctl enable --now kubelet && sudo kubeadm config images pull'
main
wip-57864
main
smithi
rhel 8.6
rados/singleton-nomsgr/{all/admin_socket_output mon_election/connectivity rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/upgrade/parallel/{0-random-distro$/{rhel_8.6_container_tools_3.0} 0-start 1-tasks mon_election/connectivity upgrade-sequence workload/{ec-rados-default rados_api rados_loadgenbig rbd_import_export test_rbd_api test_rbd_python}}
Command failed on smithi121 with status 128: 'rm -rf /home/ubuntu/cephtest/clone.client.0 && git clone --depth 1 --branch quincy https://github.com/chrisphoffman/ceph.git /home/ubuntu/cephtest/clone.client.0 && cd /home/ubuntu/cephtest/clone.client.0'
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/off mon_election/classic task/test_cephadm_repos}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/ec-inconsistent-hinfo mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zstd rados tasks/rados_cls_all validater/valgrind}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main
wip-57864
main
smithi
ubuntu 20.04
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} thrashers/force-sync-many workloads/rados_api_tests}
main
wip-57864
main
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-bitmap openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/sample_radosbench}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/rados_api_tests}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/smoke-singlehost/{0-random-distro$/{rhel_8.6_container_tools_rhel8} 1-start 2-services/basic 3-final}
main
wip-57864
main
smithi
centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} tasks/readwrite}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/balancer mon_election/classic rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/few objectstore/bluestore-hybrid rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton/{all/ec-lost-unfound mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/connectivity random-objectstore$/{bluestore-comp-snappy} supported-random-distro$/{centos_8} tasks/prometheus}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/quincy backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/snaps-few-objects}
"/var/log/ceph/f9422246-9fd6-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T13:17:50.116+0000 7f529606c700 0 log_channel(cluster) log [WRN] : Health detail: HEALTH_WARN 1/3 mons down, quorum a,c" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/rmdir-reactivate}
"/var/log/ceph/bb35f19e-9fd6-11ed-9e56-001a4aab830c/ceph-mon.smithi114.log:2023-01-29T13:18:52.242+0000 7fc9984c0700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main
wip-57864
main
smithi
rhel 8.6
rados/objectstore/{backends/keyvaluedb supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/radosbench-high-concurrency}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/fastclose objectstore/bluestore-comp-snappy rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-small-objects-many-deletes}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton-nomsgr/{all/cache-fs-trunc mon_election/connectivity rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
centos 8.stream
rados/standalone/{supported-random-distro$/{centos_8} workloads/mon}
Command failed (workunit test mon/health-mute.sh) on smithi132 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/standalone/mon/health-mute.sh'
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/erasure-code-nonregression mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/smoke/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"/var/log/ceph/8b0486c0-9fd6-11ed-9e56-001a4aab830c/ceph-mon.c.log:2023-01-29T13:14:40.238+0000 7fb1dcbfe700 7 mon.c@2(synchronizing).log v58 update_from_paxos applying incremental log 57 2023-01-29T13:14:38.251241+0000 mon.a (mon.0) 204 : cluster [WRN] Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-hybrid rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/radosbench}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton/{all/lost-unfound-delete mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/ceph-kvstore-tool mon_election/classic rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-comp openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/fio_4K_rand_read}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/on mon_election/connectivity task/test_iscsi_pids_limit/{centos_8.stream_container_tools test_iscsi_pids_limit}}
Command failed on smithi035 with status 1: 'TESTDIR=/home/ubuntu/cephtest bash -s'
main
wip-57864
main
smithi
ubuntu 20.04
rados/multimon/{clusters/6 mon_election/connectivity msgr-failures/many msgr/async no_pools objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} tasks/mon_recovery}
main
wip-57864
main
smithi
ubuntu 20.04
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} tasks/repair_test}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/redirect}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/bluestore-hybrid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main
wip-57864
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-hybrid rados tasks/mon_recovery validater/lockdep}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/lost-unfound mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8} thrashers/many workloads/rados_mon_osdmap_prune}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_rhel8 agent/off mon_election/classic task/test_nfs}
"/var/log/ceph/6eba4d7c-9fd8-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T13:30:41.448+0000 7f7427b74700 0 log_channel(cluster) log [WRN] : Replacing daemon mds.a.smithi084.qhhukv as rank 0 with standby daemon mds.user_test_fs.smithi084.dolfwg" in cluster log
main
wip-57864
main
smithi
rhel 8.6
rados/singleton-nomsgr/{all/ceph-post-file mon_election/connectivity rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/few rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/fastread thrashosds-health workloads/ec-small-objects-overwrites}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/redirect_promote_tests}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/osd-delay objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/osds/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop 1-start 2-ops/repave-all}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton/{all/max-pg-per-osd.from-mon mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
rhel 8.6
rados/objectstore/{backends/objectcacher-stress supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton-nomsgr/{all/crushdiff mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic random-objectstore$/{filestore-xfs} supported-random-distro$/{ubuntu_latest} tasks/workunits}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/few objectstore/bluestore-comp-zlib rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-small-objects}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/redirect_set_object}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/smoke/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop agent/off fixed-2 mon_election/classic start}
main
wip-57864
main
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-low-osd-mem-target openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/fio_4K_rand_rw}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton/{all/max-pg-per-osd.from-primary mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/export-after-evict mon_election/connectivity rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/standalone/{supported-random-distro$/{centos_8} workloads/osd-backfill}
main
wip-57864
main
smithi
centos 8.stream
rados/valgrind-leaks/{1-start 2-inject-leak/osd centos_latest}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main
wip-57864
main
smithi
ubuntu 20.04
rados/cephadm/workunits/{0-distro/ubuntu_20.04 agent/on mon_election/connectivity task/test_orch_cli}
"/var/log/ceph/3da0b2a6-9fda-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T13:42:57.380+0000 7ff1f6c93700 0 log_channel(cluster) log [WRN] : Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} tasks/scrub_test}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8} thrashers/none thrashosds-health workloads/set-chunks-read}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat mon_election/classic msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/test_rbd_api}
"/var/log/ceph/9de7d41e-9fda-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T13:43:49.708+0000 7f819c808700 0 log_channel(cluster) log [WRN] : Health detail: HEALTH_WARN 1/3 mons down, quorum a,c" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/singleton/{all/max-pg-per-osd.from-replica mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/multimon/{clusters/9 mon_election/classic msgr-failures/few msgr/async-v1only no_pools objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_no_skews}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton-nomsgr/{all/full-tiering mon_election/classic rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/cephadm/smoke/{0-distro/ubuntu_20.04 0-nvme-loop agent/on fixed-2 mon_election/connectivity start}
"/var/log/ceph/98e3c658-9fda-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T13:46:06.458+0000 7fc087ae5700 0 log_channel(cluster) log [WRN] : Health check failed: 1 stray daemon(s) not managed by cephadm (CEPHADM_STRAY_DAEMON)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/classic msgr-failures/few objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton-bluestore/{all/cephtool mon_election/classic msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/small-objects-balanced}
main
wip-57864
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} mon_election/connectivity msgr-failures/few msgr/async-v2only objectstore/bluestore-low-osd-mem-target rados tasks/rados_api_tests validater/valgrind}
main
wip-57864
main
smithi
ubuntu 20.04
rados/monthrash/{ceph clusters/3-mons mon_election/classic msgr-failures/mon-delay msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} thrashers/one workloads/rados_mon_workunits}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/mon-auth-caps mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/osds/{0-distro/rhel_8.6_container_tools_3.0 0-nvme-loop 1-start 2-ops/rm-zap-add}
"/var/log/ceph/3a698882-9fdb-11ed-9e56-001a4aab830c/ceph-mon.smithi037.log:2023-01-29T13:51:58.828+0000 7fb9d2583700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/health-warnings mon_election/connectivity rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-stupid openstack scheduler/dmclock_1Shard_16Threads settings/optimized ubuntu_latest workloads/fio_4M_rand_read}
main
wip-57864
main
smithi
ubuntu 20.04
rados/objectstore/{backends/objectstore-bluestore-a supported-random-distro$/{ubuntu_latest}}
Command failed on smithi112 with status 1: 'sudo TESTDIR=/home/ubuntu/cephtest bash -c \'mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-bluestore 20" ceph_test_objectstore --gtest_filter=*/2:-*SyntheticMatrixC* --gtest_catch_exceptions=0\''
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-stupid rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/small-objects-localized}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/mon-config-key-caps mon_election/classic msgr-failures/none msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools agent/off mon_election/classic task/test_orch_cli_mon}
"/var/log/ceph/0500ab1a-9fdd-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T14:05:52.347+0000 7f247c174700 0 log_channel(cluster) log [WRN] : Health check failed: Degraded data redundancy: 2/6 objects degraded (33.333%), 1 pg degraded (PG_DEGRADED)" in cluster log
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast mon_election/classic msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2}
hit max job timeout
main
wip-57864
main
smithi
centos 8.stream
rados/dashboard/{0-single-container-host debug/mgr mon_election/classic random-objectstore$/{bluestore-comp-zlib} tasks/e2e}
"/var/log/ceph/f6b2703e-9fdc-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T14:07:43.165+0000 7ff0d859a700 0 log_channel(cluster) log [WRN] : Health check failed: 1 host is in maintenance mode (HOST_IN_MAINTENANCE)" in cluster log
main
wip-57864
main
smithi
rhel 8.6
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/enable mon_election/connectivity random-objectstore$/{bluestore-comp-zstd} supported-random-distro$/{rhel_8} tasks/crash}
main
wip-57864
main
smithi
ubuntu 20.04
rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none cluster/3-node k8s/1.21 net/calico rook/master}
Command failed on smithi064 with status 1: 'sudo systemctl enable --now kubelet && sudo kubeadm config images pull'
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/large-omap-object-warnings mon_election/classic rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8} tasks/libcephsqlite}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/small-objects}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-delay rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/minsize_recovery thrashosds-health workloads/ec-snaps-few-objects-overwrites}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools 0-nvme-loop agent/on fixed-2 mon_election/classic start}
"/var/log/ceph/83e334c0-9fdd-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T14:04:24.962+0000 7f9a59bfb700 0 log_channel(cluster) log [WRN] : Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/mon-config-keys mon_election/connectivity msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/classic msgr-failures/osd-dispatch-delay objectstore/bluestore-stupid rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton-nomsgr/{all/lazy_omap_stats_output mon_election/connectivity rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/osd}
main
wip-57864
main
smithi
rhel 8.6
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async objectstore/filestore-xfs rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/snaps-few-objects-balanced}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/smoke-singlehost/{0-random-distro$/{centos_8.stream_container_tools} 1-start 2-services/rgw 3-final}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/mon-config mon_election/classic msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/multimon/{clusters/21 mon_election/connectivity msgr-failures/many msgr/async-v2only no_pools objectstore/filestore-xfs rados supported-random-distro$/{centos_8} tasks/mon_clock_with_skews}
main
wip-57864
main
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/connectivity objectstore/bluestore-basic-min-osd-mem-target openstack scheduler/dmclock_default_shards settings/optimized ubuntu_latest workloads/fio_4M_rand_rw}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} mon_election/connectivity msgr-failures/osd-delay objectstore/bluestore-stupid rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2}
main
wip-57864
main
smithi
centos 8.stream
rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-stupid rados tasks/rados_cls_all validater/lockdep}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/osds/{0-distro/rhel_8.6_container_tools_rhel8 0-nvme-loop 1-start 2-ops/rm-zap-flag}
"/var/log/ceph/19e4bf5c-9fde-11ed-9e56-001a4aab830c/ceph-mon.smithi093.log:2023-01-29T14:11:32.399+0000 7f4aec120700 0 log_channel(cluster) log [WRN] : Health check failed: 1 osds down (OSD_DOWN)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/librados_hello_world mon_election/classic rados supported-random-distro$/{centos_8}}
Command failed (workunit test rados/test_librados_build.sh) on smithi191 with status 2: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=8965ca3bc5c900c1b534ee8ca638a8aa0e2c61db TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test_librados_build.sh'
main
wip-57864
main
smithi
centos 8.stream
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-2} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/osd-delay msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/snaps-few-objects-localized}
main
wip-57864
main
smithi
ubuntu 20.04
rados/monthrash/{ceph clusters/9-mons mon_election/connectivity msgr-failures/few msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest} thrashers/sync-many workloads/snaps-few-objects}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton/{all/osd-backfill mon_election/connectivity msgr-failures/none msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
rhel 8.6
rados/objectstore/{backends/objectstore-bluestore-b supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-old-clients/{0-distro$/{centos_8.stream_container_tools} 0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on mon_election/connectivity msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/cache-snaps}
"/var/log/ceph/91a37474-9fdf-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T14:19:33.769+0000 7fb721596700 0 log_channel(cluster) log [WRN] : Health detail: HEALTH_WARN 1/3 mons down, quorum a,c" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/workunits/{0-distro/centos_8.stream_container_tools_crun agent/on mon_election/connectivity task/test_adoption}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} mon_election/classic msgr-failures/fastclose objectstore/filestore-xfs rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3}
main
wip-57864
main
smithi
rhel 8.6
rados/basic/{ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} tasks/rados_api_tests}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} 3-scrub-overrides/{max-simultaneous-scrubs-3} backoff/peering ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/osd-dispatch-delay msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/snaps-few-objects}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/msgr mon_election/connectivity rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton/{all/osd-recovery-incomplete mon_election/classic msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
centos 8.stream
rados/cephadm/smoke/{0-distro/centos_8.stream_container_tools_crun 0-nvme-loop agent/off fixed-2 mon_election/connectivity start}
"/var/log/ceph/18a00d0c-9fe0-11ed-9e56-001a4aab830c/ceph-mon.a.log:2023-01-29T14:23:01.349+0000 7f7252f9c700 0 log_channel(cluster) log [WRN] : Health check failed: 1/3 mons down, quorum a,b (MON_DOWN)" in cluster log
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal mon_election/connectivity msgr-failures/osd-dispatch-delay objectstore/bluestore-hybrid rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_8} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=jerasure-k=2-m=1}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} 3-scrub-overrides/{default} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} crc-failures/default d-balancer/on mon_election/connectivity msgr-failures/fastclose msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/write_fadvise_dontneed}
main
wip-57864
main
smithi
centos 8.stream
rados/mgr/{clusters/{2-node-mgr} debug/mgr mgr_ttl_cache/disable mon_election/classic random-objectstore$/{bluestore-hybrid} supported-random-distro$/{centos_8} tasks/failover}
main
wip-57864
main
smithi
rhel 8.6
rados/singleton-nomsgr/{all/multi-backfill-reject mon_election/classic rados supported-random-distro$/{rhel_8}}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton/{all/osd-recovery mon_election/connectivity msgr-failures/many msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}}
main
wip-57864
main
smithi
ubuntu 20.04
rados/perf/{ceph mon_election/classic objectstore/bluestore-bitmap openstack scheduler/wpq_default_shards settings/optimized ubuntu_latest workloads/fio_4M_rand_write}
main
wip-57864
main
smithi
centos 8.stream
rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} mon_election/connectivity msgr-failures/fastclose objectstore/filestore-xfs rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1}
main
wip-57864
main
smithi
rhel 8.6
rados/cephadm/workunits/{0-distro/rhel_8.6_container_tools_3.0 agent/off mon_election/classic task/test_cephadm}
main
wip-57864
main
smithi
ubuntu 20.04
rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} 3-scrub-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} crc-failures/bad_map_crc_failure d-balancer/crush-compat mon_election/classic msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/admin_socket_objecter_requests}
main
wip-57864
main
smithi
centos 8.stream
rados/multimon/{clusters/3 mon_election/classic msgr-failures/few msgr/async no_pools objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} tasks/mon_recovery}
main
wip-57864
main
smithi
ubuntu 20.04
rados/singleton/{all/peer mon_election/classic msgr-failures/none msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest}}
main
wip-57864
main
smithi
centos 8.stream
rados/singleton-nomsgr/{all/osd_stale_reads mon_election/connectivity rados supported-random-distro$/{centos_8}}