Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 5123954 2020-06-07 07:01:23 2020-06-07 09:10:00 2020-06-07 09:28:00 0:18:00 0:11:07 0:06:53 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_orch_cli} 1
pass 5123955 2020-06-07 07:01:24 2020-06-07 09:10:01 2020-06-07 09:28:00 0:17:59 0:12:14 0:05:45 smithi master rhel 8.1 rados/singleton/{all/divergent_priors2 msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}} 1
pass 5123956 2020-06-07 07:01:25 2020-06-07 09:10:01 2020-06-07 09:56:01 0:46:00 0:36:04 0:09:56 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/radosbench-high-concurrency} 2
fail 5123957 2020-06-07 07:01:26 2020-06-07 09:11:52 2020-06-07 09:51:53 0:40:01 0:24:52 0:15:09 smithi master ubuntu 18.04 rados/cephadm/with-work/{distro/ubuntu_18.04 fixed-2 mode/packaged msgr/async-v1only start tasks/rados_python} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T09:36:53.653864+00:00 smithi148 bash[13637]: debug 2020-06-07T09:36:53.649+0000 7f5f0e8ab700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog
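
This failure (and the identical signatures on jobs 5123959, 5123964, 5124086, 5124090, 5124092, 5124120 and 5124123 below) is not a command error: teuthology's post-run syslog scan matched a cephadm [ERR] line about deploying the prometheus/alertmanager daemon. A minimal way to re-check the archived syslog by hand, assuming the archive path shown above; the grep pattern is illustrative, not teuthology's exact match list:

    # Look for the cephadm deploy error that tripped the syslog check
    # (log path copied from the failure reason; pattern is an assumption).
    grep -E 'log_channel\(cephadm\) log \[ERR\]' \
        /home/ubuntu/cephtest/archive/syslog/misc.log | grep 'Deploy daemon'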

pass 5123958 2020-06-07 07:01:27 2020-06-07 09:11:53 2020-06-07 09:49:53 0:38:00 0:26:41 0:11:19 smithi master centos 8.1 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-comp-zlib rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
fail 5123959 2020-06-07 07:01:28 2020-06-07 09:11:53 2020-06-07 09:41:53 0:30:00 0:13:32 0:16:28 smithi master ubuntu 18.04 rados/cephadm/smoke/{distro/ubuntu_latest fixed-2 start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T09:36:57.697121+00:00 smithi201 bash[10442]: debug 2020-06-07T09:36:57.695+0000 7fb02756d700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog

pass 5123960 2020-06-07 07:01:29 2020-06-07 09:11:52 2020-06-07 09:33:52 0:22:00 0:12:55 0:09:05 smithi master rhel 8.1 rados/singleton/{all/dump-stuck msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}} 1
pass 5123961 2020-06-07 07:01:30 2020-06-07 09:11:53 2020-06-07 09:37:53 0:26:00 0:09:49 0:16:11 smithi master ubuntu 18.04 rados/monthrash/{ceph clusters/9-mons msgr-failures/mon-delay msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/many workloads/rados_5925} 2
pass 5123962 2020-06-07 07:01:31 2020-06-07 09:11:53 2020-06-07 09:33:53 0:22:00 0:10:15 0:11:45 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-low-osd-mem-target openstack settings/optimized ubuntu_latest workloads/radosbench_4M_write} 1
pass 5123963 2020-06-07 07:01:32 2020-06-07 09:13:47 2020-06-07 10:59:49 1:46:02 1:38:28 0:07:34 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/radosbench} 2
fail 5123964 2020-06-07 07:01:32 2020-06-07 09:15:49 2020-06-07 09:39:48 0:23:59 0:12:29 0:11:30 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_latest start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T09:30:53.654469+00:00 smithi101 bash[10590]: debug 2020-06-07T09:30:53.650+0000 7fdbbbe20700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon alertmanager.smithi101 ... ' in syslog

pass 5123965 2020-06-07 07:01:33 2020-06-07 09:15:49 2020-06-07 10:25:50 1:10:01 1:03:00 0:07:01 smithi master centos 8.1 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-bitmap supported-random-distro$/{centos_8} tasks/dashboard} 2
pass 5123966 2020-06-07 07:01:34 2020-06-07 09:15:49 2020-06-07 09:33:48 0:17:59 0:10:51 0:07:08 smithi master centos 8.1 rados/objectstore/{backends/alloc-hint supported-random-distro$/{centos_8}} 1
pass 5123967 2020-06-07 07:01:35 2020-06-07 09:15:49 2020-06-07 09:37:49 0:22:00 0:14:28 0:07:32 smithi master rhel 8.1 rados/rest/{mgr-restful supported-random-distro$/{rhel_8}} 1
pass 5123968 2020-06-07 07:01:36 2020-06-07 09:15:49 2020-06-07 09:39:48 0:23:59 0:17:28 0:06:31 smithi master rhel 8.1 rados/singleton-nomsgr/{all/admin_socket_output rados supported-random-distro$/{rhel_8}} 1
pass 5123969 2020-06-07 07:01:37 2020-06-07 09:15:50 2020-06-07 09:45:49 0:29:59 0:12:47 0:17:12 smithi master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/crush} 1
pass 5123970 2020-06-07 07:01:38 2020-06-07 09:17:44 2020-06-07 13:33:51 4:16:07 3:39:56 0:36:11 smithi master ubuntu 18.04 rados/upgrade/nautilus-x-singleton/{0-cluster/{openstack start} 1-install/nautilus 2-partial-upgrade/firsthalf 3-thrash/default 4-workload/{rbd-cls rbd-import-export readwrite snaps-few-objects} 5-workload/{radosbench rbd_api} 6-finish-upgrade 7-octopus 8-workload/{rbd-python snaps-many-objects} bluestore-bitmap thrashosds-health ubuntu_latest} 4
pass 5123971 2020-06-07 07:01:39 2020-06-07 09:17:45 2020-06-07 09:49:45 0:32:00 0:26:13 0:05:47 smithi master centos 8.1 rados/valgrind-leaks/{1-start 2-inject-leak/mon centos_latest} 1
pass 5123972 2020-06-07 07:01:39 2020-06-07 09:17:44 2020-06-07 09:43:44 0:26:00 0:14:54 0:11:06 smithi master rhel 8.1 rados/cephadm/orchestrator_cli/{2-node-mgr orchestrator_cli supported-random-distro$/{rhel_8}} 2
pass 5123973 2020-06-07 07:01:40 2020-06-07 09:17:45 2020-06-07 09:51:45 0:34:00 0:27:33 0:06:27 smithi master rhel 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/fastclose objectstore/bluestore-comp-zlib rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=jerasure-k=2-m=1} 2
pass 5123974 2020-06-07 07:01:41 2020-06-07 09:17:45 2020-06-07 09:57:46 0:40:01 0:29:45 0:10:16 smithi master ubuntu 18.04 rados/singleton/{all/ec-lost-unfound msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest}} 1
pass 5123975 2020-06-07 07:01:42 2020-06-07 09:17:48 2020-06-07 09:39:48 0:22:00 0:11:38 0:10:22 smithi master ubuntu 18.04 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/many msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} tasks/scrub_test} 2
pass 5123976 2020-06-07 07:01:43 2020-06-07 09:17:48 2020-06-07 09:47:48 0:30:00 0:18:42 0:11:18 smithi master rhel 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
pass 5123977 2020-06-07 07:01:44 2020-06-07 09:17:49 2020-06-07 09:49:48 0:31:59 0:22:54 0:09:05 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zstd rados tasks/rados_api_tests validater/lockdep} 2
pass 5123978 2020-06-07 07:01:45 2020-06-07 09:17:56 2020-06-07 09:41:56 0:24:00 0:14:03 0:09:57 smithi master centos 7.6 rados/cephadm/smoke/{distro/centos_7 fixed-2 start} 2
pass 5123979 2020-06-07 07:01:46 2020-06-07 09:19:56 2020-06-07 09:45:55 0:25:59 0:16:50 0:09:09 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/osd-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/redirect} 2
pass 5123980 2020-06-07 07:01:47 2020-06-07 09:19:56 2020-06-07 09:45:55 0:25:59 0:08:07 0:17:52 smithi master ubuntu 18.04 rados/multimon/{clusters/9 msgr-failures/many msgr/async-v1only no_pools objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_with_skews} 3
fail 5123981 2020-06-07 07:01:48 2020-06-07 09:19:56 2020-06-07 09:43:56 0:24:00 0:07:35 0:16:25 smithi master centos 7.6 rados/cephadm/smoke-roleless/{distro/centos_7 start} 2
Failure Reason:

Command failed on smithi197 with status 1: 'sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph-ci/ceph:779de8c3d6f291c76010f3cd826cde6f94255e08 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid d322206c-a8a2-11ea-a06b-001a4aab830c -- ceph orch host add smithi197'
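
The smoke-roleless job died while registering the second host with the orchestrator. A sketch of retrying that step by hand on the bootstrap node, with the image and fsid copied verbatim from the failure reason (they are specific to this run):

    # Retry the failed "ceph orch host add" inside a cephadm shell.
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay.io/ceph-ci/ceph:779de8c3d6f291c76010f3cd826cde6f94255e08 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid d322206c-a8a2-11ea-a06b-001a4aab830c \
        -- ceph orch host add smithi197
    # If it still exits 1, list what cephadm actually deployed on this host:
    sudo /home/ubuntu/cephtest/cephadm ls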

pass 5123982 2020-06-07 07:01:48 2020-06-07 09:19:57 2020-06-07 09:35:56 0:15:59 0:09:26 0:06:33 smithi master centos 8.1 rados/singleton/{all/erasure-code-nonregression msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8}} 1
pass 5123983 2020-06-07 07:01:49 2020-06-07 09:19:57 2020-06-07 09:49:57 0:30:00 0:12:17 0:17:43 smithi master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5123984 2020-06-07 07:01:50 2020-06-07 09:21:51 2020-06-07 09:55:50 0:33:59 0:22:30 0:11:29 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-stupid openstack settings/optimized ubuntu_latest workloads/radosbench_omap_write} 1
fail 5123985 2020-06-07 07:01:51 2020-06-07 09:21:51 2020-06-07 09:57:50 0:35:59 0:22:33 0:13:26 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_7.6} msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/rbd_cls} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds
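
"reached maximum tries" is teuthology's bounded-wait message: a condition was polled once per second and never became true within 180 attempts (the same signature appears on jobs 5124017, 5124049 and 5124083 below, all thrash-old-clients runs). An illustrative bounded wait of that shape, assuming the condition being polled was cluster health; the summary does not say which check actually timed out:

    # Illustrative 180-try wait loop, not teuthology's actual code.
    tries=0
    until sudo ceph health | grep -q HEALTH_OK; do
        tries=$((tries + 1))
        if [ "$tries" -ge 180 ]; then
            echo "reached maximum tries ($tries)" >&2
            exit 1
        fi
        sleep 1
    done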

pass 5123986 2020-06-07 07:01:52 2020-06-07 09:22:00 2020-06-07 09:47:59 0:25:59 0:17:38 0:08:21 smithi master rhel 8.1 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-hybrid supported-random-distro$/{rhel_8} tasks/progress} 2
fail 5123987 2020-06-07 07:01:53 2020-06-07 09:24:02 2020-06-07 09:38:02 0:14:00 0:03:20 0:10:40 smithi master ubuntu 18.04 rados/cephadm/upgrade/{1-start 2-start-upgrade 3-wait distro$/{ubuntu_18.04} fixed-2} 2
Failure Reason:

Command failed on smithi171 with status 5: 'sudo systemctl stop ceph-7eec4b3a-a8a2-11ea-a06b-001a4aab830c@mon.a'
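
systemctl exit status 5 typically means the unit was not loaded, i.e. the ceph-&lt;fsid&gt;@mon.a service was never created, so the upgrade job most likely broke before the mon container was deployed rather than while stopping it. A quick check one could run on smithi171 (fsid copied from the failure reason; the interpretation of status 5 is an assumption):

    # Was the mon unit for this fsid ever created by cephadm?
    fsid=7eec4b3a-a8a2-11ea-a06b-001a4aab830c
    systemctl list-units --all "ceph-${fsid}@*"                # empty output => never created
    sudo systemctl stop "ceph-${fsid}@mon.a"; echo "exit: $?"  # reproduces the status-5 failure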

pass 5123988 2020-06-07 07:01:54 2020-06-07 09:24:03 2020-06-07 09:50:03 0:26:00 0:14:11 0:11:49 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/redirect_promote_tests} 2
pass 5123989 2020-06-07 07:01:55 2020-06-07 09:24:03 2020-06-07 09:46:02 0:21:59 0:14:04 0:07:55 smithi master rhel 8.1 rados/singleton-nomsgr/{all/balancer rados supported-random-distro$/{rhel_8}} 1
pass 5123990 2020-06-07 07:01:56 2020-06-07 09:24:04 2020-06-07 09:58:03 0:33:59 0:26:08 0:07:51 smithi master rhel 8.1 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
fail 5123991 2020-06-07 07:01:56 2020-06-07 09:25:50 2020-06-07 09:45:49 0:19:59 0:13:37 0:06:22 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_adoption} 1
Failure Reason:

Command failed (workunit test cephadm/test_adoption.sh) on smithi099 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_adoption.sh'
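
The full workunit invocation is preserved in the failure reason, so it can be replayed on the node without the ceph-coverage/adjust-ulimits wrappers for a clearer trace. A hedged manual rerun, with paths and environment copied from the command above (the later test_cephadm.sh and test_adoption.sh failures in this run have the same shape):

    # Re-run the failing workunit directly on the test node.
    cd /home/ubuntu/cephtest/clone.client.0/qa/workunits
    CEPH_CLI_TEST_DUP_COMMAND=1 \
    CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 \
    TESTDIR=/home/ubuntu/cephtest CEPH_ARGS="--cluster ceph" CEPH_ID=0 \
    PATH="$PATH:/usr/sbin" bash -x cephadm/test_adoption.sh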

pass 5123992 2020-06-07 07:01:57 2020-06-07 09:25:50 2020-06-07 10:03:50 0:38:00 0:32:07 0:05:53 smithi master rhel 8.1 rados/singleton/{all/lost-unfound-delete msgr-failures/few msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{rhel_8}} 1
pass 5123993 2020-06-07 07:01:58 2020-06-07 09:25:50 2020-06-07 10:13:50 0:48:00 0:34:53 0:13:07 smithi master ubuntu 18.04 rados/cephadm/with-work/{distro/ubuntu_18.04_podman fixed-2 mode/root msgr/async-v2only start tasks/rados_api_tests} 2
pass 5123994 2020-06-07 07:01:59 2020-06-07 09:25:50 2020-06-07 10:07:50 0:42:00 0:29:26 0:12:34 smithi master ubuntu 18.04 rados/monthrash/{ceph clusters/3-mons msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/one workloads/rados_api_tests} 2
pass 5123995 2020-06-07 07:02:00 2020-06-07 09:25:59 2020-06-07 10:01:59 0:36:00 0:27:50 0:08:10 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/few rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/minsize_recovery thrashosds-health workloads/ec-snaps-few-objects-overwrites} 2
pass 5123996 2020-06-07 07:02:01 2020-06-07 09:28:14 2020-06-07 09:48:11 0:19:57 0:11:27 0:08:30 smithi master centos 8.0 rados/cephadm/smoke/{distro/centos_8.0 fixed-2 start} 2
pass 5123997 2020-06-07 07:02:02 2020-06-07 09:28:13 2020-06-07 09:48:12 0:19:59 0:12:51 0:07:08 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/redirect_set_object} 2
pass 5123998 2020-06-07 07:02:03 2020-06-07 09:28:12 2020-06-07 10:06:12 0:38:00 0:32:18 0:05:42 smithi master rhel 8.1 rados/singleton/{all/lost-unfound msgr-failures/many msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8}} 1
pass 5123999 2020-06-07 07:02:03 2020-06-07 09:28:15 2020-06-07 09:50:13 0:21:58 0:12:49 0:09:09 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-basic-min-osd-mem-target openstack settings/optimized ubuntu_latest workloads/sample_fio} 1
pass 5124000 2020-06-07 07:02:04 2020-06-07 09:28:14 2020-06-07 10:06:13 0:37:59 0:25:25 0:12:34 smithi master ubuntu 18.04 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} tasks/rados_api_tests} 2
pass 5124001 2020-06-07 07:02:05 2020-06-07 09:28:14 2020-06-07 09:46:12 0:17:58 0:10:25 0:07:33 smithi master centos 8.0 rados/cephadm/smoke-roleless/{distro/centos_8.0 start} 2
pass 5124002 2020-06-07 07:02:06 2020-06-07 09:28:15 2020-06-07 10:10:13 0:41:58 0:27:57 0:14:01 smithi master ubuntu 18.04 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/few objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=3-m=1} 2
pass 5124003 2020-06-07 07:02:07 2020-06-07 09:30:05 2020-06-07 10:10:05 0:40:00 0:33:45 0:06:15 smithi master centos 8.1 rados/singleton-bluestore/{all/cephtool msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8}} 1
pass 5124004 2020-06-07 07:02:08 2020-06-07 09:30:05 2020-06-07 10:44:06 1:14:01 1:05:14 0:08:47 smithi master rhel 8.1 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-lz4 supported-random-distro$/{rhel_8} tasks/dashboard} 2
pass 5124005 2020-06-07 07:02:09 2020-06-07 09:30:06 2020-06-07 09:50:05 0:19:59 0:12:20 0:07:39 smithi master centos 8.1 rados/objectstore/{backends/ceph_objectstore_tool supported-random-distro$/{centos_8}} 1
pass 5124006 2020-06-07 07:02:10 2020-06-07 09:30:05 2020-06-07 10:06:05 0:36:00 0:25:42 0:10:18 smithi master centos 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/fastclose objectstore/bluestore-hybrid rados recovery-overrides/{more-async-recovery} supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
pass 5124007 2020-06-07 07:02:11 2020-06-07 09:32:04 2020-06-07 12:44:09 3:12:05 3:02:49 0:09:16 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async-v2only objectstore/bluestore-hybrid rados tasks/rados_cls_all validater/valgrind} 2
fail 5124008 2020-06-07 07:02:11 2020-06-07 09:32:04 2020-06-07 09:52:04 0:20:00 0:10:02 0:09:58 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi061 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

pass 5124009 2020-06-07 07:02:12 2020-06-07 09:32:04 2020-06-07 09:50:04 0:18:00 0:07:42 0:10:18 smithi master ubuntu 18.04 rados/singleton/{all/max-pg-per-osd.from-mon msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124010 2020-06-07 07:02:13 2020-06-07 09:34:05 2020-06-07 09:48:05 0:14:00 0:08:50 0:05:10 smithi master centos 8.1 rados/singleton-nomsgr/{all/cache-fs-trunc rados supported-random-distro$/{centos_8}} 1
pass 5124011 2020-06-07 07:02:14 2020-06-07 09:34:06 2020-06-07 09:56:05 0:21:59 0:10:18 0:11:41 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/osd-delay msgr/async objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest} thrashers/none thrashosds-health workloads/set-chunk-promote-flush} 2
pass 5124012 2020-06-07 07:02:15 2020-06-07 09:34:05 2020-06-07 10:06:05 0:32:00 0:23:31 0:08:29 smithi master centos 8.1 rados/multimon/{clusters/21 msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{centos_8} tasks/mon_recovery} 3
pass 5124013 2020-06-07 07:02:16 2020-06-07 09:34:06 2020-06-07 09:56:05 0:21:59 0:11:45 0:10:14 smithi master centos 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/fastclose objectstore/bluestore-hybrid rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5124014 2020-06-07 07:02:17 2020-06-07 09:35:55 2020-06-07 10:03:55 0:28:00 0:21:57 0:06:03 smithi master centos 8.0 rados/cephadm/with-work/{distro/centos_8.0 fixed-2 mode/packaged msgr/async-v2only start tasks/rados_python} 2
pass 5124015 2020-06-07 07:02:18 2020-06-07 09:35:55 2020-06-07 09:57:55 0:22:00 0:14:06 0:07:54 smithi master rhel 8.1 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-low-osd-mem-target supported-random-distro$/{rhel_8} tasks/prometheus} 2
pass 5124016 2020-06-07 07:02:19 2020-06-07 09:35:57 2020-06-07 10:19:58 0:44:01 0:35:12 0:08:49 smithi master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/erasure-code} 1
fail 5124017 2020-06-07 07:02:20 2020-06-07 09:38:00 2020-06-07 10:14:00 0:36:00 0:22:50 0:13:10 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/mimic-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_7.6} msgr-failures/osd-delay rados thrashers/careful thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124018 2020-06-07 07:02:20 2020-06-07 09:38:01 2020-06-07 09:58:00 0:19:59 0:10:27 0:09:32 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-bitmap openstack settings/optimized ubuntu_latest workloads/sample_radosbench} 1
pass 5124019 2020-06-07 07:02:21 2020-06-07 09:38:00 2020-06-07 09:56:00 0:18:00 0:10:35 0:07:25 smithi master centos 8.1 rados/cephadm/smoke/{distro/centos_latest fixed-2 start} 2
pass 5124020 2020-06-07 07:02:22 2020-06-07 09:38:01 2020-06-07 09:56:00 0:17:59 0:12:15 0:05:44 smithi master centos 8.1 rados/singleton/{all/max-pg-per-osd.from-primary msgr-failures/many msgr/async objectstore/filestore-xfs rados supported-random-distro$/{centos_8}} 1
pass 5124021 2020-06-07 07:02:23 2020-06-07 09:38:01 2020-06-07 10:00:00 0:21:59 0:15:38 0:06:21 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/set-chunks-read} 2
pass 5124022 2020-06-07 07:02:24 2020-06-07 09:38:00 2020-06-07 10:18:01 0:40:01 0:29:42 0:10:19 smithi master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/fastclose objectstore/bluestore-hybrid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124023 2020-06-07 07:02:25 2020-06-07 09:38:03 2020-06-07 09:56:02 0:17:59 0:10:08 0:07:51 smithi master centos 8.1 rados/cephadm/smoke-roleless/{distro/centos_latest start} 2
pass 5124024 2020-06-07 07:02:26 2020-06-07 09:40:03 2020-06-07 09:54:02 0:13:59 0:06:54 0:07:05 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_cephadm_repos} 1
pass 5124025 2020-06-07 07:02:27 2020-06-07 09:40:03 2020-06-07 10:00:02 0:19:59 0:12:32 0:07:27 smithi master ubuntu 18.04 rados/singleton/{all/max-pg-per-osd.from-replica msgr-failures/few msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124026 2020-06-07 07:02:27 2020-06-07 09:40:03 2020-06-07 10:00:02 0:19:59 0:10:30 0:09:29 smithi master ubuntu 18.04 rados/singleton-nomsgr/{all/ceph-kvstore-tool rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124027 2020-06-07 07:02:28 2020-06-07 09:40:03 2020-06-07 10:42:04 1:02:01 0:52:00 0:10:01 smithi master ubuntu 18.04 rados/monthrash/{ceph clusters/9-mons msgr-failures/mon-delay msgr/async objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest} thrashers/sync-many workloads/rados_mon_osdmap_prune} 2
pass 5124028 2020-06-07 07:02:29 2020-06-07 09:40:03 2020-06-07 10:08:03 0:28:00 0:20:30 0:07:30 smithi master rhel 8.1 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} tasks/rados_cls_all} 2
pass 5124029 2020-06-07 07:02:30 2020-06-07 09:41:00 2020-06-07 10:19:00 0:38:00 0:31:18 0:06:42 smithi master centos 8.1 rados/cephadm/with-work/{distro/centos_latest fixed-2 mode/root msgr/async start tasks/rados_api_tests} 2
pass 5124030 2020-06-07 07:02:31 2020-06-07 09:41:59 2020-06-07 10:13:59 0:32:00 0:22:59 0:09:01 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/small-objects-balanced} 2
pass 5124031 2020-06-07 07:02:32 2020-06-07 09:42:00 2020-06-07 10:07:59 0:25:59 0:16:01 0:09:58 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-bitmap openstack settings/optimized ubuntu_latest workloads/cosbench_64K_read_write} 1
pass 5124032 2020-06-07 07:02:33 2020-06-07 09:41:59 2020-06-07 10:03:59 0:22:00 0:15:19 0:06:41 smithi master rhel 7.7 rados/cephadm/smoke/{distro/rhel_7 fixed-2 start} 2
pass 5124033 2020-06-07 07:02:34 2020-06-07 09:42:00 2020-06-07 11:06:01 1:24:01 1:17:02 0:06:59 smithi master rhel 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/osd-delay objectstore/bluestore-hybrid rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/fastread thrashosds-health workloads/ec-radosbench} 2
pass 5124034 2020-06-07 07:02:34 2020-06-07 09:42:00 2020-06-07 10:01:59 0:19:59 0:13:53 0:06:06 smithi master rhel 8.1 rados/singleton/{all/mon-auth-caps msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}} 1
pass 5124035 2020-06-07 07:02:35 2020-06-07 09:43:57 2020-06-07 10:05:56 0:21:59 0:14:12 0:07:47 smithi master rhel 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/few objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
pass 5124036 2020-06-07 07:02:36 2020-06-07 09:43:57 2020-06-07 10:01:57 0:18:00 0:11:18 0:06:42 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados tasks/mon_recovery validater/lockdep} 2
pass 5124037 2020-06-07 07:02:37 2020-06-07 09:43:58 2020-06-07 10:05:57 0:21:59 0:14:51 0:07:08 smithi master rhel 7.7 rados/cephadm/smoke-roleless/{distro/rhel_7 start} 2
pass 5124038 2020-06-07 07:02:38 2020-06-07 09:43:57 2020-06-07 10:23:57 0:40:00 0:34:53 0:05:07 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/osd-delay rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-pool-snaps-few-objects-overwrites} 2
pass 5124039 2020-06-07 07:02:39 2020-06-07 09:43:58 2020-06-07 10:05:57 0:21:59 0:12:08 0:09:51 smithi master centos 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/few objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5124040 2020-06-07 07:02:40 2020-06-07 09:46:07 2020-06-07 11:00:08 1:14:01 1:03:39 0:10:22 smithi master ubuntu 18.04 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-snappy supported-random-distro$/{ubuntu_latest} tasks/dashboard} 2
pass 5124041 2020-06-07 07:02:41 2020-06-07 09:46:07 2020-06-07 10:04:06 0:17:59 0:10:48 0:07:11 smithi master rhel 8.1 rados/objectstore/{backends/filejournal supported-random-distro$/{rhel_8}} 1
pass 5124042 2020-06-07 07:02:41 2020-06-07 09:46:07 2020-06-07 10:04:06 0:17:59 0:11:07 0:06:52 smithi master rhel 8.1 rados/multimon/{clusters/3 msgr-failures/many msgr/async no_pools objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} tasks/mon_clock_no_skews} 2
pass 5124043 2020-06-07 07:02:42 2020-06-07 09:46:07 2020-06-07 10:14:06 0:27:59 0:21:35 0:06:24 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/osd-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/small-objects-localized} 2
pass 5124044 2020-06-07 07:02:43 2020-06-07 09:46:07 2020-06-07 10:08:06 0:21:59 0:12:31 0:09:28 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_orch_cli} 1
pass 5124045 2020-06-07 07:02:44 2020-06-07 09:46:13 2020-06-07 10:06:13 0:20:00 0:11:02 0:08:58 smithi master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-stupid supported-random-distro$/{ubuntu_latest} tasks/workunits} 2
pass 5124046 2020-06-07 07:02:45 2020-06-07 09:47:48 2020-06-07 10:05:48 0:18:00 0:11:52 0:06:08 smithi master centos 8.1 rados/singleton/{all/mon-config-key-caps msgr-failures/few msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8}} 1
pass 5124047 2020-06-07 07:02:46 2020-06-07 09:47:49 2020-06-07 10:05:48 0:17:59 0:12:11 0:05:48 smithi master rhel 8.1 rados/singleton-nomsgr/{all/ceph-post-file rados supported-random-distro$/{rhel_8}} 1
pass 5124048 2020-06-07 07:02:47 2020-06-07 09:47:50 2020-06-07 10:19:49 0:31:59 0:26:40 0:05:19 smithi master rhel 8.0 rados/cephadm/with-work/{distro/rhel_8.0 fixed-2 mode/packaged msgr/async-v1only start tasks/rados_python} 2
fail 5124049 2020-06-07 07:02:48 2020-06-07 09:47:50 2020-06-07 10:21:50 0:34:00 0:22:36 0:11:24 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/mimic backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_7.6} msgr-failures/fastclose rados thrashers/default thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124050 2020-06-07 07:02:49 2020-06-07 09:48:01 2020-06-07 10:14:00 0:25:59 0:16:41 0:09:18 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-comp openstack settings/optimized ubuntu_latest workloads/cosbench_64K_write} 1
pass 5124051 2020-06-07 07:02:50 2020-06-07 09:48:06 2020-06-07 10:22:06 0:34:00 0:23:03 0:10:57 smithi master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124052 2020-06-07 07:02:51 2020-06-07 09:48:13 2020-06-07 10:08:12 0:19:59 0:13:52 0:06:07 smithi master rhel 8.0 rados/cephadm/smoke/{distro/rhel_8.0 fixed-2 start} 2
pass 5124053 2020-06-07 07:02:52 2020-06-07 09:48:13 2020-06-07 10:10:13 0:22:00 0:15:49 0:06:11 smithi master centos 8.1 rados/singleton/{all/mon-config-keys msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8}} 1
pass 5124054 2020-06-07 07:02:53 2020-06-07 09:50:02 2020-06-07 10:22:02 0:32:00 0:25:29 0:06:31 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-active-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/small-objects} 2
pass 5124055 2020-06-07 07:02:54 2020-06-07 09:50:02 2020-06-07 10:16:02 0:26:00 0:19:02 0:06:58 smithi master rhel 8.1 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/few msgr/async objectstore/filestore-xfs rados supported-random-distro$/{rhel_8} tasks/rados_python} 2
pass 5124056 2020-06-07 07:02:54 2020-06-07 09:50:03 2020-06-07 10:10:02 0:19:59 0:13:05 0:06:54 smithi master rhel 8.0 rados/cephadm/smoke-roleless/{distro/rhel_8.0 start} 2
pass 5124057 2020-06-07 07:02:55 2020-06-07 09:50:03 2020-06-07 10:28:03 0:38:00 0:32:20 0:05:40 smithi master centos 8.1 rados/monthrash/{ceph clusters/3-mons msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} thrashers/sync workloads/rados_mon_workunits} 2
pass 5124058 2020-06-07 07:02:56 2020-06-07 09:50:03 2020-06-07 10:12:02 0:21:59 0:13:11 0:08:48 smithi master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/mgr} 1
fail 5124059 2020-06-07 07:02:57 2020-06-07 09:50:04 2020-06-07 10:10:03 0:19:59 0:10:05 0:09:54 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_adoption} 1
Failure Reason:

Command failed (workunit test cephadm/test_adoption.sh) on smithi148 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_adoption.sh'

pass 5124060 2020-06-07 07:02:58 2020-06-07 09:50:05 2020-06-07 10:10:05 0:20:00 0:13:34 0:06:26 smithi master rhel 8.1 rados/singleton/{all/mon-config msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}} 1
pass 5124061 2020-06-07 07:02:59 2020-06-07 09:50:06 2020-06-07 10:08:05 0:17:59 0:07:45 0:10:14 smithi master ubuntu 18.04 rados/singleton-nomsgr/{all/export-after-evict rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124062 2020-06-07 07:03:00 2020-06-07 09:50:14 2020-06-07 10:28:14 0:38:00 0:27:08 0:10:52 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/snaps-few-objects-balanced} 2
dead 5124063 2020-06-07 07:03:01 2020-06-07 09:52:02 2020-06-07 21:54:36 12:02:34 smithi master centos 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/fastclose objectstore/bluestore-low-osd-mem-target rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{centos_8} thrashers/minsize_recovery thrashosds-health workloads/ec-small-objects-balanced} 2
pass 5124064 2020-06-07 07:03:02 2020-06-07 09:52:02 2020-06-07 10:10:02 0:18:00 0:12:07 0:05:53 smithi master rhel 8.1 rados/cephadm/smoke/{distro/rhel_latest fixed-2 start} 2
pass 5124065 2020-06-07 07:03:03 2020-06-07 09:52:02 2020-06-07 10:16:02 0:24:00 0:12:38 0:11:22 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-low-osd-mem-target openstack settings/optimized ubuntu_latest workloads/fio_4K_rand_read} 1
pass 5124066 2020-06-07 07:03:03 2020-06-07 09:52:02 2020-06-07 10:28:02 0:36:00 0:26:05 0:09:55 smithi master centos 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/osd-delay objectstore/bluestore-stupid rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
fail 5124067 2020-06-07 07:03:04 2020-06-07 09:52:05 2020-06-07 18:10:19 8:18:14 8:07:57 0:10:17 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async-v1only objectstore/bluestore-stupid rados tasks/rados_api_tests validater/valgrind} 2
Failure Reason:

Command failed (workunit test rados/test.sh) on smithi103 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 ALLOW_TIMEOUTS=1 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/test.sh'
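
Exit status 124 comes from the timeout(1) wrapper visible in the command: the valgrind-instrumented rados/test.sh ran past its 6-hour cap rather than failing an assertion. A one-line demonstration of where that code comes from:

    # GNU timeout reports 124 when the wrapped command hits its deadline.
    timeout 2 sleep 10; echo "exit status: $?"    # prints: exit status: 124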

pass 5124068 2020-06-07 07:03:05 2020-06-07 09:54:21 2020-06-07 10:16:21 0:22:00 0:14:29 0:07:31 smithi master rhel 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/osd-delay objectstore/bluestore-stupid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5124069 2020-06-07 07:03:06 2020-06-07 09:56:04 2020-06-07 10:14:04 0:18:00 0:11:24 0:06:36 smithi master rhel 8.1 rados/cephadm/smoke-roleless/{distro/rhel_latest start} 2
pass 5124070 2020-06-07 07:03:07 2020-06-07 09:56:05 2020-06-07 10:18:04 0:21:59 0:14:55 0:07:04 smithi master centos 8.1 rados/singleton/{all/osd-backfill msgr-failures/many msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8}} 1
pass 5124071 2020-06-07 07:03:08 2020-06-07 09:56:05 2020-06-07 10:16:04 0:19:59 0:07:30 0:12:29 smithi master ubuntu 18.04 rados/multimon/{clusters/6 msgr-failures/few msgr/async-v1only no_pools objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_with_skews} 2
pass 5124072 2020-06-07 07:03:09 2020-06-07 09:56:05 2020-06-07 10:16:04 0:19:59 0:13:24 0:06:35 smithi master rhel 8.1 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/filestore-xfs supported-random-distro$/{rhel_8} tasks/crash} 2
pass 5124073 2020-06-07 07:03:10 2020-06-07 09:56:05 2020-06-07 10:36:05 0:40:00 0:33:39 0:06:21 smithi master rhel 8.1 rados/cephadm/with-work/{distro/rhel_latest fixed-2 mode/root msgr/async-v2only start tasks/rados_api_tests} 2
pass 5124074 2020-06-07 07:03:10 2020-06-07 09:56:05 2020-06-07 10:26:05 0:30:00 0:23:54 0:06:06 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/osd-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/snaps-few-objects-localized} 2
pass 5124075 2020-06-07 07:03:11 2020-06-07 09:56:07 2020-06-07 10:40:07 0:44:00 0:34:29 0:09:31 smithi master ubuntu 18.04 rados/singleton-bluestore/{all/cephtool msgr-failures/many msgr/async-v2only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124076 2020-06-07 07:03:12 2020-06-07 09:56:07 2020-06-07 11:10:08 1:14:01 1:05:21 0:08:40 smithi master rhel 8.1 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-zlib supported-random-distro$/{rhel_8} tasks/dashboard} 2
pass 5124077 2020-06-07 07:03:13 2020-06-07 09:58:03 2020-06-07 12:28:06 2:30:03 2:24:56 0:05:07 smithi master centos 8.1 rados/objectstore/{backends/filestore-idempotent-aio-journal supported-random-distro$/{centos_8}} 1
pass 5124078 2020-06-07 07:03:14 2020-06-07 09:58:03 2020-06-07 10:24:03 0:26:00 0:19:17 0:06:43 smithi master centos 8.1 rados/valgrind-leaks/{1-start 2-inject-leak/none centos_latest} 1
fail 5124079 2020-06-07 07:03:15 2020-06-07 09:58:03 2020-06-07 10:18:03 0:20:00 0:13:42 0:06:18 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi006 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

pass 5124080 2020-06-07 07:03:15 2020-06-07 09:58:03 2020-06-07 10:30:03 0:32:00 0:22:04 0:09:56 smithi master ubuntu 18.04 rados/singleton/{all/osd-recovery-incomplete msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124081 2020-06-07 07:03:16 2020-06-07 09:58:08 2020-06-07 10:28:08 0:30:00 0:19:59 0:10:01 smithi master ubuntu 18.04 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/many msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest} tasks/rados_stress_watch} 2
pass 5124082 2020-06-07 07:03:17 2020-06-07 10:00:18 2020-06-07 10:18:17 0:17:59 0:08:28 0:09:31 smithi master ubuntu 18.04 rados/singleton-nomsgr/{all/full-tiering rados supported-random-distro$/{ubuntu_latest}} 1
fail 5124083 2020-06-07 07:03:18 2020-06-07 10:00:18 2020-06-07 10:36:18 0:36:00 0:23:29 0:12:31 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus-v1only backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_7.6} msgr-failures/few rados thrashers/mapgap thrashosds-health workloads/cache-snaps} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124084 2020-06-07 07:03:19 2020-06-07 10:00:18 2020-06-07 10:38:18 0:38:00 0:31:51 0:06:09 smithi master rhel 8.1 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/osd-delay objectstore/bluestore-stupid rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124085 2020-06-07 07:03:20 2020-06-07 10:02:09 2020-06-07 10:24:09 0:22:00 0:12:53 0:09:07 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-stupid openstack settings/optimized ubuntu_latest workloads/fio_4K_rand_rw} 1
fail 5124086 2020-06-07 07:03:21 2020-06-07 10:02:10 2020-06-07 10:26:09 0:23:59 0:13:06 0:10:53 smithi master ubuntu 18.04 rados/cephadm/smoke/{distro/ubuntu_18.04 fixed-2 start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T10:22:24.096239+00:00 smithi189 bash[10477]: debug 2020-06-07T10:22:24.090+0000 7f275c4f7700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog

pass 5124087 2020-06-07 07:03:22 2020-06-07 10:02:09 2020-06-07 10:34:09 0:32:00 0:25:16 0:06:44 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/fastclose rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-small-objects-fast-read-overwrites} 2
pass 5124088 2020-06-07 07:03:23 2020-06-07 10:02:10 2020-06-07 10:40:09 0:37:59 0:30:18 0:07:41 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/snaps-few-objects} 2
pass 5124089 2020-06-07 07:03:24 2020-06-07 10:04:08 2020-06-07 10:26:07 0:21:59 0:12:44 0:09:15 smithi master ubuntu 18.04 rados/singleton/{all/osd-recovery msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest}} 1
fail 5124090 2020-06-07 07:03:25 2020-06-07 10:04:08 2020-06-07 10:26:08 0:22:00 0:12:35 0:09:25 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_18.04 start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T10:18:21.929556+00:00 smithi093 bash[10437]: debug 2020-06-07T10:18:21.925+0000 7fca89ce7700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon alertmanager.smithi093 ... ' in syslog

pass 5124091 2020-06-07 07:03:26 2020-06-07 10:04:08 2020-06-07 10:44:08 0:40:00 0:29:57 0:10:03 smithi master ubuntu 18.04 rados/monthrash/{ceph clusters/9-mons msgr-failures/mon-delay msgr/async-v2only objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} thrashers/force-sync-many workloads/snaps-few-objects} 2
fail 5124092 2020-06-07 07:03:26 2020-06-07 10:04:08 2020-06-07 10:40:08 0:36:00 0:25:03 0:10:57 smithi master ubuntu 18.04 rados/cephadm/with-work/{distro/ubuntu_18.04 fixed-2 mode/packaged msgr/async start tasks/rados_python} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T10:25:13.887545+00:00 smithi154 bash[13569]: debug 2020-06-07T10:25:13.883+0000 7f30a08b3700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog

pass 5124093 2020-06-07 07:03:27 2020-06-07 10:04:08 2020-06-07 10:28:08 0:24:00 0:18:08 0:05:52 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/write_fadvise_dontneed} 2
pass 5124094 2020-06-07 07:03:28 2020-06-07 10:06:06 2020-06-07 10:38:05 0:31:59 0:25:54 0:06:05 smithi master rhel 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/few objectstore/bluestore-stupid rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/ec-small-objects-fast-read} 2
pass 5124095 2020-06-07 07:03:29 2020-06-07 10:06:06 2020-06-07 10:20:05 0:13:59 0:07:58 0:06:01 smithi master centos 8.1 rados/singleton/{all/peer msgr-failures/few msgr/async objectstore/filestore-xfs rados supported-random-distro$/{centos_8}} 1
pass 5124096 2020-06-07 07:03:30 2020-06-07 10:06:06 2020-06-07 10:22:05 0:15:59 0:06:51 0:09:08 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_cephadm_repos} 1
fail 5124097 2020-06-07 07:03:31 2020-06-07 10:06:07 2020-06-07 10:28:07 0:22:00 0:15:56 0:06:04 smithi master rhel 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/fastclose objectstore/filestore-xfs rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
Failure Reason:

Command failed on smithi107 with status 124: 'sudo adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 120 ceph --cluster ceph --log-early osd last-stat-seq osd.10'
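
Another timeout(1) expiry (status 124): the single "ceph osd last-stat-seq osd.10" call hung for its full 120-second cap, which points at an unresponsive mon/mgr or a stalled osd.10 during teardown rather than a test assertion. A hedged manual probe, assuming CLI access to the cluster on smithi107:

    # Probe cluster responsiveness and osd.10's stat sequence by hand.
    sudo timeout 30 ceph --cluster ceph -s                # does the mon answer at all?
    sudo ceph --cluster ceph osd last-stat-seq osd.10     # the call that hung
    sudo ceph tell osd.10 flush_pg_stats                  # nudge the osd to report fresh stats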

pass 5124098 2020-06-07 07:03:32 2020-06-07 10:06:06 2020-06-07 10:26:05 0:19:59 0:13:44 0:06:15 smithi master rhel 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/fastclose objectstore/filestore-xfs rados recovery-overrides/{more-async-recovery} supported-random-distro$/{rhel_8} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
pass 5124099 2020-06-07 07:03:33 2020-06-07 10:06:06 2020-06-07 10:30:06 0:24:00 0:17:10 0:06:50 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none msgr-failures/few msgr/async-v2only objectstore/filestore-xfs rados tasks/rados_cls_all validater/lockdep} 2
pass 5124100 2020-06-07 07:03:34 2020-06-07 10:06:07 2020-06-07 10:26:06 0:19:59 0:10:47 0:09:12 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-basic-min-osd-mem-target openstack settings/optimized ubuntu_latest workloads/fio_4M_rand_read} 1
pass 5124101 2020-06-07 07:03:35 2020-06-07 10:06:06 2020-06-07 10:26:05 0:19:59 0:11:41 0:08:18 smithi master ubuntu 18.04 rados/singleton-nomsgr/{all/health-warnings rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124102 2020-06-07 07:03:35 2020-06-07 10:06:07 2020-06-07 10:30:07 0:24:00 0:13:56 0:10:04 smithi master ubuntu 18.04 rados/cephadm/smoke/{distro/ubuntu_18.04_podman fixed-2 start} 2
pass 5124103 2020-06-07 07:03:36 2020-06-07 10:06:07 2020-06-07 10:28:07 0:22:00 0:12:38 0:09:22 smithi master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-bitmap supported-random-distro$/{ubuntu_latest} tasks/failover} 2
pass 5124104 2020-06-07 07:03:37 2020-06-07 10:06:13 2020-06-07 10:30:13 0:24:00 0:16:40 0:07:20 smithi master centos 8.1 rados/multimon/{clusters/9 msgr-failures/many msgr/async-v2only no_pools objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} tasks/mon_recovery} 3
pass 5124105 2020-06-07 07:03:38 2020-06-07 10:06:14 2020-06-07 10:26:14 0:20:00 0:09:10 0:10:50 smithi master ubuntu 18.04 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} tasks/rados_striper} 2
pass 5124106 2020-06-07 07:03:39 2020-06-07 10:06:15 2020-06-07 10:30:14 0:23:59 0:16:39 0:07:20 smithi master rhel 8.1 rados/singleton/{all/pg-autoscaler msgr-failures/many msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8}} 2
pass 5124107 2020-06-07 07:03:40 2020-06-07 10:08:07 2020-06-07 10:30:06 0:21:59 0:12:28 0:09:31 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_18.04_podman start} 2
pass 5124108 2020-06-07 07:03:41 2020-06-07 10:08:07 2020-06-07 10:44:07 0:36:00 0:26:13 0:09:47 smithi master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/misc} 1
pass 5124109 2020-06-07 07:03:42 2020-06-07 10:08:07 2020-06-07 10:34:07 0:26:00 0:19:25 0:06:35 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-active-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/osd-delay msgr/async objectstore/filestore-xfs rados supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/admin_socket_objecter_requests} 2
pass 5124110 2020-06-07 07:03:43 2020-06-07 10:08:07 2020-06-07 10:52:08 0:44:01 0:34:50 0:09:11 smithi master ubuntu 18.04 rados/cephadm/with-work/{distro/ubuntu_18.04_podman fixed-2 mode/root msgr/async-v1only start tasks/rados_api_tests} 2
pass 5124111 2020-06-07 07:03:43 2020-06-07 10:08:07 2020-06-07 11:18:09 1:10:02 1:02:53 0:07:09 smithi master centos 8.1 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-zstd supported-random-distro$/{centos_8} tasks/dashboard} 2
pass 5124112 2020-06-07 07:03:44 2020-06-07 10:08:13 2020-06-07 12:22:16 2:14:03 2:08:56 0:05:07 smithi master rhel 8.1 rados/objectstore/{backends/filestore-idempotent supported-random-distro$/{rhel_8}} 1
pass 5124113 2020-06-07 07:03:45 2020-06-07 10:10:19 2020-06-07 10:44:19 0:34:00 0:23:44 0:10:16 smithi master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/fastclose objectstore/filestore-xfs rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124114 2020-06-07 07:03:46 2020-06-07 10:10:19 2020-06-07 10:28:19 0:18:00 0:11:08 0:06:52 smithi master rhel 8.1 rados/singleton/{all/pg-removal-interruption msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8}} 1
fail 5124115 2020-06-07 07:03:47 2020-06-07 10:10:20 2020-06-07 10:24:19 0:13:59 0:06:48 0:07:11 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_orch_cli} 1
Failure Reason:

Command failed on smithi061 with status 5: 'sudo systemctl stop ceph-9b17a4d4-a8a8-11ea-a06b-001a4aab830c@mon.a'

pass 5124116 2020-06-07 07:03:48 2020-06-07 10:10:19 2020-06-07 11:52:21 1:42:02 1:27:19 0:14:43 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/nautilus-v2only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_7.6} msgr-failures/osd-delay rados thrashers/morepggrow thrashosds-health workloads/radosbench} 3
pass 5124117 2020-06-07 07:03:49 2020-06-07 10:10:20 2020-06-07 10:30:19 0:19:59 0:10:59 0:09:00 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-bitmap openstack settings/optimized ubuntu_latest workloads/fio_4M_rand_rw} 1
pass 5124118 2020-06-07 07:03:49 2020-06-07 10:10:19 2020-06-07 10:36:19 0:26:00 0:19:27 0:06:33 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/mapgap thrashosds-health workloads/cache-agent-big} 2
pass 5124119 2020-06-07 07:03:50 2020-06-07 10:10:20 2020-06-07 10:30:19 0:19:59 0:13:14 0:06:45 smithi master rhel 8.1 rados/singleton-nomsgr/{all/large-omap-object-warnings rados supported-random-distro$/{rhel_8}} 1
fail 5124120 2020-06-07 07:03:51 2020-06-07 10:12:23 2020-06-07 10:36:22 0:23:59 0:13:15 0:10:44 smithi master ubuntu 18.04 rados/cephadm/smoke/{distro/ubuntu_latest fixed-2 start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T10:30:58.994397+00:00 smithi063 bash[10601]: debug 2020-06-07T10:30:58.991+0000 7f475c9bd700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog

pass 5124121 2020-06-07 07:03:52 2020-06-07 10:14:07 2020-06-07 10:42:07 0:28:00 0:17:18 0:10:42 smithi master ubuntu 18.04 rados/monthrash/{ceph clusters/3-mons msgr-failures/few msgr/async objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} thrashers/many workloads/pool-create-delete} 2
pass 5124122 2020-06-07 07:03:53 2020-06-07 10:14:07 2020-06-07 10:44:07 0:30:00 0:24:12 0:05:48 smithi master rhel 8.1 rados/singleton/{all/radostool msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}} 1
fail 5124123 2020-06-07 07:03:54 2020-06-07 10:14:07 2020-06-07 10:38:07 0:24:00 0:12:42 0:11:18 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_latest start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T10:28:51.120231+00:00 smithi040 bash[10477]: debug 2020-06-07T10:28:51.116+0000 7f49921ff700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon alertmanager.smithi040 ... ' in syslog

pass 5124124 2020-06-07 07:03:55 2020-06-07 10:14:08 2020-06-07 10:44:07 0:29:59 0:24:15 0:05:44 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/few rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-small-objects-overwrites} 2
pass 5124125 2020-06-07 07:03:56 2020-06-07 10:14:08 2020-06-07 10:34:07 0:19:59 0:12:01 0:07:58 smithi master centos 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/fastclose objectstore/bluestore-bitmap rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5124126 2020-06-07 07:03:56 2020-06-07 10:14:08 2020-06-07 10:42:07 0:27:59 0:21:10 0:06:49 smithi master centos 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/osd-delay objectstore/filestore-xfs rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/ec-small-objects-many-deletes} 2
pass 5124127 2020-06-07 07:03:57 2020-06-07 10:14:08 2020-06-07 10:52:08 0:38:00 0:30:55 0:07:05 smithi master centos 8.0 rados/cephadm/with-work/{distro/centos_8.0 fixed-2 mode/root msgr/async-v1only start tasks/rados_api_tests} 2
pass 5124128 2020-06-07 07:03:58 2020-06-07 10:14:08 2020-06-07 10:38:07 0:23:59 0:14:02 0:09:57 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/cache-agent-small} 2
pass 5124129 2020-06-07 07:03:59 2020-06-07 10:16:25 2020-06-07 10:58:25 0:42:00 0:34:44 0:07:16 smithi master centos 8.1 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/many msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} tasks/rados_workunit_loadgen_big} 2
pass 5124130 2020-06-07 07:04:00 2020-06-07 10:16:26 2020-06-07 10:54:26 0:38:00 0:29:20 0:08:40 smithi master rhel 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/few objectstore/bluestore-bitmap rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
pass 5124131 2020-06-07 07:04:01 2020-06-07 10:16:25 2020-06-07 14:10:30 3:54:05 3:47:38 0:06:27 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async objectstore/bluestore-bitmap rados tasks/rados_cls_all validater/valgrind} 2
pass 5124132 2020-06-07 07:04:02 2020-06-07 10:16:26 2020-06-07 10:50:25 0:33:59 0:24:20 0:09:39 smithi master ubuntu 18.04 rados/singleton/{all/random-eio msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{ubuntu_latest}} 2
pass 5124133 2020-06-07 07:04:03 2020-06-07 10:16:25 2020-06-07 10:36:24 0:19:59 0:14:45 0:05:14 smithi master rhel 8.1 rados/cephadm/orchestrator_cli/{2-node-mgr orchestrator_cli supported-random-distro$/{rhel_8}} 2
pass 5124134 2020-06-07 07:04:03 2020-06-07 10:18:19 2020-06-07 10:42:18 0:23:59 0:13:10 0:10:49 smithi master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-lz4 supported-random-distro$/{ubuntu_latest} tasks/insights} 2
pass 5124135 2020-06-07 07:04:04 2020-06-07 10:18:19 2020-06-07 10:42:19 0:24:00 0:14:24 0:09:36 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-comp openstack settings/optimized ubuntu_latest workloads/fio_4M_rand_write} 1
fail 5124136 2020-06-07 07:04:05 2020-06-07 10:18:19 2020-06-07 10:44:18 0:25:59 0:08:31 0:17:28 smithi master ubuntu 18.04 rados/multimon/{clusters/21 msgr-failures/few msgr/async no_pools objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest} tasks/mon_clock_no_skews} 3
Failure Reason:

"2020-06-07T10:41:12.021472+0000 mon.b (mon.0) 21 : cluster [WRN] Health check failed: 2/21 mons down, quorum b,c,a,e,f,d,h,i,g,l,j,n,o,m,q,r,p,u,s (MON_DOWN)" in cluster log

pass 5124137 2020-06-07 07:04:06 2020-06-07 10:18:19 2020-06-07 10:42:19 0:24:00 0:13:32 0:10:28 smithi master centos 7.6 rados/cephadm/smoke/{distro/centos_7 fixed-2 start} 2
pass 5124138 2020-06-07 07:04:07 2020-06-07 10:19:01 2020-06-07 10:35:00 0:15:59 0:08:58 0:07:01 smithi master centos 8.1 rados/singleton-nomsgr/{all/lazy_omap_stats_output rados supported-random-distro$/{centos_8}} 1
pass 5124139 2020-06-07 07:04:08 2020-06-07 10:20:08 2020-06-07 10:48:07 0:27:59 0:21:29 0:06:30 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/osd-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/cache-pool-snaps-readproxy} 2
pass 5124140 2020-06-07 07:04:09 2020-06-07 10:20:08 2020-06-07 10:44:07 0:23:59 0:14:04 0:09:55 smithi master ubuntu 18.04 rados/singleton/{all/rebuild-mondb msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124141 2020-06-07 07:04:10 2020-06-07 10:20:08 2020-06-07 10:44:08 0:24:00 0:13:14 0:10:46 smithi master centos 7.6 rados/cephadm/smoke-roleless/{distro/centos_7 start} 2
pass 5124142 2020-06-07 07:04:10 2020-06-07 10:22:09 2020-06-07 11:02:09 0:40:00 0:33:50 0:06:10 smithi master centos 8.1 rados/singleton-bluestore/{all/cephtool msgr-failures/few msgr/async objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8}} 1
pass 5124143 2020-06-07 07:04:11 2020-06-07 10:22:09 2020-06-07 11:00:09 0:38:00 0:26:56 0:11:04 smithi master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-bitmap rados recovery-overrides/{default} supported-random-distro$/{ubuntu_latest} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124144 2020-06-07 07:04:12 2020-06-07 10:22:09 2020-06-07 11:38:10 1:16:01 1:03:23 0:12:38 smithi master ubuntu 18.04 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-hybrid supported-random-distro$/{ubuntu_latest} tasks/dashboard} 2
pass 5124145 2020-06-07 07:04:13 2020-06-07 10:22:09 2020-06-07 10:40:08 0:17:59 0:09:22 0:08:37 smithi master ubuntu 18.04 rados/objectstore/{backends/fusestore supported-random-distro$/{ubuntu_latest}} 1
fail 5124146 2020-06-07 07:04:14 2020-06-07 10:24:15 2020-06-07 10:38:14 0:13:59 0:03:23 0:10:36 smithi master ubuntu 18.04 rados/cephadm/upgrade/{1-start 2-start-upgrade 3-wait distro$/{ubuntu_18.04} fixed-2} 2
Failure Reason:

Command failed on smithi123 with status 5: 'sudo systemctl stop ceph-d2c2b26e-a8aa-11ea-a06b-001a4aab830c@mon.a'

fail 5124147 2020-06-07 07:04:15 2020-06-07 10:24:15 2020-06-07 11:00:15 0:36:00 0:24:15 0:11:45 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/nautilus backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_7.6} msgr-failures/fastclose rados thrashers/none thrashosds-health workloads/rbd_cls} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124148 2020-06-07 07:04:16 2020-06-07 10:24:15 2020-06-07 10:48:14 0:23:59 0:18:08 0:05:51 smithi master centos 8.1 rados/singleton/{all/recovery-preemption msgr-failures/few msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8}} 1
pass 5124149 2020-06-07 07:04:16 2020-06-07 10:24:21 2020-06-07 11:00:21 0:36:00 0:29:08 0:06:52 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{default} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/cache-pool-snaps} 2
fail 5124150 2020-06-07 07:04:17 2020-06-07 10:26:09 2020-06-07 10:46:08 0:19:59 0:13:31 0:06:28 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_adoption} 1
Failure Reason:

Command failed (workunit test cephadm/test_adoption.sh) on smithi193 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_adoption.sh'

pass 5124151 2020-06-07 07:04:18 2020-06-07 10:26:09 2020-06-07 10:46:09 0:20:00 0:12:13 0:07:47 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-low-osd-mem-target openstack settings/optimized ubuntu_latest workloads/radosbench_4K_rand_read} 1
pass 5124152 2020-06-07 07:04:19 2020-06-07 10:26:09 2020-06-07 11:20:10 0:54:01 0:46:06 0:07:55 smithi master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/mon} 1
pass 5124153 2020-06-07 07:04:20 2020-06-07 10:26:09 2020-06-07 10:46:09 0:20:00 0:09:38 0:10:22 smithi master ubuntu 18.04 rados/monthrash/{ceph clusters/9-mons msgr-failures/mon-delay msgr/async-v1only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest} thrashers/one workloads/rados_5925} 2
pass 5124154 2020-06-07 07:04:21 2020-06-07 10:26:09 2020-06-07 10:46:09 0:20:00 0:11:13 0:08:47 smithi master centos 8.0 rados/cephadm/smoke/{distro/centos_8.0 fixed-2 start} 2
pass 5124155 2020-06-07 07:04:22 2020-06-07 10:26:09 2020-06-07 10:46:09 0:20:00 0:14:27 0:05:33 smithi master rhel 8.1 rados/singleton-nomsgr/{all/librados_hello_world rados supported-random-distro$/{rhel_8}} 1
pass 5124156 2020-06-07 07:04:23 2020-06-07 10:26:10 2020-06-07 10:50:09 0:23:59 0:14:43 0:09:16 smithi master rhel 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/few objectstore/bluestore-comp-lz4 rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5124157 2020-06-07 07:04:23 2020-06-07 10:26:10 2020-06-07 10:56:10 0:30:00 0:22:46 0:07:14 smithi master centos 8.1 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} tasks/rados_workunit_loadgen_mix} 2
pass 5124158 2020-06-07 07:04:24 2020-06-07 10:26:15 2020-06-07 10:58:15 0:32:00 0:24:03 0:07:57 smithi master rhel 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/fastclose objectstore/bluestore-bitmap rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-small-objects} 2
pass 5124159 2020-06-07 07:04:25 2020-06-07 10:28:19 2020-06-07 10:46:18 0:17:59 0:12:17 0:05:42 smithi master rhel 8.1 rados/singleton/{all/resolve_stuck_peering msgr-failures/many msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8}} 2
pass 5124160 2020-06-07 07:04:26 2020-06-07 10:28:19 2020-06-07 10:46:19 0:18:00 0:10:35 0:07:25 smithi master centos 8.0 rados/cephadm/smoke-roleless/{distro/centos_8.0 start} 2
pass 5124161 2020-06-07 07:04:27 2020-06-07 10:28:20 2020-06-07 11:02:19 0:33:59 0:26:10 0:07:49 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{default} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/cache-snaps-balanced} 2
pass 5124162 2020-06-07 07:04:28 2020-06-07 10:28:20 2020-06-07 10:46:19 0:17:59 0:11:17 0:06:42 smithi master centos 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/osd-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-active-recovery} supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
pass 5124163 2020-06-07 07:04:29 2020-06-07 10:28:20 2020-06-07 10:46:19 0:17:59 0:10:55 0:07:04 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-lz4 rados tasks/mon_recovery validater/lockdep} 2
pass 5124164 2020-06-07 07:04:30 2020-06-07 10:28:20 2020-06-07 11:06:19 0:37:59 0:31:30 0:06:29 smithi master rhel 8.1 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-snappy supported-random-distro$/{rhel_8} tasks/module_selftest} 2
fail 5124165 2020-06-07 07:04:31 2020-06-07 10:28:19 2020-06-07 10:48:19 0:20:00 0:10:07 0:09:53 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi027 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

pass 5124166 2020-06-07 07:04:31 2020-06-07 10:28:20 2020-06-07 10:46:19 0:17:59 0:10:22 0:07:37 smithi master rhel 8.1 rados/multimon/{clusters/3 msgr-failures/many msgr/async-v1only no_pools objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8} tasks/mon_clock_with_skews} 2
pass 5124167 2020-06-07 07:04:32 2020-06-07 10:30:21 2020-06-07 10:52:21 0:22:00 0:10:47 0:11:13 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-stupid openstack settings/optimized ubuntu_latest workloads/radosbench_4K_seq_read} 1
pass 5124168 2020-06-07 07:04:33 2020-06-07 10:30:21 2020-06-07 10:52:21 0:22:00 0:11:47 0:10:13 smithi master ubuntu 18.04 rados/singleton/{all/test-crash msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{ubuntu_latest}} 1
pass 5124169 2020-06-07 07:04:34 2020-06-07 10:30:21 2020-06-07 10:58:21 0:28:00 0:22:19 0:05:41 smithi master centos 8.1 rados/cephadm/with-work/{distro/centos_latest fixed-2 mode/packaged msgr/async-v2only start tasks/rados_python} 2
pass 5124170 2020-06-07 07:04:35 2020-06-07 10:30:22 2020-06-07 11:06:22 0:36:00 0:27:56 0:08:04 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/osd-delay rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-snaps-few-objects-overwrites} 2
pass 5124171 2020-06-07 07:04:36 2020-06-07 10:30:21 2020-06-07 11:06:21 0:36:00 0:29:33 0:06:27 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/osd-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/cache-snaps} 2
pass 5124172 2020-06-07 07:04:37 2020-06-07 10:30:21 2020-06-07 10:48:21 0:18:00 0:10:48 0:07:12 smithi master centos 8.1 rados/cephadm/smoke/{distro/centos_latest fixed-2 start} 2
pass 5124173 2020-06-07 07:04:38 2020-06-07 10:30:22 2020-06-07 10:56:21 0:25:59 0:16:58 0:09:01 smithi master rhel 8.1 rados/singleton-nomsgr/{all/msgr rados supported-random-distro$/{rhel_8}} 1
pass 5124174 2020-06-07 07:04:39 2020-06-07 10:30:22 2020-06-07 11:04:22 0:34:00 0:25:58 0:08:02 smithi master rhel 8.1 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/osd-delay objectstore/bluestore-comp-lz4 rados recovery-overrides/{default} supported-random-distro$/{rhel_8} thrashers/none thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124175 2020-06-07 07:04:40 2020-06-07 10:34:02 2020-06-07 11:02:01 0:27:59 0:21:51 0:06:08 smithi master rhel 8.1 rados/singleton/{all/test_envlibrados_for_rocksdb msgr-failures/many msgr/async objectstore/filestore-xfs rados supported-random-distro$/{rhel_8}} 1
pass 5124176 2020-06-07 07:04:40 2020-06-07 10:34:02 2020-06-07 10:52:01 0:17:59 0:10:29 0:07:30 smithi master centos 8.1 rados/cephadm/smoke-roleless/{distro/centos_latest start} 2
pass 5124177 2020-06-07 07:04:41 2020-06-07 10:34:08 2020-06-07 11:46:09 1:12:01 1:05:19 0:06:42 smithi master rhel 8.1 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-low-osd-mem-target supported-random-distro$/{rhel_8} tasks/dashboard} 2
pass 5124178 2020-06-07 07:04:42 2020-06-07 10:34:09 2020-06-07 10:52:08 0:17:59 0:12:16 0:05:43 smithi master centos 8.1 rados/objectstore/{backends/keyvaluedb supported-random-distro$/{centos_8}} 1
pass 5124179 2020-06-07 07:04:43 2020-06-07 10:34:10 2020-06-07 11:00:10 0:26:00 0:18:58 0:07:02 smithi master centos 8.1 rados/valgrind-leaks/{1-start 2-inject-leak/osd centos_latest} 1
fail 5124180 2020-06-07 07:04:44 2020-06-07 10:35:19 2020-06-07 11:13:19 0:38:00 0:26:45 0:11:15 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/octopus backoff/peering_and_degraded ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_7.6} msgr-failures/few rados thrashers/pggrow thrashosds-health workloads/snaps-few-objects} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124181 2020-06-07 07:04:45 2020-06-07 10:36:06 2020-06-07 10:50:05 0:13:59 0:06:52 0:07:07 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_cephadm_repos} 1
pass 5124182 2020-06-07 07:04:46 2020-06-07 10:36:41 2020-06-07 10:56:40 0:19:59 0:14:10 0:05:49 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/cache} 2
pass 5124183 2020-06-07 07:04:47 2020-06-07 10:36:41 2020-06-07 11:12:41 0:36:00 0:30:13 0:05:47 smithi master centos 8.1 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} tasks/rados_workunit_loadgen_mostlyread} 2
pass 5124184 2020-06-07 07:04:48 2020-06-07 10:36:41 2020-06-07 11:10:41 0:34:00 0:28:29 0:05:31 smithi master rhel 8.1 rados/monthrash/{ceph clusters/3-mons msgr-failures/few msgr/async-v2only objectstore/filestore-xfs rados supported-random-distro$/{rhel_8} thrashers/sync-many workloads/rados_api_tests} 2
pass 5124185 2020-06-07 07:04:48 2020-06-07 10:36:41 2020-06-07 10:56:41 0:20:00 0:11:48 0:08:12 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-basic-min-osd-mem-target openstack settings/optimized ubuntu_latest workloads/radosbench_4M_rand_read} 1
pass 5124186 2020-06-07 07:04:49 2020-06-07 10:38:24 2020-06-07 12:06:25 1:28:01 1:18:43 0:09:18 smithi master ubuntu 18.04 rados/singleton/{all/thrash-backfill-full msgr-failures/few msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{ubuntu_latest}} 2
pass 5124187 2020-06-07 07:04:50 2020-06-07 10:38:24 2020-06-07 10:58:24 0:20:00 0:12:01 0:07:59 smithi master centos 8.1 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{default} supported-random-distro$/{centos_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
pass 5124188 2020-06-07 07:04:51 2020-06-07 10:38:24 2020-06-07 11:20:24 0:42:00 0:35:51 0:06:09 smithi master rhel 8.0 rados/cephadm/with-work/{distro/rhel_8.0 fixed-2 mode/root msgr/async start tasks/rados_api_tests} 2
pass 5124189 2020-06-07 07:04:52 2020-06-07 10:38:24 2020-06-07 11:16:24 0:38:00 0:32:24 0:05:36 smithi master rhel 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/few objectstore/bluestore-comp-lz4 rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=clay-k=4-m=2} 2
pass 5124190 2020-06-07 07:04:53 2020-06-07 10:38:25 2020-06-07 11:00:24 0:21:59 0:15:02 0:06:57 smithi master rhel 7.7 rados/cephadm/smoke/{distro/rhel_7 fixed-2 start} 2
pass 5124191 2020-06-07 07:04:54 2020-06-07 16:11:49 2020-06-07 16:49:49 0:38:00 0:24:45 0:13:15 smithi master rhel 8.1 rados/singleton-nomsgr/{all/multi-backfill-reject rados supported-random-distro$/{rhel_8}} 2
pass 5124192 2020-06-07 07:04:55 2020-06-07 16:11:57 2020-06-07 16:31:56 0:19:59 0:12:46 0:07:13 smithi master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-zlib supported-random-distro$/{ubuntu_latest} tasks/progress} 2
pass 5124193 2020-06-07 07:04:56 2020-06-07 16:13:58 2020-06-07 17:03:59 0:50:01 0:28:38 0:21:23 smithi master rhel 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/fastclose objectstore/bluestore-comp-snappy rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
dead 5124194 2020-06-07 07:04:56 2020-06-07 16:13:59 2020-06-08 04:16:31 12:02:32 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-snappy rados tasks/rados_api_tests validater/valgrind} 2
pass 5124195 2020-06-07 07:04:57 2020-06-07 16:13:59 2020-06-07 16:35:58 0:21:59 0:14:01 0:07:58 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{rhel_8} thrashers/morepggrow thrashosds-health workloads/dedup_tier} 2
pass 5124196 2020-06-07 07:04:58 2020-06-07 16:15:57 2020-06-07 16:55:57 0:40:00 0:28:23 0:11:37 smithi master ubuntu 18.04 rados/singleton/{all/thrash-eio msgr-failures/many msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{ubuntu_latest}} 2
pass 5124197 2020-06-07 07:04:59 2020-06-07 16:15:57 2020-06-07 16:41:57 0:26:00 0:13:45 0:12:15 smithi master rhel 7.7 rados/cephadm/smoke-roleless/{distro/rhel_7 start} 2
pass 5124198 2020-06-07 07:05:00 2020-06-07 16:15:57 2020-06-07 18:56:00 2:40:03 2:32:56 0:07:07 smithi master ubuntu 18.04 rados/standalone/{supported-random-distro$/{ubuntu_latest} workloads/osd} 1
pass 5124199 2020-06-07 07:05:01 2020-06-07 17:08:03 2020-06-07 17:28:03 0:20:00 0:10:56 0:09:04 smithi master centos 8.1 rados/multimon/{clusters/6 msgr-failures/few msgr/async-v2only no_pools objectstore/bluestore-comp-zlib rados supported-random-distro$/{centos_8} tasks/mon_recovery} 2
pass 5124200 2020-06-07 07:05:02 2020-06-07 17:08:03 2020-06-07 17:26:03 0:18:00 0:11:14 0:06:46 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_orch_cli} 1
pass 5124201 2020-06-07 07:05:03 2020-06-07 17:09:53 2020-06-07 17:31:52 0:21:59 0:09:52 0:12:07 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-bitmap openstack settings/optimized ubuntu_latest workloads/radosbench_4M_seq_read} 1
pass 5124202 2020-06-07 07:05:03 2020-06-07 17:10:06 2020-06-07 18:24:07 1:14:01 0:20:35 0:53:26 smithi master ubuntu 18.04 rados/singleton/{all/thrash-rados/{thrash-rados thrashosds-health} msgr-failures/few msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest}} 2
pass 5124203 2020-06-07 07:05:04 2020-06-07 17:12:07 2020-06-07 18:28:09 1:16:02 0:22:04 0:53:58 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/osd-delay msgr/async objectstore/filestore-xfs rados supported-random-distro$/{ubuntu_latest} thrashers/none thrashosds-health workloads/pool-snaps-few-objects} 2
pass 5124204 2020-06-07 07:05:05 2020-06-07 17:12:08 2020-06-07 18:10:08 0:58:00 0:24:48 0:33:12 smithi master rhel 8.1 rados/cephadm/with-work/{distro/rhel_latest fixed-2 mode/packaged msgr/async-v1only start tasks/rados_python} 2
pass 5124205 2020-06-07 07:05:06 2020-06-07 17:12:08 2020-06-07 18:12:08 1:00:00 0:29:37 0:30:23 smithi master rhel 8.1 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/fastclose objectstore/bluestore-comp-snappy rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/pggrow thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124206 2020-06-07 07:05:07 2020-06-07 17:12:08 2020-06-07 18:06:08 0:54:00 0:10:58 0:43:02 smithi master ubuntu 18.04 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/few msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{ubuntu_latest} tasks/readwrite} 2
pass 5124207 2020-06-07 07:05:08 2020-06-07 17:12:15 2020-06-07 17:36:15 0:24:00 0:13:34 0:10:26 smithi master rhel 8.0 rados/cephadm/smoke/{distro/rhel_8.0 fixed-2 start} 2
pass 5124208 2020-06-07 07:05:09 2020-06-07 17:13:59 2020-06-07 17:41:59 0:28:00 0:20:41 0:07:19 smithi master rhel 8.1 rados/singleton-nomsgr/{all/osd_stale_reads rados supported-random-distro$/{rhel_8}} 1
fail 5124209 2020-06-07 07:05:09 2020-06-07 17:16:07 2020-06-07 17:58:07 0:42:00 0:19:30 0:22:30 smithi master rhel 8.1 rados/singleton/{all/thrash_cache_writeback_proxy_none msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8}} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --max-ops 400000 --objects 10000 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op read 100 --op write 50 --op delete 50 --op copy_from 50 --op write_excl 50 --pool base'

pass 5124210 2020-06-07 07:05:10 2020-06-07 17:16:07 2020-06-07 17:44:07 0:28:00 0:13:37 0:14:23 smithi master rhel 8.0 rados/cephadm/smoke-roleless/{distro/rhel_8.0 start} 2
pass 5124211 2020-06-07 07:05:11 2020-06-07 17:16:09 2020-06-07 18:16:10 1:00:01 0:52:36 0:07:25 smithi master rhel 8.1 rados/monthrash/{ceph clusters/9-mons msgr-failures/mon-delay msgr/async objectstore/bluestore-bitmap rados supported-random-distro$/{rhel_8} thrashers/sync workloads/rados_mon_osdmap_prune} 2
pass 5124212 2020-06-07 07:05:12 2020-06-07 17:16:10 2020-06-07 18:00:10 0:44:00 0:36:17 0:07:43 smithi master rhel 8.1 rados/singleton-bluestore/{all/cephtool msgr-failures/many msgr/async-v1only objectstore/bluestore-comp-snappy rados supported-random-distro$/{rhel_8}} 1
pass 5124213 2020-06-07 07:05:13 2020-06-07 17:18:18 2020-06-07 18:36:19 1:18:01 0:31:38 0:46:23 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-async-partial-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8} thrashers/pggrow thrashosds-health workloads/rados_api_tests} 2
pass 5124214 2020-06-07 07:05:14 2020-06-07 17:18:18 2020-06-07 18:10:18 0:52:00 0:32:29 0:19:31 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/fastclose rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/fastread thrashosds-health workloads/ec-pool-snaps-few-objects-overwrites} 2
pass 5124215 2020-06-07 07:05:15 2020-06-07 17:20:15 2020-06-07 17:50:15 0:30:00 0:11:13 0:18:47 smithi master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/fastclose objectstore/bluestore-comp-zlib rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
fail 5124216 2020-06-07 07:05:15 2020-06-07 17:20:15 2020-06-07 18:28:16 1:08:01 0:22:32 0:45:29 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/2-size-2-min-size 1-install/luminous-v1only backoff/normal ceph clusters/{openstack three-plus-one} d-balancer/crush-compat distro$/{centos_7.6} msgr-failures/osd-delay rados thrashers/pggrow thrashosds-health workloads/test_rbd_api} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124217 2020-06-07 07:05:16 2020-06-07 17:22:16 2020-06-07 18:30:17 1:08:01 1:00:18 0:07:43 smithi master centos 8.1 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-stupid supported-random-distro$/{centos_8} tasks/dashboard} 2
pass 5124218 2020-06-07 07:05:17 2020-06-07 17:22:16 2020-06-07 17:56:16 0:34:00 0:27:31 0:06:29 smithi master rhel 8.1 rados/objectstore/{backends/objectcacher-stress supported-random-distro$/{rhel_8}} 1
fail 5124219 2020-06-07 07:05:18 2020-06-07 17:22:16 2020-06-07 17:38:16 0:16:00 0:08:57 0:07:03 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_adoption} 1
Failure Reason:

Command failed (workunit test cephadm/test_adoption.sh) on smithi146 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_adoption.sh'

pass 5124220 2020-06-07 07:05:19 2020-06-07 17:24:04 2020-06-07 17:40:04 0:16:00 0:09:38 0:06:22 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-comp openstack settings/optimized ubuntu_latest workloads/radosbench_4M_write} 1
pass 5124221 2020-06-07 07:05:20 2020-06-07 17:24:05 2020-06-07 17:40:04 0:15:59 0:11:04 0:04:55 smithi master rhel 8.1 rados/singleton/{all/watch-notify-same-primary msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8}} 1
pass 5124222 2020-06-07 07:05:21 2020-06-07 17:24:05 2020-06-07 18:06:05 0:42:00 0:24:59 0:17:01 smithi master ubuntu 18.04 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/osd-delay objectstore/bluestore-comp-snappy rados recovery-overrides/{more-active-recovery} supported-random-distro$/{ubuntu_latest} thrashers/fastread thrashosds-health workloads/ec-rados-plugin=jerasure-k=2-m=1} 2
fail 5124223 2020-06-07 07:05:22 2020-06-07 17:25:54 2020-06-07 18:05:54 0:40:00 0:32:26 0:07:34 smithi master ubuntu 18.04 rados/cephadm/with-work/{distro/ubuntu_18.04 fixed-2 mode/root msgr/async-v2only start tasks/rados_api_tests} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T17:43:26.926663+00:00 smithi070 bash[11982]: debug 2020-06-07T17:43:26.919+0000 7f2e36fd8700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog

pass 5124224 2020-06-07 07:05:23 2020-06-07 17:25:54 2020-06-07 17:43:54 0:18:00 0:09:58 0:08:02 smithi master centos 8.1 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-comp-zstd supported-random-distro$/{centos_8} tasks/prometheus} 2
pass 5124225 2020-06-07 07:05:23 2020-06-07 17:25:54 2020-06-07 17:43:54 0:18:00 0:09:51 0:08:09 smithi master ubuntu 18.04 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/few objectstore/bluestore-comp-zlib rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/careful thrashosds-health workloads/ec-rados-plugin=lrc-k=4-m=2-l=3} 3
pass 5124226 2020-06-07 07:05:24 2020-06-07 17:26:04 2020-06-07 17:50:04 0:24:00 0:16:32 0:07:28 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/none msgr-failures/few msgr/async objectstore/bluestore-comp-zlib rados tasks/rados_cls_all validater/lockdep} 2
pass 5124227 2020-06-07 07:05:25 2020-06-07 17:28:01 2020-06-07 18:22:02 0:54:01 0:45:56 0:08:05 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{centos_8} thrashers/careful thrashosds-health workloads/radosbench-high-concurrency} 2
pass 5124228 2020-06-07 07:05:26 2020-06-07 17:28:01 2020-06-07 17:56:01 0:28:00 0:11:50 0:16:10 smithi master rhel 8.1 rados/cephadm/smoke/{distro/rhel_latest fixed-2 start} 2
pass 5124229 2020-06-07 07:05:27 2020-06-07 17:28:04 2020-06-07 17:46:04 0:18:00 0:10:26 0:07:34 smithi master rhel 8.1 rados/multimon/{clusters/9 msgr-failures/many msgr/async no_pools objectstore/bluestore-comp-zstd rados supported-random-distro$/{rhel_8} tasks/mon_clock_no_skews} 3
pass 5124230 2020-06-07 07:05:28 2020-06-07 17:30:20 2020-06-07 17:42:23 0:12:03 0:06:17 0:05:46 smithi master centos 8.1 rados/singleton/{all/admin-socket msgr-failures/many msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8}} 1
pass 5124231 2020-06-07 07:05:29 2020-06-07 17:30:20 2020-06-07 17:50:20 0:20:00 0:13:06 0:06:54 smithi master rhel 8.1 rados/singleton-nomsgr/{all/pool-access rados supported-random-distro$/{rhel_8}} 1
pass 5124232 2020-06-07 07:05:30 2020-06-07 17:32:08 2020-06-07 17:52:08 0:20:00 0:11:17 0:08:43 smithi master rhel 8.1 rados/cephadm/smoke-roleless/{distro/rhel_latest start} 2
pass 5124233 2020-06-07 07:05:31 2020-06-07 17:32:09 2020-06-07 18:12:09 0:40:00 0:15:33 0:24:27 smithi master ubuntu 18.04 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/many msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{ubuntu_latest} tasks/repair_test} 2
pass 5124234 2020-06-07 07:05:31 2020-06-07 17:34:05 2020-06-07 18:04:05 0:30:00 0:21:23 0:08:37 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-low-osd-mem-target openstack settings/optimized ubuntu_latest workloads/radosbench_omap_write} 1
fail 5124235 2020-06-07 07:05:32 2020-06-07 17:34:05 2020-06-07 17:54:04 0:19:59 0:12:22 0:07:37 smithi master centos 8.1 rados/cephadm/workunits/{distro/centos_latest task/test_cephadm} 1
Failure Reason:

Command failed (workunit test cephadm/test_cephadm.sh) on smithi164 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=779de8c3d6f291c76010f3cd826cde6f94255e08 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 3h /home/ubuntu/cephtest/clone.client.0/qa/workunits/cephadm/test_cephadm.sh'

pass 5124236 2020-06-07 07:05:33 2020-06-07 17:34:08 2020-06-07 18:50:09 1:16:01 1:10:08 0:05:53 smithi master ubuntu 18.04 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/osd-delay msgr/async objectstore/bluestore-comp-snappy rados supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/radosbench} 2
pass 5124237 2020-06-07 07:05:34 2020-06-07 17:34:10 2020-06-07 18:34:10 1:00:00 0:29:36 0:30:24 smithi master rhel 8.1 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/few objectstore/bluestore-comp-zlib rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/careful thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2
pass 5124238 2020-06-07 07:05:35 2020-06-07 17:35:37 2020-06-07 17:57:37 0:22:00 0:13:29 0:08:31 smithi master rhel 8.1 rados/singleton/{all/deduptool msgr-failures/few msgr/async-v1only objectstore/bluestore-low-osd-mem-target rados supported-random-distro$/{rhel_8}} 1
fail 5124239 2020-06-07 07:05:36 2020-06-07 17:35:58 2020-06-07 17:55:58 0:20:00 0:11:31 0:08:29 smithi master ubuntu 18.04 rados/cephadm/smoke/{distro/ubuntu_18.04 fixed-2 start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T17:52:35.548965+00:00 smithi006 bash[8361]: debug 2020-06-07T17:52:35.545+0000 7fc0b8002700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon prometheus.a ... ' in syslog

pass 5124240 2020-06-07 07:05:37 2020-06-07 17:36:16 2020-06-07 19:00:18 1:24:02 1:17:18 0:06:44 smithi master rhel 8.1 rados/standalone/{supported-random-distro$/{rhel_8} workloads/scrub} 1
pass 5124241 2020-06-07 07:05:38 2020-06-07 17:38:15 2020-06-07 18:00:15 0:22:00 0:11:15 0:10:45 smithi master ubuntu 18.04 rados/thrash-erasure-code-shec/{ceph clusters/{fixed-4 openstack} msgr-failures/few objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=shec-k=4-m=3-c=2} 4
fail 5124242 2020-06-07 07:05:38 2020-06-07 17:38:15 2020-06-07 17:58:15 0:20:00 0:10:55 0:09:05 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_18.04 start} 2
Failure Reason:

'/home/ubuntu/cephtest/archive/syslog/misc.log:2020-06-07T17:49:32.314961+00:00 smithi107 bash[8814]: debug 2020-06-07T17:49:32.310+0000 7f8e0cc9b700 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr:INFO:cephadm:Deploy daemon alertmanager.smithi107 ... ' in syslog

pass 5124243 2020-06-07 07:05:39 2020-06-07 17:38:15 2020-06-07 18:20:15 0:42:00 0:33:16 0:08:44 smithi master rhel 8.1 rados/monthrash/{ceph clusters/3-mons msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-lz4 rados supported-random-distro$/{rhel_8} thrashers/force-sync-many workloads/rados_mon_workunits} 2
pass 5124244 2020-06-07 07:05:40 2020-06-07 17:38:17 2020-06-07 18:04:17 0:26:00 0:18:14 0:07:46 smithi master rhel 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/normal ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/fastclose msgr/async-v1only objectstore/bluestore-comp-zlib rados supported-random-distro$/{rhel_8} thrashers/mapgap thrashosds-health workloads/redirect} 2
pass 5124245 2020-06-07 07:05:41 2020-06-07 17:40:18 2020-06-07 17:56:17 0:15:59 0:08:19 0:07:40 smithi master centos 8.1 rados/singleton/{all/divergent_priors msgr-failures/many msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8}} 1
fail 5124246 2020-06-07 07:05:42 2020-06-07 17:40:18 2020-06-07 18:28:18 0:48:00 0:22:57 0:25:03 smithi master centos 7.6 rados/thrash-old-clients/{0-size-min-size-overrides/3-size-2-min-size 1-install/luminous backoff/peering ceph clusters/{openstack three-plus-one} d-balancer/on distro$/{centos_7.6} msgr-failures/fastclose rados thrashers/careful thrashosds-health workloads/cache-snaps} 3
Failure Reason:

reached maximum tries (180) after waiting for 180 seconds

pass 5124247 2020-06-07 07:05:43 2020-06-07 17:40:18 2020-06-07 18:18:18 0:38:00 0:30:21 0:07:39 smithi master rhel 8.1 rados/singleton-nomsgr/{all/recovery-unfound-found rados supported-random-distro$/{rhel_8}} 1
pass 5124248 2020-06-07 07:05:44 2020-06-07 17:40:18 2020-06-07 18:14:18 0:34:00 0:24:05 0:09:55 smithi master ubuntu 18.04 rados/cephadm/with-work/{distro/ubuntu_18.04_podman fixed-2 mode/packaged msgr/async start tasks/rados_python} 2
pass 5124249 2020-06-07 07:05:45 2020-06-07 17:41:47 2020-06-07 18:49:48 1:08:01 1:01:15 0:06:46 smithi master ubuntu 18.04 rados/dashboard/{clusters/{2-node-mgr} debug/mgr objectstore/filestore-xfs supported-random-distro$/{ubuntu_latest} tasks/dashboard} 2
fail 5124250 2020-06-07 07:05:45 2020-06-07 17:41:56 2020-06-07 22:30:03 4:48:07 4:32:07 0:16:00 smithi master ubuntu 18.04 rados/objectstore/{backends/objectstore supported-random-distro$/{ubuntu_latest}} 1
Failure Reason:

Command crashed: 'sudo TESTDIR=/home/ubuntu/cephtest bash -c \'mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3 --gtest_catch_exceptions=0\''

pass 5124251 2020-06-07 07:05:46 2020-06-07 17:42:00 2020-06-07 18:02:00 0:20:00 0:12:02 0:07:58 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-stupid openstack settings/optimized ubuntu_latest workloads/sample_fio} 1
pass 5124252 2020-06-07 07:05:47 2020-06-07 17:42:06 2020-06-07 18:16:06 0:34:00 0:26:42 0:07:18 smithi master rhel 8.1 rados/thrash-erasure-code/{ceph clusters/{fixed-2 openstack} fast/fast msgr-failures/fastclose objectstore/bluestore-comp-zlib rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/minsize_recovery thrashosds-health workloads/ec-rados-plugin=jerasure-k=3-m=1} 2
pass 5124253 2020-06-07 07:05:48 2020-06-07 17:42:24 2020-06-07 17:58:24 0:16:00 0:09:45 0:06:15 smithi master ubuntu 18.04 rados/mgr/{clusters/{2-node-mgr} debug/mgr objectstore/bluestore-hybrid supported-random-distro$/{ubuntu_latest} tasks/workunits} 2
pass 5124254 2020-06-07 07:05:49 2020-06-07 17:44:11 2020-06-07 18:00:10 0:15:59 0:05:39 0:10:20 smithi master ubuntu 18.04 rados/cephadm/workunits/{distro/ubuntu_18.04_podman task/test_cephadm_repos} 1
pass 5124255 2020-06-07 07:05:50 2020-06-07 17:44:11 2020-06-07 18:36:11 0:52:00 0:30:14 0:21:46 smithi master rhel 8.1 rados/thrash-erasure-code-big/{ceph cluster/{12-osds openstack} msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-active-recovery} supported-random-distro$/{rhel_8} thrashers/default thrashosds-health workloads/ec-rados-plugin=jerasure-k=4-m=2} 3
pass 5124256 2020-06-07 07:05:51 2020-06-07 17:44:11 2020-06-07 18:28:11 0:44:00 0:26:58 0:17:02 smithi master centos 8.1 rados/verify/{centos_latest ceph clusters/{fixed-2 openstack} d-thrash/default/{default thrashosds-health} msgr-failures/few msgr/async-v1only objectstore/bluestore-comp-zstd rados tasks/mon_recovery validater/valgrind} 2
pass 5124257 2020-06-07 07:05:52 2020-06-07 17:44:11 2020-06-07 18:02:10 0:17:59 0:07:32 0:10:27 smithi master centos 8.1 rados/singleton/{all/divergent_priors2 msgr-failures/few msgr/async objectstore/filestore-xfs rados supported-random-distro$/{centos_8}} 1
pass 5124258 2020-06-07 07:05:53 2020-06-07 17:44:11 2020-06-07 18:24:11 0:40:00 0:12:39 0:27:21 smithi master ubuntu 18.04 rados/cephadm/smoke/{distro/ubuntu_18.04_podman fixed-2 start} 2
pass 5124259 2020-06-07 07:05:53 2020-06-07 17:44:11 2020-06-07 18:20:11 0:36:00 0:12:24 0:23:36 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/2-size-2-min-size 1-pg-log-overrides/normal_pg_log 2-recovery-overrides/{more-partial-recovery} backoff/peering ceph clusters/{fixed-2 openstack} d-balancer/crush-compat msgr-failures/few msgr/async-v2only objectstore/bluestore-comp-zstd rados supported-random-distro$/{centos_8} thrashers/morepggrow thrashosds-health workloads/redirect_promote_tests} 2
pass 5124260 2020-06-07 07:05:54 2020-06-07 17:46:19 2020-06-07 18:00:18 0:13:59 0:06:44 0:07:15 smithi master centos 8.1 rados/multimon/{clusters/21 msgr-failures/few msgr/async-v1only no_pools objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} tasks/mon_clock_with_skews} 3
pass 5124261 2020-06-07 07:05:55 2020-06-07 17:46:19 2020-06-07 18:16:19 0:30:00 0:22:39 0:07:21 smithi master rhel 8.1 rados/thrash-erasure-code-overwrites/{bluestore-bitmap ceph clusters/{fixed-2 openstack} fast/normal msgr-failures/few rados recovery-overrides/{more-partial-recovery} supported-random-distro$/{rhel_8} thrashers/minsize_recovery thrashosds-health workloads/ec-small-objects-fast-read-overwrites} 2
pass 5124262 2020-06-07 07:05:56 2020-06-07 17:48:17 2020-06-07 18:06:17 0:18:00 0:10:02 0:07:58 smithi master centos 8.1 rados/basic/{ceph clusters/{fixed-2 openstack} msgr-failures/few msgr/async-v2only objectstore/bluestore-stupid rados supported-random-distro$/{centos_8} tasks/scrub_test} 2
pass 5124263 2020-06-07 07:05:57 2020-06-07 17:50:11 2020-06-07 18:14:10 0:23:59 0:11:11 0:12:48 smithi master ubuntu 18.04 rados/cephadm/smoke-roleless/{distro/ubuntu_18.04_podman start} 2
pass 5124264 2020-06-07 07:05:58 2020-06-07 17:50:11 2020-06-07 18:06:10 0:15:59 0:08:10 0:07:49 smithi master centos 8.1 rados/singleton/{all/dump-stuck msgr-failures/many msgr/async-v1only objectstore/bluestore-bitmap rados supported-random-distro$/{centos_8}} 1
pass 5124265 2020-06-07 07:05:58 2020-06-07 17:50:11 2020-06-07 18:32:11 0:42:00 0:31:18 0:10:42 smithi master centos 8.0 rados/cephadm/with-work/{distro/centos_8.0 fixed-2 mode/root msgr/async start tasks/rados_api_tests} 2
pass 5124266 2020-06-07 07:05:59 2020-06-07 17:50:16 2020-06-07 18:10:16 0:20:00 0:13:28 0:06:32 smithi master rhel 8.1 rados/singleton-nomsgr/{all/version-number-sanity rados supported-random-distro$/{rhel_8}} 1
pass 5124267 2020-06-07 07:06:00 2020-06-07 17:50:21 2020-06-07 18:08:20 0:17:59 0:09:23 0:08:36 smithi master ubuntu 18.04 rados/perf/{ceph objectstore/bluestore-basic-min-osd-mem-target openstack settings/optimized ubuntu_latest workloads/sample_radosbench} 1
pass 5124268 2020-06-07 07:06:01 2020-06-07 17:52:25 2020-06-07 18:10:24 0:17:59 0:10:23 0:07:36 smithi master centos 8.1 rados/thrash/{0-size-min-size-overrides/3-size-2-min-size 1-pg-log-overrides/short_pg_log 2-recovery-overrides/{more-async-partial-recovery} backoff/peering_and_degraded ceph clusters/{fixed-2 openstack} d-balancer/on msgr-failures/osd-delay msgr/async objectstore/bluestore-hybrid rados supported-random-distro$/{centos_8} thrashers/none thrashosds-health workloads/redirect_set_object} 2
pass 5124269 2020-06-07 07:06:02 2020-06-07 17:54:17 2020-06-07 18:28:17 0:34:00 0:26:42 0:07:18 smithi master ubuntu 18.04 rados/thrash-erasure-code-isa/{arch/x86_64 ceph clusters/{fixed-2 openstack} msgr-failures/osd-delay objectstore/bluestore-comp-zstd rados recovery-overrides/{more-async-partial-recovery} supported-random-distro$/{ubuntu_latest} thrashers/default thrashosds-health workloads/ec-rados-plugin=isa-k=2-m=1} 2