Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 5182513 2020-06-26 21:35:07 2020-06-26 21:36:21 2020-06-26 22:30:21 0:54:00 0:46:48 0:07:12 smithi master rhel 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-comp.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-06-26T21:51:08.428611+0000 mon.a (mon.0) 164 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

fail 5182514 2020-06-26 21:35:08 2020-06-26 21:36:21 2020-06-26 22:30:21 0:54:00 0:46:35 0:07:25 smithi master rhel 8.1 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-comp.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-06-26T21:52:47.632283+0000 mon.a (mon.0) 165 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

pass 5182515 2020-06-26 21:35:09 2020-06-26 21:36:21 2020-06-26 22:08:21 0:32:00 0:10:37 0:21:23 smithi master centos 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5182516 2020-06-26 21:35:10 2020-06-26 21:38:16 2020-06-26 22:00:16 0:22:00 0:15:01 0:06:59 smithi master rhel 8.1 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-ec-root.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5182517 2020-06-26 21:35:11 2020-06-26 21:38:16 2020-06-26 22:08:16 0:30:00 0:14:53 0:15:07 smithi master rhel 8.1 fs/32bits/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{faked-ino.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5182518 2020-06-26 21:35:12 2020-06-26 21:38:16 2020-06-26 22:46:17 1:08:01 0:46:22 0:21:39 smithi master rhel 8.1 fs/permission/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-06-26T22:08:32.922788+0000 mon.b (mon.0) 149 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

pass 5182519 2020-06-26 21:35:12 2020-06-26 21:39:13 2020-06-26 21:57:13 0:18:00 0:10:47 0:07:13 smithi master centos 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-bitmap.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5182520 2020-06-26 21:35:13 2020-06-26 21:40:15 2020-06-26 22:08:14 0:27:59 0:14:20 0:13:39 smithi master rhel 8.1 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/filestore-xfs.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_8.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5182521 2020-06-26 21:35:14 2020-06-26 21:40:15 2020-06-26 22:38:15 0:58:00 0:41:22 0:16:38 smithi master ubuntu 18.04 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-06-26T22:01:49.592180+0000 mon.b (mon.0) 200 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

pass 5182522 2020-06-26 21:35:15 2020-06-26 21:40:15 2020-06-26 22:08:14 0:27:59 0:11:02 0:16:57 smithi master ubuntu 18.04 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-bitmap.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
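
Note: the four failed jobs above all report the same transient "1 pg inactive, 1 pg peering (PG_AVAILABILITY)" warning being matched in the cluster log, not a workload error. As a minimal sketch of how such a transient health warning is typically suppressed for these suites, the fragment below assumes an override in the style of the overrides/whitelist_health.yaml piece listed in each job description and the log-whitelist key accepted by the teuthology ceph task at the time of this run; the exact key name and entries are assumptions for illustration, not taken from this run's configuration.

    overrides:
      ceph:
        # Assumed sketch: ignore the transient peering-related health warning
        # so a brief PG_AVAILABILITY blip in the cluster log does not fail the job.
        log-whitelist:
          - \(PG_AVAILABILITY\)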