Status  Job ID  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
pass 5195384 2020-07-02 09:04:23 2020-07-02 09:04:53 2020-07-02 09:22:52 0:17:59 0:10:58 0:07:01 smithi master centos 8.1 fs/32bits/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{faked-ino.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5195385 2020-07-02 09:04:24 2020-07-02 09:06:03 2020-07-02 09:36:03 0:30:00 0:11:09 0:18:51 smithi master centos 8.1 fs/permission/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

Command failed (workunit test suites/pjd.sh) on smithi042 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=cd2bc7b340400a759590a7339919bebe2817e781 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

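The pjd.sh invocation in the failure reason above is generated by the workunit task named in each job description (tasks/cfuse_workunit_suites_pjd.yaml), which clones the ceph qa tree on the client and runs qa/workunits/suites/pjd.sh. A minimal sketch of what such a task fragment looks like, assuming the usual qa/suites layout (the exact client selector and any extra overrides in the run's branch may differ):

    # Assumed sketch of a cfuse_workunit_suites_pjd.yaml-style fragment,
    # not copied from this run: the workunit task runs suites/pjd.sh on
    # the mounted client, producing the command shown in the failure reason.
    tasks:
    - workunit:
        clients:
          all:
          - suites/pjd.sh
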
fail 5195386 2020-07-02 09:04:25 2020-07-02 09:06:03 2020-07-02 10:20:04 1:14:01 0:42:03 0:31:58 smithi master centos 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-comp.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-07-02T09:42:54.399998+0000 mon.a (mon.0) 248 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

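Failures reported as a health warning "in cluster log" come from teuthology's cluster-log scan rather than from the workload itself: any WRN/ERR line not matched by an ignore pattern fails the job. The whitelist_health.yaml override listed in each description supplies such patterns; these PG_AVAILABILITY failures suggest that warning was not covered at the time. A minimal sketch of the override form used to ignore it, assuming the log-whitelist syntax of this era (patterns and key name may differ by branch):

    # Assumed override sketch, not taken from this run's suite:
    # adds a regex so the PG_AVAILABILITY warning does not fail the job.
    overrides:
      ceph:
        log-whitelist:
          - \(PG_AVAILABILITY\)
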
fail 5195387 2020-07-02 09:04:26 2020-07-02 09:06:04 2020-07-02 10:00:05 0:54:01 0:45:34 0:08:27 smithi master rhel 8.1 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-comp.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-07-02T09:22:24.728131+0000 mon.b (mon.0) 144 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

pass 5195388 2020-07-02 09:04:27 2020-07-02 09:06:30 2020-07-02 09:28:29 0:21:59 0:10:31 0:11:28 smithi master centos 8.1 fs/32bits/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{faked-ino.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5195389 2020-07-02 09:04:27 2020-07-02 09:06:31 2020-07-02 09:32:31 0:26:00 0:12:46 0:13:14 smithi master ubuntu 18.04 fs/permission/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

Command failed (workunit test suites/pjd.sh) on smithi121 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=cd2bc7b340400a759590a7339919bebe2817e781 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

pass 5195390 2020-07-02 09:04:28 2020-07-02 09:06:46 2020-07-02 09:36:46 0:30:00 0:11:41 0:18:19 smithi master ubuntu 18.04 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-comp-ec-root.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5195391 2020-07-02 09:04:29 2020-07-02 09:08:33 2020-07-02 09:38:32 0:29:59 0:14:28 0:15:31 smithi master rhel 8.1 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-ec-root.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5195392 2020-07-02 09:04:30 2020-07-02 09:10:20 2020-07-02 09:28:19 0:17:59 0:11:18 0:06:41 smithi master centos 8.1 fs/32bits/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-bitmap.yaml overrides/{faked-ino.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5195393 2020-07-02 09:04:31 2020-07-02 09:12:14 2020-07-02 09:46:13 0:33:59 0:14:37 0:19:22 smithi master rhel 8.1 fs/permission/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-bitmap.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{rhel_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

Command failed (workunit test suites/pjd.sh) on smithi124 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=cd2bc7b340400a759590a7339919bebe2817e781 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

pass 5195394 2020-07-02 09:04:32 2020-07-02 09:12:16 2020-07-02 09:32:15 0:19:59 0:11:36 0:08:23 smithi master centos 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-bitmap.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5195395 2020-07-02 09:04:33 2020-07-02 09:12:21 2020-07-02 09:32:21 0:20:00 0:10:30 0:09:30 smithi master ubuntu 18.04 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/filestore-xfs.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5195396 2020-07-02 09:04:34 2020-07-02 09:12:32 2020-07-02 09:38:32 0:26:00 0:11:15 0:14:45 smithi master ubuntu 18.04 fs/32bits/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-comp.yaml overrides/{faked-ino.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5195397 2020-07-02 09:04:34 2020-07-02 09:14:11 2020-07-02 09:36:11 0:22:00 0:10:55 0:11:05 smithi master ubuntu 18.04 fs/permission/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/bluestore-comp.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

Command failed (workunit test suites/pjd.sh) on smithi099 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=cd2bc7b340400a759590a7339919bebe2817e781 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

fail 5195398 2020-07-02 09:04:35 2020-07-02 09:14:17 2020-07-02 10:18:18 1:04:01 0:41:38 0:22:23 smithi master centos 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-07-02T09:42:38.906323+0000 mon.a (mon.0) 237 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log

pass 5195399 2020-07-02 09:04:36 2020-07-02 09:14:26 2020-07-02 09:38:25 0:23:59 0:11:09 0:12:50 smithi master ubuntu 18.04 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-bitmap.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
pass 5195400 2020-07-02 09:04:37 2020-07-02 09:16:31 2020-07-02 09:38:30 0:21:59 0:10:56 0:11:03 smithi master ubuntu 18.04 fs/32bits/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/filestore-xfs.yaml overrides/{faked-ino.yaml frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5195401 2020-07-02 09:04:38 2020-07-02 09:16:36 2020-07-02 10:12:37 0:56:01 0:41:37 0:14:24 smithi master ubuntu 18.04 fs/permission/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml objectstore-ec/filestore-xfs.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-07-02T09:35:21.958928+0000 mon.b (mon.0) 138 : cluster [WRN] Health check failed: Reduced data availability: 2 pgs inactive, 2 pgs peering (PG_AVAILABILITY)" in cluster log

pass 5195402 2020-07-02 09:04:39 2020-07-02 09:17:58 2020-07-02 09:49:57 0:31:59 0:11:04 0:20:55 smithi master centos 8.1 fs/thrash/{begin.yaml ceph-thrash/mds.yaml clusters/1-mds-1-client-coloc.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} mount/fuse.yaml msgr-failures/none.yaml objectstore-ec/bluestore-ec-root.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{centos_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
fail 5195403 2020-07-02 09:04:40 2020-07-02 09:18:04 2020-07-02 10:20:05 1:02:01 0:41:51 0:20:10 smithi master ubuntu 18.04 fs/basic_workload/{begin.yaml clusters/fixed-2-ucephfs.yaml conf/{client.yaml mds.yaml mon.yaml osd.yaml} inline/no.yaml mount/fuse.yaml objectstore-ec/bluestore-comp-ec-root.yaml omap_limit/10.yaml overrides/{frag_enable.yaml whitelist_health.yaml whitelist_wrongly_marked_down.yaml} supported-random-distros$/{ubuntu_latest.yaml} tasks/cfuse_workunit_suites_pjd.yaml} 2
Failure Reason:

"2020-07-02T09:44:38.325759+0000 mon.a (mon.0) 169 : cluster [WRN] Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY)" in cluster log