Status  Job ID  Links  Posted  Started  Updated  Runtime  Duration  In Waiting  Machine  Teuthology Branch  OS Type  OS Version  Description  Nodes
fail 5554486 2020-10-24 08:07:19 2020-10-24 08:07:29 2020-10-24 08:39:29 0:32:00 0:25:51 0:06:09 smithi master rhel 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml objectstore/bluestore-comp-snappy.yaml rados.yaml recovery-overrides/{more-async-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 0 --op delete 50 --op snap_create 50 --op snap_remove 50 --op rollback 50 --op setattr 25 --op rmattr 25 --op copy_from 50 --op append 100 --pool unique_pool_0'
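For context, this ceph_test_rados invocation is generated by teuthology's rados task from the workload fragment named in the job description (workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml). The following is a rough sketch of what that fragment looks like, reconstructed from the command-line flags above; treat the keys and values as an approximation, not the verbatim file.

# Hypothetical reconstruction of workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml,
# inferred from the crashed ceph_test_rados command line above.
tasks:
- rados:
    clients: [client.0]
    ops: 4000            # --max-ops 4000
    objects: 50          # --objects 50
    ec_pool: true        # --ec-pool
    # The fragment also defines the jerasure k=2 m=1 erasure_code_profile named
    # in the job description; its values are omitted here because they do not
    # appear on the command line.
    op_weights:          # each entry becomes an "--op <name> <weight>" flag
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25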

dead 5554487 2020-10-24 08:07:19 2020-10-24 08:08:47 2020-10-24 20:11:17 12:02:30 smithi master centos 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp-zlib.yaml rados.yaml recovery-overrides/{more-async-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml} 2
pass 5554488 2020-10-24 08:07:20 2020-10-24 08:08:47 2020-10-24 09:30:48 1:22:01 1:12:23 0:09:38 smithi master centos 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp-zstd.yaml rados.yaml recovery-overrides/{more-async-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-radosbench.yaml} 2
fail 5554489 2020-10-24 08:07:21 2020-10-24 08:08:47 2020-10-24 08:42:47 0:34:00 0:26:02 0:07:58 smithi master rhel 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/few.yaml objectstore/bluestore-stupid.yaml rados.yaml recovery-overrides/{default.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/default.yaml thrashosds-health.yaml workloads/ec-small-objects-fast-read.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op read 100 --op write 0 --op delete 50 --op snap_create 50 --op snap_remove 50 --op rollback 50 --op setattr 25 --op rmattr 25 --op copy_from 50 --op append 50 --op write_excl 0 --op append_excl 50 --pool unique_pool_0'

fail 5554490 2020-10-24 08:07:22 2020-10-24 08:08:47 2020-10-24 08:46:47 0:38:00 0:26:29 0:11:31 smithi master centos 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-bitmap.yaml rados.yaml recovery-overrides/{more-partial-recovery.yaml} supported-random-distro$/{centos_latest.yaml} thrashers/fastread.yaml thrashosds-health.yaml workloads/ec-small-objects-many-deletes.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 400000 --objects 20 --max-in-flight 8 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op read 0 --op write 0 --op delete 20 --op append 5 --op write_excl 0 --op append_excl 5 --pool unique_pool_0'

fail 5554491 2020-10-24 08:07:23 2020-10-24 08:08:54 2020-10-24 08:44:54 0:36:00 0:27:11 0:08:49 smithi master rhel 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/fastclose.yaml objectstore/bluestore-comp-lz4.yaml rados.yaml recovery-overrides/{more-active-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/morepggrow.yaml thrashosds-health.yaml workloads/ec-small-objects.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 400000 --objects 1024 --max-in-flight 64 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 600 --op read 100 --op write 0 --op delete 50 --op snap_create 50 --op snap_remove 50 --op rollback 50 --op setattr 25 --op rmattr 25 --op copy_from 50 --op append 50 --op write_excl 0 --op append_excl 50 --pool unique_pool_0'

fail 5554492 2020-10-24 08:07:24 2020-10-24 08:09:17 2020-10-24 08:45:17 0:36:00 0:26:52 0:09:08 smithi master rhel 7.8 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/normal.yaml msgr-failures/few.yaml objectstore/bluestore-comp-snappy.yaml rados.yaml recovery-overrides/{more-partial-recovery.yaml} supported-random-distro$/{rhel_latest.yaml} thrashers/pggrow.yaml thrashosds-health.yaml workloads/ec-rados-plugin=clay-k=4-m=2.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 0 --op delete 50 --op snap_create 50 --op snap_remove 50 --op rollback 50 --op setattr 25 --op rmattr 25 --op copy_from 50 --op append 100 --pool unique_pool_0'

fail 5554493 2020-10-24 08:07:24 2020-10-24 08:09:21 2020-10-24 08:45:21 0:36:00 0:21:17 0:14:43 smithi master ubuntu 18.04 rados:thrash-erasure-code/{ceph.yaml clusters/{fixed-2.yaml openstack.yaml} fast/fast.yaml msgr-failures/osd-delay.yaml objectstore/bluestore-comp-zlib.yaml rados.yaml recovery-overrides/{more-async-recovery.yaml} supported-random-distro$/{ubuntu_latest.yaml} thrashers/careful.yaml thrashosds-health.yaml workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml} 2
Failure Reason:

Command crashed: 'CEPH_CLIENT_ID=0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage ceph_test_rados --no-omap --ec-pool --max-ops 4000 --objects 50 --max-in-flight 16 --size 4000000 --min-stride-size 400000 --max-stride-size 800000 --max-seconds 0 --op read 100 --op write 0 --op delete 50 --op snap_create 50 --op snap_remove 50 --op rollback 50 --op setattr 25 --op rmattr 25 --op copy_from 50 --op append 100 --pool unique_pool_0'