log_href: http://qa-proxy.ceph.com/teuthology/aemerson-2020-06-29_22:15:28-rgw-wip-datalog-fifo-distro-basic-smithi/5190280/teuthology.log
archive_path: /home/teuthworker/archive/aemerson-2020-06-29_22:15:28-rgw-wip-datalog-fifo-distro-basic-smithi/5190280
description: rgw/multisite/{clusters frontend/civetweb omap_limits overrides realms/two-zonegroup tasks/test_multi valgrind}
duration: 0:09:00
email: aemerson@redhat.com
failure_reason:
flavor: basic
job_id: 5190280
kernel:
last_in_suite: False
machine_type: smithi
name: aemerson-2020-06-29_22:15:28-rgw-wip-datalog-fifo-distro-basic-smithi
nuke_on_error: True
os_type: ubuntu
os_version:
overrides:
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon:
  rgw:
    compression type: random
    frontend: civetweb
  rgw-multisite:
    zonegroups:
    - is_default: True
      is_master: True
      endpoints:
      name: a
    - is_default: True
      endpoints:
      name: b
    realm:
      is default: True
      name: test-realm
  workunit:
    sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
    branch: wip-datalog-fifo
  ceph:
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(PG_AVAILABILITY\)
    wait-for-scrub: False
    conf:
      osd.2:
        osd_max_omap_entries_per_request: 10000
      osd.1:
        osd_max_omap_entries_per_request: 1000
      osd.0:
        osd_max_omap_entries_per_request: 10
      global:
      mgr:
        debug ms: 1
        debug mgr: 20
      client:
        rgw crypt require ssl: False
        rgw md log max shards: 4
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
        setuser: ceph
        rgw crypt s3 kms backend: testing
        rgw sync obj etag verify: True
        rgw data log num shards: 4
        setgroup: ceph
        debug rgw: 20
        rgw sync log trim interval: 0
        rgw curl low speed time: 300
      mon:
        debug paxos: 20
        debug mon: 20
        debug ms: 1
        mon osd crush smoke test: False
      osd:
        debug ms: 20
        debug journal: 20
        debug osd: 25
        osd fast shutdown: False
        debug filestore: 20
        rocksdb delete range threshold: 0
    sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
  install:
    ceph:
      sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
  admin_socket:
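The client overrides above exercise RGW's server-side encryption paths without a real key server: "rgw crypt s3 kms backend: testing" serves the static testkey-1 listed under "rgw crypt s3 kms encryption keys", and "rgw crypt require ssl: False" permits plain HTTP. As a minimal sketch of the kind of SSE-KMS request this configuration accepts (the endpoint, credentials, bucket, and object names below are hypothetical, not taken from this job):

    import boto3

    # Hypothetical endpoint and credentials, for illustration only.
    s3 = boto3.client(
        "s3",
        endpoint_url="http://rgw.example.com:8000",
        aws_access_key_id="ACCESS_KEY",
        aws_secret_access_key="SECRET_KEY",
    )
    s3.create_bucket(Bucket="encrypted-bucket")

    # SSE-KMS upload: the key ID must match an entry in
    # "rgw crypt s3 kms encryption keys" (here, testkey-1).
    s3.put_object(
        Bucket="encrypted-bucket",
        Key="obj1",
        Body=b"payload",
        ServerSideEncryption="aws:kms",
        SSEKMSKeyId="testkey-1",
    )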
owner: scheduled_aemerson@teuthology
pid:
roles:
- ['c1.mon.a', 'c1.mgr.x', 'c1.osd.0', 'c1.osd.1', 'c1.osd.2', 'c1.client.0', 'c1.client.1']
- ['c2.mon.a', 'c2.mgr.x', 'c2.osd.0', 'c2.osd.1', 'c2.osd.2', 'c2.client.0', 'c2.client.1']
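Each entry in roles above packs a cluster name, daemon type, and daemon id into one dotted string, so this job schedules two complete clusters (c1 and c2), one per smithi node. A small illustrative helper (not teuthology's own parser) shows how such strings decompose:

    def split_role(role: str) -> tuple[str, str, str]:
        """Split a role like 'c1.osd.0' into (cluster, type, id)."""
        cluster, daemon_type, daemon_id = role.split(".", 2)
        return cluster, daemon_type, daemon_id

    print(split_role("c1.mon.a"))     # ('c1', 'mon', 'a')
    print(split_role("c2.client.1"))  # ('c2', 'client', '1')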
sentry_event: http://sentry.ceph.com/sepia/teuthology/?q=a65e3768f6d6416182e7fdd38828f227
status: fail
success: False
branch: wip-datalog-fifo
seed:
sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
subset:
suite:
suite_branch: wip-datalog-fifo
suite_path:
suite_relpath:
suite_repo:
suite_sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
targets:
  smithi059.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+H+PSwkdJ8iQm6CD1kW2xiaSLPfJ6cp+F+J7qlLAAcCrRkoibgVNyFPL4Q898wW4JcMVANPanhX3Itf5KeGieHr4OK+jdEkleMshrd0Yk+Qjg5lAIU/azaoEfZdIs7NMZpYF1kW/gRs83n2swsmWoXoC66mNybIbtcChR4TZOM9lW6tuuY9QAorLsJF1BQX4uV35F/MjuJNoxpuMk5xxWhG3c6ncpEN/ENWq6D1KgP7pvjrRHmpjxmjq6sMAW3RiwJ7YVRUKTaUbzS7WY4Z84CgDqndpyeqIe7aIeiuUCEWMHr8/ICS/rGXgr5Q3WYyoFzbMb9LhfXhJ1vqibQ9f/
  smithi191.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDUDXS07xQWe5g1PYrxdaof48sNJG6kwY+WVPkvXtqdDb6I+0+DW3iB1gKXa/zepqOOVMjIvppAxwXss4NimzSCq6vJHh8RPYxa5xD32zswqp0VcGqqYLJ23gwqIs5reYMgZSo4hTMZ5TWSCZNqpzJAuyFXTKj64fm6jqaQsz0CAsNeFDHYGFgruEYxBeJuIaK18tMTNv2NTZ+lW24KuZYFBgr+rDoK8v4+Z7uGcVa1x0dK6u2dgcmKURD98hJoOi8fyBpVWMTq3kzWsneMwDzKJolOPrc/iAXCzAX57VGwEWfQm0aLzHhIyuq5uL5ARqODYf6ccGUWurKO5N2AQfWp
tasks:
- internal.buildpackages_prep:
- internal.lock_machines:
- internal.save_config:
- internal.check_lock:
- internal.add_remotes:
- console_log:
- internal.connect:
- internal.push_inventory:
- internal.serialize_remote_roles:
- internal.check_conflict:
- internal.check_ceph_data:
- internal.vm_setup:
- kernel:
- internal.base:
- internal.archive_upload:
- internal.archive:
- internal.coredump:
- internal.sudo:
- internal.syslog:
- internal.timer:
- pcp:
- selinux:
- ansible.cephlab:
- clock:
- install:
- ceph:
    cluster: c1
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(PG_AVAILABILITY\)
    wait-for-scrub: False
    conf:
      osd.2:
        osd_max_omap_entries_per_request: 10000
      osd.1:
        osd_max_omap_entries_per_request: 1000
      osd.0:
        osd_max_omap_entries_per_request: 10
      global:
      mgr:
        debug ms: 1
        debug mgr: 20
      client:
        rgw crypt require ssl: False
        rgw md log max shards: 4
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
        setuser: ceph
        rgw crypt s3 kms backend: testing
        rgw sync obj etag verify: True
        rgw data log num shards: 4
        setgroup: ceph
        debug rgw: 20
        rgw sync log trim interval: 0
        rgw curl low speed time: 300
      mon:
        debug paxos: 20
        debug mon: 20
        debug ms: 1
        mon osd crush smoke test: False
      osd:
        debug ms: 20
        debug journal: 20
        debug osd: 25
        osd fast shutdown: False
        debug filestore: 20
        rocksdb delete range threshold: 0
    sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
- ceph:
    cluster: c2
    log-whitelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - \(PG_AVAILABILITY\)
    wait-for-scrub: False
    conf:
      osd.2:
        osd_max_omap_entries_per_request: 10000
      osd.1:
        osd_max_omap_entries_per_request: 1000
      osd.0:
        osd_max_omap_entries_per_request: 10
      global:
      mgr:
        debug ms: 1
        debug mgr: 20
      client:
        rgw crypt require ssl: False
        rgw md log max shards: 4
        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
        setuser: ceph
        rgw crypt s3 kms backend: testing
        rgw sync obj etag verify: True
        rgw data log num shards: 4
        setgroup: ceph
        debug rgw: 20
        rgw sync log trim interval: 0
        rgw curl low speed time: 300
      mon:
        debug paxos: 20
        debug mon: 20
        debug ms: 1
        mon osd crush smoke test: False
      osd:
        debug ms: 20
        debug journal: 20
        debug osd: 25
        osd fast shutdown: False
        debug filestore: 20
        rocksdb delete range threshold: 0
    sha1: 4aaa9f6d25ef19c5714d6b8102e2518bad2f4eb4
- rgw:
    c2.client.1:
    c2.client.0:
    c1.client.0:
    c1.client.1:
- rgw-multisite:
- rgw-multisite-tests:
teuthology_branch: master
verbose: False
pcp_grafana_url:
priority:
user:
queue:
posted: 2020-06-29 22:15:47
started: 2020-06-30 13:33:29
updated: 2020-06-30 13:53:29
status_class: danger
runtime: 0:20:00
wait_time: 0:11:00
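The timing fields are self-consistent: runtime (updated minus started, 0:20:00) equals wait_time (0:11:00, the gap before the test body actually ran) plus duration (0:09:00 of test execution). A quick arithmetic check, assuming the H:MM:SS rendering used above:

    from datetime import timedelta

    def parse_hms(s: str) -> timedelta:
        """Parse a teuthology-style H:MM:SS duration string."""
        h, m, sec = (int(x) for x in s.split(":"))
        return timedelta(hours=h, minutes=m, seconds=sec)

    duration = parse_hms("0:09:00")
    wait_time = parse_hms("0:11:00")
    assert duration + wait_time == parse_hms("0:20:00")  # matches runtime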