summaryrefslogtreecommitdiffstats
path: root/meta-openstack/recipes-devtools/python/python-swift
diff options
context:
space:
mode:
authorVu Tran <vu.tran@windriver.com>2014-04-09 16:44:55 -0400
committerBruce Ashfield <bruce.ashfield@windriver.com>2014-04-11 14:15:28 -0400
commit37ac94989c34c5d8d1617167f22dba18e9b087b9 (patch)
tree2bf8e6e586bca72caff69b81ec097f65b3b9773d /meta-openstack/recipes-devtools/python/python-swift
parent76e347257515f4e3ae08fa0c8f8eda935d6a572b (diff)
downloadmeta-cloud-services-37ac94989c34c5d8d1617167f22dba18e9b087b9.tar.gz
swift: separate proxy server config
There are many changes required for the proxy-server config file. So instead of using sed to replace all the little details, it is cleaner to add a new config file that contains the changes. Signed-off-by: Vu Tran <vu.tran@windriver.com>
Diffstat (limited to 'meta-openstack/recipes-devtools/python/python-swift')
-rw-r--r--meta-openstack/recipes-devtools/python/python-swift/proxy-server.conf492
1 file changed, 492 insertions, 0 deletions
diff --git a/meta-openstack/recipes-devtools/python/python-swift/proxy-server.conf b/meta-openstack/recipes-devtools/python/python-swift/proxy-server.conf
new file mode 100644
index 0000000..d005d8e
--- /dev/null
+++ b/meta-openstack/recipes-devtools/python/python-swift/proxy-server.conf
@@ -0,0 +1,492 @@
1[DEFAULT]
2# bind_ip = 0.0.0.0
3bind_port = 8888
4# bind_timeout = 30
5# backlog = 4096
6swift_dir = /etc/swift
7user = root
8#
9# Use an integer to override the number of pre-forked processes that will
10# accept connections. Should default to the number of effective cpu
11# cores in the system. It's worth noting that individual workers will
12# use many eventlet co-routines to service multiple concurrent requests.
13# workers = auto
14#
15# Maximum concurrent requests per worker
16# max_clients = 1024
17#
18# Set the following two lines to enable SSL. This is for testing only.
19# cert_file = /etc/swift/proxy.crt
20# key_file = /etc/swift/proxy.key
21#
22# expiring_objects_container_divisor = 86400
23#
24# You can specify default log routing here if you want:
25# log_name = swift
26# log_facility = LOG_LOCAL0
27# log_level = INFO
28# log_headers = false
29# log_address = /dev/log
30#
31# This optional suffix (default is empty) that would be appended to the swift transaction
32# id allows one to easily figure out from which cluster that X-Trans-Id belongs to.
33# This is very useful when one is managing more than one swift cluster.
34# trans_id_suffix =
35#
36# comma separated list of functions to call to setup custom log handlers.
37# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
38# adapted_logger
39# log_custom_handlers =
40#
41# If set, log_udp_host will override log_address
42# log_udp_host =
43# log_udp_port = 514
44#
45# You can enable StatsD logging here:
46# log_statsd_host = localhost
47# log_statsd_port = 8125
48# log_statsd_default_sample_rate = 1.0
49# log_statsd_sample_rate_factor = 1.0
50# log_statsd_metric_prefix =
51#
52# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
53# cors_allow_origin =
54#
55# client_timeout = 60
56# eventlet_debug = false
57
58[pipeline:main]
59pipeline = catch_errors healthcheck proxy-logging cache bulk slo ratelimit authtoken keystoneauth container-quotas account-quotas proxy-logging proxy-server
60
61[app:proxy-server]
62use = egg:swift#proxy
63# You can override the default log routing for this app here:
64# set log_name = proxy-server
65# set log_facility = LOG_LOCAL0
66# set log_level = INFO
67# set log_address = /dev/log
68#
69# log_handoffs = true
70# recheck_account_existence = 60
71# recheck_container_existence = 60
72# object_chunk_size = 8192
73# client_chunk_size = 8192
74# node_timeout = 10
75# conn_timeout = 0.5
76#
77# How long without an error before a node's error count is reset. This will
78# also be how long before a node is reenabled after suppression is triggered.
79# error_suppression_interval = 60
80#
81# How many errors can accumulate before a node is temporarily ignored.
82# error_suppression_limit = 10
83#
84# If set to 'true' any authorized user may create and delete accounts; if
85# 'false' no one, even authorized, can.
86# allow_account_management = false
87allow_account_management = true
88#
89# Set object_post_as_copy = false to turn on fast posts where only the metadata
90# changes are stored anew and the original data file is kept in place. This
91# makes for quicker posts; but since the container metadata isn't updated in
92# this mode, features like container sync won't be able to sync posts.
93# object_post_as_copy = true
94#
95# If set to 'true' authorized accounts that do not yet exist within the Swift
96# cluster will be automatically created.
97# account_autocreate = false
98account_autocreate = true
99#
100# If set to a positive value, trying to create a container when the account
101# already has at least this maximum containers will result in a 403 Forbidden.
102# Note: This is a soft limit, meaning a user might exceed the cap for
103# recheck_account_existence before the 403s kick in.
104# max_containers_per_account = 0
105#
106# This is a comma separated list of account hashes that ignore the
107# max_containers_per_account cap.
108# max_containers_whitelist =
109#
110# Comma separated list of Host headers to which the proxy will deny requests.
111# deny_host_headers =
112#
113# Prefix used when automatically creating accounts.
114# auto_create_account_prefix = .
115#
116# Depth of the proxy put queue.
117# put_queue_depth = 10
118#
119# Start rate-limiting object segment serving after the Nth segment of a
120# segmented object.
121# rate_limit_after_segment = 10
122#
123# Once segment rate-limiting kicks in for an object, limit segments served
124# to N per second.
125# rate_limit_segments_per_sec = 1
126#
127# Storage nodes can be chosen at random (shuffle), by using timing
128# measurements (timing), or by using an explicit match (affinity).
129# Using timing measurements may allow for lower overall latency, while
130# using affinity allows for finer control. In both the timing and
131# affinity cases, equally-sorting nodes are still randomly chosen to
132# spread load.
133# The valid values for sorting_method are "affinity", "shuffle", and "timing".
134# sorting_method = shuffle
135#
136# If the "timing" sorting_method is used, the timings will only be valid for
137# the number of seconds configured by timing_expiry.
138# timing_expiry = 300
139#
140# If set to false will treat objects with X-Static-Large-Object header set
141# as a regular object on GETs, i.e. will return that object's contents. Should
142# be set to false if slo is not used in pipeline.
143# allow_static_large_object = true
144#
145# The maximum time (seconds) that a large object connection is allowed to last.
146# max_large_object_get_time = 86400
147#
148# Set to the number of nodes to contact for a normal request. You can use
149# '* replicas' at the end to have it use the number given times the number of
150# replicas for the ring being used for the request.
151# request_node_count = 2 * replicas
152#
153# Which backend servers to prefer on reads. Format is r<N> for region
154# N or r<N>z<M> for region N, zone M. The value after the equals is
155# the priority; lower numbers are higher priority.
156#
157# Example: first read from region 1 zone 1, then region 1 zone 2, then
158# anything in region 2, then everything else:
159# read_affinity = r1z1=100, r1z2=200, r2=300
160# Default is empty, meaning no preference.
161# read_affinity =
162#
163# Which backend servers to prefer on writes. Format is r<N> for region
164# N or r<N>z<M> for region N, zone M. If this is set, then when
165# handling an object PUT request, some number (see setting
166# write_affinity_node_count) of local backend servers will be tried
167# before any nonlocal ones.
168#
169# Example: try to write to regions 1 and 2 before writing to any other
170# nodes:
171# write_affinity = r1, r2
172# Default is empty, meaning no preference.
173# write_affinity =
174#
175# The number of local (as governed by the write_affinity setting)
176# nodes to attempt to contact first, before any non-local ones. You
177# can use '* replicas' at the end to have it use the number given
178# times the number of replicas for the ring being used for the
179# request.
180# write_affinity_node_count = 2 * replicas
181#
182# These are the headers whose values will only be shown to swift_owners. The
183# exact definition of a swift_owner is up to the auth system in use, but
184# usually indicates administrative responsibilities.
185# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2
186
187
188[filter:tempauth]
189use = egg:swift#tempauth
190# You can override the default log routing for this filter here:
191# set log_name = tempauth
192# set log_facility = LOG_LOCAL0
193# set log_level = INFO
194# set log_headers = false
195# set log_address = /dev/log
196#
197# The reseller prefix will verify a token begins with this prefix before even
198# attempting to validate it. Also, with authorization, only Swift storage
199# accounts with this prefix will be authorized by this middleware. Useful if
200# multiple auth systems are in use for one Swift cluster.
201# reseller_prefix = AUTH
202#
203# The auth prefix will cause requests beginning with this prefix to be routed
204# to the auth subsystem, for granting tokens, etc.
205# auth_prefix = /auth/
206# token_life = 86400
207#
208# This allows middleware higher in the WSGI pipeline to override auth
209# processing, useful for middleware such as tempurl and formpost. If you know
210# you're not going to use such middleware and you want a bit of extra security,
211# you can set this to false.
212# allow_overrides = true
213#
214# This specifies what scheme to return with storage urls:
215# http, https, or default (chooses based on what the server is running as)
216# This can be useful with an SSL load balancer in front of a non-SSL server.
217# storage_url_scheme = default
218#
219# Lastly, you need to list all the accounts/users you want here. The format is:
220# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
221# or if you want underscores in <account> or <user>, you can base64 encode them
222# (with no equal signs) and use this format:
223# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
224# There are special groups of:
225# .reseller_admin = can do anything to any account for this auth
226# .admin = can do anything within the account
227# If neither of these groups are specified, the user can only access containers
228# that have been explicitly allowed for them by a .admin or .reseller_admin.
229# The trailing optional storage_url allows you to specify an alternate url to
230# hand back to the user upon authentication. If not specified, this defaults to
231# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
232# to what the requester would need to use to reach this host.
233# Here are example entries, required for running the tests:
234user_admin_admin = admin .admin .reseller_admin
235user_test_tester = testing .admin
236user_test2_tester2 = testing2 .admin
237user_test_tester3 = testing3
238
239# To enable Keystone authentication you need to have the auth token
240# middleware first to be configured. Here is an example below, please
241# refer to the keystone's documentation for details about the
242# different settings.
243#
244# You'll need to have as well the keystoneauth middleware enabled
245# and have it in your main pipeline so instead of having tempauth in
246# there you can change it to: authtoken keystoneauth
247#
248[filter:authtoken]
249paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
250auth_host = 127.0.0.1
251auth_port = 35357
252auth_protocol = http
253# auth_uri = http://127.0.0.1:5000/
254admin_tenant_name = %SERVICE_TENANT_NAME%
255admin_user = %SERVICE_USER%
256admin_password = %SERVICE_PASSWORD%
257# delay_auth_decision = 1
258# cache = swift.cache
259#
260[filter:keystoneauth]
261use = egg:swift#keystoneauth
262# Operator roles is the role which user would be allowed to manage a
263# tenant and be able to create container or give ACL to others.
264operator_roles = admin, swiftoperator, Member
265# The reseller admin role has the ability to create and delete accounts
266reseller_admin_role = ResellerAdmin
267
268[filter:healthcheck]
269use = egg:swift#healthcheck
270# An optional filesystem path, which if present, will cause the healthcheck
271# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
272# This facility may be used to temporarily remove a Swift node from a load
273# balancer pool during maintenance or upgrade (remove the file to allow the
274# node back into the load balancer pool).
275# disable_path =
276
277[filter:cache]
278use = egg:swift#memcache
279# You can override the default log routing for this filter here:
280# set log_name = cache
281# set log_facility = LOG_LOCAL0
282# set log_level = INFO
283# set log_headers = false
284# set log_address = /dev/log
285#
286# If not set here, the value for memcache_servers will be read from
287# memcache.conf (see memcache.conf-sample) or lacking that file, it will
288# default to the value below. You can specify multiple servers separated with
289# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
290# memcache_servers = 127.0.0.1:11211
291#
292# Sets how memcache values are serialized and deserialized:
293# 0 = older, insecure pickle serialization
294# 1 = json serialization but pickles can still be read (still insecure)
295# 2 = json serialization only (secure and the default)
296# If not set here, the value for memcache_serialization_support will be read
297# from /etc/swift/memcache.conf (see memcache.conf-sample).
298# To avoid an instant full cache flush, existing installations should
299# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
300# set to 2 and reload.
301# In the future, the ability to use pickle serialization will be removed.
302# memcache_serialization_support = 2
303
304[filter:ratelimit]
305use = egg:swift#ratelimit
306# You can override the default log routing for this filter here:
307# set log_name = ratelimit
308# set log_facility = LOG_LOCAL0
309# set log_level = INFO
310# set log_headers = false
311# set log_address = /dev/log
312#
313# clock_accuracy should represent how accurate the proxy servers' system clocks
314# are with each other. 1000 means that all the proxies' clock are accurate to
315# each other within 1 millisecond. No ratelimit should be higher than the
316# clock accuracy.
317# clock_accuracy = 1000
318#
319# max_sleep_time_seconds = 60
320#
321# log_sleep_time_seconds of 0 means disabled
322# log_sleep_time_seconds = 0
323#
324# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
325# rate_buffer_seconds = 5
326#
327# account_ratelimit of 0 means disabled
328# account_ratelimit = 0
329
330# these are comma separated lists of account names
331# account_whitelist = a,b
332# account_blacklist = c,d
333
334# with container_ratelimit_x = r
335# for containers of size x limit write requests per second to r. The container
336# rate will be linearly interpolated from the values given. With the values
337# below, a container of size 5 will get a rate of 75.
338# container_ratelimit_0 = 100
339# container_ratelimit_10 = 50
340# container_ratelimit_50 = 20
341
342# Similarly to the above container-level write limits, the following will limit
343# container GET (listing) requests.
344# container_listing_ratelimit_0 = 100
345# container_listing_ratelimit_10 = 50
346# container_listing_ratelimit_50 = 20
347
348[filter:domain_remap]
349use = egg:swift#domain_remap
350# You can override the default log routing for this filter here:
351# set log_name = domain_remap
352# set log_facility = LOG_LOCAL0
353# set log_level = INFO
354# set log_headers = false
355# set log_address = /dev/log
356#
357# storage_domain = example.com
358# path_root = v1
359# reseller_prefixes = AUTH
360
361[filter:catch_errors]
362use = egg:swift#catch_errors
363# You can override the default log routing for this filter here:
364# set log_name = catch_errors
365# set log_facility = LOG_LOCAL0
366# set log_level = INFO
367# set log_headers = false
368# set log_address = /dev/log
369
370[filter:cname_lookup]
371# Note: this middleware requires python-dnspython
372use = egg:swift#cname_lookup
373# You can override the default log routing for this filter here:
374# set log_name = cname_lookup
375# set log_facility = LOG_LOCAL0
376# set log_level = INFO
377# set log_headers = false
378# set log_address = /dev/log
379#
380# storage_domain = example.com
381# lookup_depth = 1
382
383# Note: Put staticweb just after your auth filter(s) in the pipeline
384[filter:staticweb]
385use = egg:swift#staticweb
386
387# Note: Put tempurl just before your auth filter(s) in the pipeline
388[filter:tempurl]
389use = egg:swift#tempurl
390# The methods allowed with Temp URLs.
391# methods = GET HEAD PUT
392#
393# The headers to remove from incoming requests. Simply a whitespace delimited
394# list of header names and names can optionally end with '*' to indicate a
395# prefix match. incoming_allow_headers is a list of exceptions to these
396# removals.
397# incoming_remove_headers = x-timestamp
398#
399# The headers allowed as exceptions to incoming_remove_headers. Simply a
400# whitespace delimited list of header names and names can optionally end with
401# '*' to indicate a prefix match.
402# incoming_allow_headers =
403#
404# The headers to remove from outgoing responses. Simply a whitespace delimited
405# list of header names and names can optionally end with '*' to indicate a
406# prefix match. outgoing_allow_headers is a list of exceptions to these
407# removals.
408# outgoing_remove_headers = x-object-meta-*
409#
410# The headers allowed as exceptions to outgoing_remove_headers. Simply a
411# whitespace delimited list of header names and names can optionally end with
412# '*' to indicate a prefix match.
413# outgoing_allow_headers = x-object-meta-public-*
414
415# Note: Put formpost just before your auth filter(s) in the pipeline
416[filter:formpost]
417use = egg:swift#formpost
418
419# Note: Just needs to be placed before the proxy-server in the pipeline.
420[filter:name_check]
421use = egg:swift#name_check
422# forbidden_chars = '"`<>
423# maximum_length = 255
424# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
425
426[filter:list-endpoints]
427use = egg:swift#list_endpoints
428# list_endpoints_path = /endpoints/
429
430[filter:proxy-logging]
431use = egg:swift#proxy_logging
432# If not set, logging directives from [DEFAULT] without "access_" will be used
433# access_log_name = swift
434# access_log_facility = LOG_LOCAL0
435# access_log_level = INFO
436# access_log_address = /dev/log
437#
438# If set, access_log_udp_host will override access_log_address
439# access_log_udp_host =
440# access_log_udp_port = 514
441#
442# You can use log_statsd_* from [DEFAULT] or override them here:
443# access_log_statsd_host = localhost
444# access_log_statsd_port = 8125
445# access_log_statsd_default_sample_rate = 1.0
446# access_log_statsd_sample_rate_factor = 1.0
447# access_log_statsd_metric_prefix =
448# access_log_headers = false
449#
450# By default, the X-Auth-Token is logged. To obscure the value,
451# set reveal_sensitive_prefix to the number of characters to log.
452# For example, if set to 12, only the first 12 characters of the
453# token appear in the log. An unauthorized access of the log file
454# won't allow unauthorized usage of the token. However, the first
455# 12 or so characters is unique enough that you can trace/debug
456# token usage. Set to 0 to suppress the token completely (replaced
457# by '...' in the log).
458# Note: reveal_sensitive_prefix will not affect the value
459# logged with access_log_headers=True.
460# reveal_sensitive_prefix = 8192
461#
462# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
463# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
464# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
465#
466# Note: The double proxy-logging in the pipeline is not a mistake. The
467# left-most proxy-logging is there to log requests that were handled in
468# middleware and never made it through to the right-most middleware (and
469# proxy server). Double logging is prevented for normal requests. See
470# proxy-logging docs.
471
472# Note: Put before both ratelimit and auth in the pipeline.
473[filter:bulk]
474use = egg:swift#bulk
475# max_containers_per_extraction = 10000
476# max_failed_extractions = 1000
477# max_deletes_per_request = 10000
478# yield_frequency = 60
479
480# Note: Put after auth in the pipeline.
481[filter:container-quotas]
482use = egg:swift#container_quotas
483
484# Note: Put before both ratelimit and auth in the pipeline.
485[filter:slo]
486use = egg:swift#slo
487# max_manifest_segments = 1000
488# max_manifest_size = 2097152
489# min_segment_size = 1048576
490
491[filter:account-quotas]
492use = egg:swift#account_quotas