Diffstat (limited to 'kubernetes/dmaap/components/message-router/resources/config')
-rwxr-xr-x | kubernetes/dmaap/components/message-router/resources/config/dmaap/MsgRtrApi.properties | 50
1 file changed, 24 insertions, 26 deletions
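
In short, the diff below swaps the raw Helm release name for the shared "common.release" helper when building the Zookeeper and Kafka host lists, and tidies whitespace in the surrounding comments. A minimal before/after sketch of the pattern, taken from the diff itself (only replica 0 shown here; the actual lines list replicas 0-2):

# Before: hostname prefix taken directly from the Helm release name
config.zk.servers={{.Release.Name}}-{{.Values.zookeeper.name}}-0.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}}
# After: hostname prefix comes from the shared "common.release" template helper
config.zk.servers={{include "common.release" .}}-{{.Values.zookeeper.name}}-0.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}}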
diff --git a/kubernetes/dmaap/components/message-router/resources/config/dmaap/MsgRtrApi.properties b/kubernetes/dmaap/components/message-router/resources/config/dmaap/MsgRtrApi.properties
index b07eaad9b5..25b29a583b 100755
--- a/kubernetes/dmaap/components/message-router/resources/config/dmaap/MsgRtrApi.properties
+++ b/kubernetes/dmaap/components/message-router/resources/config/dmaap/MsgRtrApi.properties
@@ -7,7 +7,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 # http://www.apache.org/licenses/LICENSE-2.0
-#
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,28 +16,27 @@
 # ============LICENSE_END=========================================================
 #
 # ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
+#
 ###############################################################################
 ###############################################################################
 ##
 ## Cambria API Server config
 ##
-## - Default values are shown as commented settings.
+## Default values are shown as commented settings.
 ##
-
 ###############################################################################
 ##
 ## HTTP service
 ##
-## - 3904 is standard as of 7/29/14.
+## 3904 is standard as of 7/29/14.
 #
 ## Zookeeper Connection
 ##
-## Both Cambria and Kafka make use of Zookeeper.
+## Both Cambria and Kafka make use of Zookeeper.
 ##
 #config.zk.servers=172.18.1.1
 #config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
-config.zk.servers={{.Release.Name}}-{{.Values.zookeeper.name}}-0.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}},{{.Release.Name}}-{{.Values.zookeeper.name}}-1.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}},{{.Release.Name}}-{{.Values.zookeeper.name}}-2.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}}
+config.zk.servers={{include "common.release" .}}-{{.Values.zookeeper.name}}-0.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}},{{include "common.release" .}}-{{.Values.zookeeper.name}}-1.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}},{{include "common.release" .}}-{{.Values.zookeeper.name}}-2.{{.Values.zookeeper.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.zookeeper.port}}
 
 #config.zk.root=/fe3c/cambria/config
 
@@ -46,12 +45,12 @@ config.zk.servers={{.Release.Name}}-{{.Values.zookeeper.name}}-0.{{.Values.zooke
 ##
 ## Kafka Connection
 ##
-## Items below are passed through to Kafka's producer and consumer
-## configurations (after removing "kafka.")
-## if you want to change request.required.acks it can take this one value
+## Items below are passed through to Kafka's producer and consumer
+## configurations (after removing "kafka.")
+## if you want to change request.required.acks it can take this one value
 #kafka.metadata.broker.list=localhost:9092,localhost:9093
 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list={{.Release.Name}}-{{.Values.kafka.name}}-0.{{.Values.kafka.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.kafka.port}},{{.Release.Name}}-{{.Values.kafka.name}}-1.{{.Values.kafka.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.kafka.port}},{{.Release.Name}}-{{.Values.kafka.name}}-2.{{.Values.kafka.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.kafka.port}}
+kafka.metadata.broker.list={{include "common.release" .}}-{{.Values.kafka.name}}-0.{{.Values.kafka.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.kafka.port}},{{include "common.release" .}}-{{.Values.kafka.name}}-1.{{.Values.kafka.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.kafka.port}},{{include "common.release" .}}-{{.Values.kafka.name}}-2.{{.Values.kafka.name}}.{{.Release.Namespace}}.svc.cluster.local:{{.Values.kafka.port}}
 ##kafka.request.required.acks=-1
 #kafka.client.zookeeper=${config.zk.servers}
 consumer.timeout.ms=100
@@ -69,11 +68,11 @@ kafka.rebalance.max.retries=6
 
 ###############################################################################
 ##
-## Secured Config
+## Secured Config
 ##
-## Some data stored in the config system is sensitive -- API keys and secrets,
-## for example. to protect it, we use an encryption layer for this section
-## of the config.
+## Some data stored in the config system is sensitive -- API keys and secrets,
+## for example. to protect it, we use an encryption layer for this section
+## of the config.
 ##
 ## The key is a base64 encode AES key. This must be created/configured for
 ## each installation.
@@ -95,14 +94,14 @@ authentication.adminSecret=fe3cCompound
 ##
 ## Consumer Caching
 ##
-## Kafka expects live connections from the consumer to the broker, which
-## obviously doesn't work over connectionless HTTP requests. The Cambria
-## server proxies HTTP requests into Kafka consumer sessions that are kept
-## around for later re-use. Not doing so is costly for setup per request,
-## which would substantially impact a high volume consumer's performance.
+## Kafka expects live connections from the consumer to the broker, which
+## obviously doesn't work over connectionless HTTP requests. The Cambria
+## server proxies HTTP requests into Kafka consumer sessions that are kept
+## around for later re-use. Not doing so is costly for setup per request,
+## which would substantially impact a high volume consumer's performance.
 ##
-## This complicates Cambria server failover, because we often need server
-## A to close its connection before server B brings up the replacement.
+## This complicates Cambria server failover, because we often need server
+## A to close its connection before server B brings up the replacement.
 ##
 
 ## The consumer cache is normally enabled.
@@ -133,10 +132,10 @@ cambria.consumer.cache.touchFreqMs=120000
 ##
 ## Metrics Reporting
 ##
-## This server can report its metrics periodically on a topic.
+## This server can report its metrics periodically on a topic.
 ##
 #metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
 #metrics.send.cambria.sendEverySeconds=60
 
 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
@@ -170,5 +169,4 @@ msgRtr.mirrormaker.consumerid=1
 kafka.max.poll.interval.ms=300000
 kafka.heartbeat.interval.ms=60000
 kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
+kafka.max.poll.records=1000
\ No newline at end of file
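
For illustration only: with hypothetical values (release name "onap", namespace "onap", zookeeper.name "message-router-zookeeper", zookeeper.port 2181), and assuming the "common.release" helper resolves to the plain release name, the new config.zk.servers template above would render roughly as follows. None of these concrete values come from this change; they are placeholders to show the shape of the rendered host list.

# hypothetical rendered output, not part of the commit
config.zk.servers=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181

The kafka.metadata.broker.list line renders the same way, substituting the kafka.name and kafka.port values.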