From 5cedc0df9a0558fd9d4772480717510b7d11a69f Mon Sep 17 00:00:00 2001
From: su622b
Date: Tue, 31 Mar 2020 15:08:14 -0400
Subject: update docker-compose with frankfurt changes

Issue-ID: DMAAP-1417
Change-Id: I07dea7ac630cb72f7a561fe20e8ec9fd28bdfccd
Signed-off-by: su622b
---
 .../docker-compose/mr/MsgRtrApi.properties | 172 +++++++++++++++++++++
 1 file changed, 172 insertions(+)
 create mode 100644 src/main/resources/docker-compose/mr/MsgRtrApi.properties

diff --git a/src/main/resources/docker-compose/mr/MsgRtrApi.properties b/src/main/resources/docker-compose/mr/MsgRtrApi.properties
new file mode 100644
index 0000000..8f8260e
--- /dev/null
+++ b/src/main/resources/docker-compose/mr/MsgRtrApi.properties
@@ -0,0 +1,172 @@
+# LICENSE_START=======================================================
+# org.onap.dmaap
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+##
+## Cambria API Server config
+##
+## Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP service
+##
+## 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+#config.zk.servers=172.18.1.1
+#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
+config.zk.servers=zookeeper:2181
+
+#config.zk.root=/fe3c/cambria/config
+
+
+###############################################################################
+##
+## Kafka Connection
+##
+## Items below are passed through to Kafka's producer and consumer
+## configurations (after removing "kafka.")
+## If you want to change request.required.acks, it can take the value shown below.
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
+kafka.metadata.broker.list=kafka:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes=1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+## Secured Config
+##
+## Some data stored in the config system is sensitive -- API keys and secrets,
+## for example. To protect it, we use an encryption layer for this section
+## of the config.
+##
+## The key is a base64-encoded AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16-byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+## Kafka expects live connections from the consumer to the broker, which
+## obviously doesn't work over connectionless HTTP requests. The Cambria
+## server proxies HTTP requests into Kafka consumer sessions that are kept
+## around for later re-use. Creating a new consumer for every request would
+## be costly and would substantially impact a high-volume consumer's performance.
+##
+## This complicates Cambria server failover, because we often need server
+## A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+## This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
+msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+enforced.topic.name.AAF=org.onap.dmaap.mr
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.onap.dmaap.mr
+##############################################################################
+#Mirror Maker Agent
+
+msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
\ No newline at end of file
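Note on the Secured Config section above: it states that cambria.secureConfig.key is a base64-encoded AES key and cambria.secureConfig.iv is a 16-byte value (the sample values shipped here are base64 as well), and that both must be created per installation. Below is a minimal sketch of one way such values could be generated with the standard JDK crypto APIs; the class name, the 128-bit key size, and the ready-to-paste property output are illustrative assumptions, not part of this change.

import javax.crypto.KeyGenerator;
import java.security.SecureRandom;
import java.util.Base64;

// Hypothetical helper: prints per-installation values for
// cambria.secureConfig.key (base64-encoded AES key) and
// cambria.secureConfig.iv (base64-encoded 16-byte IV).
public class SecureConfigGenerator {
    public static void main(String[] args) throws Exception {
        // Assumption: a 128-bit AES key is acceptable for the deployment.
        KeyGenerator keyGen = KeyGenerator.getInstance("AES");
        keyGen.init(128);
        String key = Base64.getEncoder().encodeToString(keyGen.generateKey().getEncoded());

        // 16 random bytes for the initialization vector.
        byte[] iv = new byte[16];
        new SecureRandom().nextBytes(iv);
        String ivBase64 = Base64.getEncoder().encodeToString(iv);

        System.out.println("cambria.secureConfig.key=" + key);
        System.out.println("cambria.secureConfig.iv=" + ivBase64);
    }
}

The two printed lines could then replace the Southfield sandbox values carried in this file.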
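Note on the Consumer Caching section above: it explains that the server maps stateless HTTP polls onto cached Kafka consumer sessions so repeat polls avoid the consumer setup cost, sweeping idle consumers after cambria.consumer.cache.touchFreqMs. The sketch below shows the client-side pattern that benefits from this cache: polling the Message Router consume endpoint (GET /events/{topic}/{consumerGroup}/{consumerId} on the standard port 3904) with the same group and ID on every request. The host name, topic, group, and consumer ID are placeholders for illustration.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical polling client; "message-router", "my.sample.topic",
// "myGroup" and "C1" are placeholder names.
public class MessageRouterPoller {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Keeping consumerGroup/consumerId stable across polls lets the server
        // reuse its cached Kafka consumer instead of building a new one per call.
        String url = "http://message-router:3904/events/my.sample.topic/myGroup/C1";

        for (int i = 0; i < 3; i++) {
            HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println("poll " + i + " -> HTTP " + response.statusCode() + ": " + response.body());
        }
    }
}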