# Copyright © 2017 Amdocs, Bell Canada
# Modifications Copyright © 2018 AT&T
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#################################################################
# Global configuration defaults.
#################################################################
global:
  nodePortPrefix: 302
  persistence: {}

#################################################################
# AAF part
#################################################################
certInitializer:
  nameOverride: dmaap-mr-kafka-cert-initializer
  aafDeployFqi: deployer@people.osaaf.org
  aafDeployPass: demo123456!
  # aafDeployCredsExternalSecret: some secret
  fqdn: dmaap-mr
  fqi: dmaapmr@mr.dmaap.onap.org
  public_fqdn: mr.dmaap.onap.org
  cadi_longitude: "-122.26147"
  cadi_latitude: "37.78187"
  app_ns: org.osaaf.aaf
  credsPath: /opt/app/osaaf/local
  fqi_namespace: org.onap.dmaap.mr
  final_cadi_files: cadi.properties
  aaf_add_config: |
    echo "*** concat the three prop files"
    cd {{ .Values.credsPath }}
    cat {{ .Values.fqi_namespace }}.props > {{ .Values.final_cadi_files }}
    cat {{ .Values.fqi_namespace }}.cred.props >> {{ .Values.final_cadi_files }}
    cat {{ .Values.fqi_namespace }}.location.props >> {{ .Values.final_cadi_files }}
    echo "*** configuration result:"
    cat {{ .Values.final_cadi_files }}
    chown -R 1000 .
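# The demo AAF deployer credentials above can be overridden at deploy time
# (illustrative example only; <release> and <chart-path> are placeholders,
# not values defined in this chart):
#   helm upgrade --install <release> <chart-path> \
#     --set certInitializer.aafDeployFqi=<fqi> \
#     --set certInitializer.aafDeployPass=<password>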
#################################################################
# Application configuration defaults.
#################################################################
# application image
image: onap/dmaap/kafka111:1.0.5
pullPolicy: Always

zookeeper:
  name: message-router-zookeeper
  port: 2181
  replicaCount: 3

kafka:
  heapOptions: -Xmx5G -Xms1G
  jaasOptions: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
  jaasOptionsAaf: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
  enableSupport: false
  protocolMapAaf: INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
  protocolMap: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
  listenersAaf: EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
  listeners: EXTERNAL_PLAINTEXT://0.0.0.0:9091,INTERNAL_PLAINTEXT://0.0.0.0:9092
  authorizer: org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
  saslInterBrokerProtocol: PLAIN
  saslMech: PLAIN
  interBrokerListernerAaf: INTERNAL_SASL_PLAINTEXT
  interBrokerListerner: INTERNAL_PLAINTEXT

configurationOverrides:
  "offsets.topic.replication.factor": "3"
  "log.dirs": "/var/lib/kafka/data"
  "log.retention.hours": "168"
  "num.partitions": "3"
  "transaction.state.log.replication.factor": "1"
  "transaction.state.log.min.isr": "1"
  "num.recovery.threads.per.data.dir": "5"
  "zookeeper.connection.timeout.ms": "6000"
  "default.replication.factor": "3"
  "zookeeper.set.acl": "true"

jmx:
  port: 5555

prometheus:
  jmx:
    enabled: false
    image: solsson/kafka-prometheus-jmx-exporter@sha256
    imageTag: 6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143
    port: 5556

jaas:
  config:
    zkClient: kafka
    zkClientPassword: kafka_secret
    kafkaAdminUser: admin
    kafkaAdminPassword: admin_secret
    #kafkaAdminUserExternal: some secret
    #zkClientPasswordExternal: some secret

secrets:
  - uid: zk-client
    type: basicAuth
    externalSecret: '{{ .Values.jaas.config.zkClientPasswordExternal }}'
    login: '{{ .Values.jaas.config.zkClient }}'
    password: '{{ .Values.jaas.config.zkClientPassword }}'
    passwordPolicy: required
  - uid: kafka-admin
    type: basicAuth
    externalSecret: '{{ .Values.jaas.config.kafkaAdminUserExternal }}'
    login: '{{ .Values.jaas.config.kafkaAdminUser }}'
    password: '{{ .Values.jaas.config.kafkaAdminPassword }}'
    passwordPolicy: required

# flag to enable debugging - application support required
debugEnabled: false

# default number of instances
replicaCount: 3

# To access Kafka from outside the cluster, this value must be set to "hard" and
# the number of nodes in the K8S cluster must be equal to or greater than the
# replica count.
podAntiAffinityType: soft

# default partitions
defaultpartitions: 3

nodeSelector: {}

nodeAffinity: {}

affinity: {}

tolerations: {}

# probe configuration parameters
liveness:
  initialDelaySeconds: 70
  periodSeconds: 20
  timeoutSeconds: 10
  # necessary to disable liveness probe when setting breakpoints
  # in debugger so K8s doesn't restart unresponsive container
  enabled: true

readiness:
  initialDelaySeconds: 90
  periodSeconds: 20
  timeoutSeconds: 100
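# Broker properties under configurationOverrides can be tuned per environment at
# deploy time instead of editing this file (illustrative example only; <release>
# and <chart-path> are placeholders; dots inside property names must be escaped
# in Helm's --set syntax):
#   helm upgrade --install <release> <chart-path> \
#     --set-string configurationOverrides."log\.retention\.hours"=72 \
#     --set defaultpartitions=6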
## Persist data to a persistent volume
persistence:
  enabled: true

  ## A manually managed Persistent Volume and Claim
  ## Requires persistence.enabled: true
  ## If defined, PVC must be created manually before volume will be bound
  # existingClaim:
  volumeReclaimPolicy: Retain

  ## database data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  accessMode: ReadWriteOnce
  size: 2Gi
  mountPath: /dockerdata-nfs
  mountSubPath: message-router/data-kafka

service:
  type: NodePort
  name: message-router-kafka
  portName: tcp-message-router-kafka
  internalPort: 9092
  internalSSLPort: 9093
  externalPort: 9091
  baseNodePort: 30490

ingress:
  enabled: false

# Resource Limit flavor - by default using small
flavor: small
# Segregation for different environments (small and large)
resources:
  small:
    limits:
      cpu: 2000m
      memory: 4Gi
    requests:
      cpu: 500m
      memory: 1Gi
  large:
    limits:
      cpu: 4000m
      memory: 8Gi
    requests:
      cpu: 1000m
      memory: 2Gi
  unlimited: {}

# Pods Service Account
serviceAccount:
  nameOverride: message-router-kafka
  roles:
    - read
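# The resource flavor and persistence sizing above can likewise be overridden per
# environment (illustrative example only; <release> and <chart-path> are
# placeholders, not values defined in this chart):
#   helm upgrade --install <release> <chart-path> \
#     --set flavor=large \
#     --set persistence.size=10Gi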