aboutsummaryrefslogtreecommitdiffstats
path: root/kubernetes/common/music/charts/zookeeper/values.yaml
diff options
context:
space:
mode:
authorNelson,Thomas(tn1381)(arthurdent3) <tn1381@att.com>2018-09-19 16:52:36 -0400
committerTschaen, Brendan <ctschaen@att.com>2018-10-26 19:08:42 +0000
commit4807fdfc7adf7945c31368e3b420dc853a29f4cd (patch)
tree5203c5f243ed1483f7ce314d3b680d803cdf9dbb /kubernetes/common/music/charts/zookeeper/values.yaml
parenta3a690dc96c6e70ff84f7e22ba23ebd8349daa24 (diff)
Adding MUSIC Common charts
Includes: Cassandra 3.11 - Current Common cassandra is lacking security. Tomcat 8.5 - Docker Hub Zookeeper 3.4 - Chart from Incubator/Zookeeper - https://github.com/helm/charts/tree/master/incubator/zookeeper Fixed Issue with Zookeeper not starting. Was unable to write to pvc location. add copyright header to the files where it's missing remove all the trailing whitespaces bump charts version to 3.0.0 and also requirement chart to 3.0.0 Adding resource limits Update Music Chart.yaml to 3.0.0 Make recomended fixes due to resources bug. Updated port to 76 Update cassandra values. Add nodePortPrefixExt Fix Tomcat sevice Name. Update Names Add Readiness Image Update Service names Fix cassandra service Update tomcat chart Update job chart to incluede timeout. Update job chart to include delay for run Remove some unneded files and update tomcat helm readiness Replace removed files that were braking the jenkins build Made suggested changes Update Replication factor from 1 to 3,3 is default, 1 was set for testing. Move tag:version into image: Remove commented out values and aafAdminUrl Change-Id: I47eafae052cbe7355468655e5f8fcda8402bafd6 Issue-ID: MUSIC-99 Signed-off-by: Nelson,Thomas(tn1381)(arthurdent3) <tn1381@att.com> Signed-off-by: Thomas Nelson Jr arthuerdent3 <nelson24@att.com>
Diffstat (limited to 'kubernetes/common/music/charts/zookeeper/values.yaml')
-rw-r--r--kubernetes/common/music/charts/zookeeper/values.yaml284
1 files changed, 284 insertions, 0 deletions
diff --git a/kubernetes/common/music/charts/zookeeper/values.yaml b/kubernetes/common/music/charts/zookeeper/values.yaml
new file mode 100644
index 0000000000..ea02e6151e
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/values.yaml
@@ -0,0 +1,284 @@
+## As weighted quorums are not supported, it is imperative that an odd number of replicas
+## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7.
+##
+## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set
+replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be (1,3,5, or 7)
+
+podDisruptionBudget:
+ maxUnavailable: 1 # Limits how many Zookeeper pods may be unavailable due to voluntary disruptions.
+
+terminationGracePeriodSeconds: 1800 # Duration in seconds a Zookeeper pod needs to terminate gracefully.
+
+## OnDelete requires you to manually delete each pod when making updates.
+## This approach is at the moment safer than RollingUpdate because replication
+## may be incomplete when replication source pod is killed.
+##
+## ref: http://blog.kubernetes.io/2017/09/kubernetes-statefulsets-daemonsets.html
+updateStrategy:
+ type: OnDelete # Pods will only be created when you manually delete old pods.
+
+## refs:
+## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
+## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1
+image:
+ #repository: nexus3.onap.org:10001/library/zookeeper
+ #tag: 3.3
+ repository: gcr.io/google_samples/k8szk # Container image repository for zookeeper container.
+ tag: v3 # Container image tag for zookeeper container.
+ pullPolicy: IfNotPresent # Image pull criteria for zookeeper container.
+
+service:
+ name: zookeeper
+ type: ClusterIP # Exposes zookeeper on a cluster-internal IP.
+ annotations: {} # Arbitrary non-identifying metadata for zookeeper service.
+ ## AWS example for use with LoadBalancer service type.
+ # external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local
+ # service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
+ # service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ ports:
+ client:
+ port: 2181 # Service port number for client port.
+ targetPort: client # Service target port for client port.
+ protocol: TCP # Service port protocol for client port.
+
+
+ports:
+ client:
+ containerPort: 2181 # Port number for zookeeper container client port.
+ protocol: TCP # Protocol for zookeeper container client port.
+ election:
+ containerPort: 3888 # Port number for zookeeper container election port.
+ protocol: TCP # Protocol for zookeeper container election port.
+ server:
+ containerPort: 2888 # Port number for zookeeper container server port.
+ protocol: TCP # Protocol for zookeeper container server port.
+
+# Resource Limit flavor - this chart selects "large" by default (see "flavor" below)
+flavor: large
+# Segregation for Different environment (Small and Large)
+resources:
+ small:
+ limits:
+ cpu: 1
+ memory: 1Gi
+ requests:
+ cpu: 500m
+ memory: 500Mi
+ large:
+ limits:
+ cpu: 3
+ memory: 2Gi
+ requests:
+ cpu: 2
+ memory: 1Gi
+ unlimited: {}
+
+nodeSelector: {} # Node label-values required to run zookeeper pods.
+
+tolerations: [] # Node taint overrides for zookeeper pods.
+
+# Criteria by which pod label-values influence scheduling for zookeeper pods.
+affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ release: zookeeper
+
+podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods.
+
+podLabels: {} # Key/value pairs that are attached to zookeeper pods.
+
+livenessProbe:
+ exec:
+ command:
+ - zkOk.sh
+ initialDelaySeconds: 20
+
+readinessProbe:
+ exec:
+ command:
+ - zkOk.sh
+ initialDelaySeconds: 20
+
+securityContext:
+ fsGroup: 1000
+ #runAsUser: 1000
+
+persistence:
+ enabled: true
+ ## zookeeper data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ volumeReclaimPolicy: Retain
+ accessMode: ReadWriteOnce
+ mountPath: /dockerdata-nfs
+ mountSubPath: music/zookeeper
+ storageType: local
+ storageClass: ""
+ size: 4Gi
+
+## Exporters query apps for metrics and make those metrics available for
+## Prometheus to scrape.
+exporters:
+
+ jmx:
+ enabled: false
+ image:
+ repository: sscaling/jmx-prometheus-exporter
+ tag: 0.3.0
+ pullPolicy: IfNotPresent
+ config:
+ lowercaseOutputName: false
+ rules:
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
+ name: "zookeeper_$2"
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
+ name: "zookeeper_$3"
+ labels:
+ replicaId: "$2"
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
+ name: "zookeeper_$4"
+ labels:
+ replicaId: "$2"
+ memberType: "$3"
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
+ name: "zookeeper_$4_$5"
+ labels:
+ replicaId: "$2"
+ memberType: "$3"
+ startDelaySeconds: 30
+ env: {}
+ resources: {}
+ path: /metrics
+ ports:
+ jmxxp:
+ containerPort: 9404
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: jmxxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: jmxxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+
+ zookeeper:
+ enabled: false
+ image:
+ repository: josdotso/zookeeper-exporter
+ tag: v1.1.2
+ pullPolicy: IfNotPresent
+ config:
+ logLevel: info
+ resetOnScrape: "true"
+ env: {}
+ resources: {}
+ path: /metrics
+ ports:
+ zookeeperxp:
+ containerPort: 9141
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: zookeeperxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: zookeeperxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+
+env:
+
+ ## Options related to JMX exporter.
+ JMXAUTH: "false"
+ JMXDISABLE: "false"
+ JMXPORT: 1099
+ JMXSSL: "false"
+
+ ## The port on which the server will accept client requests.
+ ZK_CLIENT_PORT: 2181
+
+ ## The port on which the ensemble performs leader election.
+ ZK_ELECTION_PORT: 3888
+
+ ## The JVM heap size.
+ ZK_HEAP_SIZE: 2G
+
+ ## The number of Ticks that an ensemble member is allowed to perform leader
+ ## election.
+ ZK_INIT_LIMIT: 5
+
+ ## The log level for the ZooKeeper process's logger.
+ ## Choices are `TRACE,DEBUG,INFO,WARN,ERROR,FATAL`.
+ ZK_LOG_LEVEL: INFO
+
+ ## The maximum number of concurrent client connections that
+ ## a server in the ensemble will accept.
+ ZK_MAX_CLIENT_CNXNS: 60
+
+ ## The maximum session timeout that the ensemble will allow a client to request.
+ ## Upstream default is `20 * ZK_TICK_TIME`
+ ZK_MAX_SESSION_TIMEOUT: 40000
+
+ ## The minimum session timeout that the ensemble will allow a client to request.
+ ## Upstream default is `2 * ZK_TICK_TIME`.
+ ZK_MIN_SESSION_TIMEOUT: 4000
+
+ ## The delay, in hours, between ZooKeeper log and snapshot cleanups.
+ ZK_PURGE_INTERVAL: 0
+
+ ## The port on which the leader will send events to followers.
+ ZK_SERVER_PORT: 2888
+
+ ## The number of snapshots that the ZooKeeper process will retain if
+ ## `ZK_PURGE_INTERVAL` is set to a value greater than `0`.
+ ZK_SNAP_RETAIN_COUNT: 3
+
+ ## The number of Ticks by which a follower may lag behind the ensemble's leader.
+ ZK_SYNC_LIMIT: 10
+
+ ## The number of wall clock ms that corresponds to a Tick for the ensemble's
+ ## internal time.
+ ZK_TICK_TIME: 2000
+
+jobs:
+ chroots:
+ enabled: false
+ activeDeadlineSeconds: 300
+ backoffLimit: 5
+ completions: 1
+ config:
+ create: []
+ # - /kafka
+ # - /ureplicator
+ env: []
+ parallelism: 1
+ resources: {}
+ restartPolicy: Never