Diffstat (limited to 'kubernetes/common/music/charts/zookeeper')
-rw-r--r--  kubernetes/common/music/charts/zookeeper/.helmignore                          21
-rw-r--r--  kubernetes/common/music/charts/zookeeper/Chart.yaml                           15
-rw-r--r--  kubernetes/common/music/charts/zookeeper/OWNERS                                6
-rw-r--r--  kubernetes/common/music/charts/zookeeper/README.md                           140
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/NOTES.txt                   7
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/_helpers.tpl               32
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/config-jmx-exporter.yaml   19
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/job-chroots.yaml           62
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/poddisruptionbudget.yaml   17
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/service-headless.yaml      21
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/service.yaml               23
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/statefulset.yaml          188
-rw-r--r--  kubernetes/common/music/charts/zookeeper/templates/volumes.yaml               25
-rw-r--r--  kubernetes/common/music/charts/zookeeper/values.yaml                         284
14 files changed, 860 insertions, 0 deletions
diff --git a/kubernetes/common/music/charts/zookeeper/.helmignore b/kubernetes/common/music/charts/zookeeper/.helmignore
new file mode 100644
index 0000000000..f0c1319444
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/kubernetes/common/music/charts/zookeeper/Chart.yaml b/kubernetes/common/music/charts/zookeeper/Chart.yaml
new file mode 100644
index 0000000000..01e81736f6
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/Chart.yaml
@@ -0,0 +1,15 @@
+name: zookeeper
+home: https://zookeeper.apache.org/
+version: 1.0.2
+appVersion: 3.4.10
+description: Centralized service for maintaining configuration information, naming,
+ providing distributed synchronization, and providing group services.
+icon: https://zookeeper.apache.org/images/zookeeper_small.gif
+sources:
+- https://github.com/apache/zookeeper
+- https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
+maintainers:
+- name: lachie83
+ email: lachlan.evenson@microsoft.com
+- name: kow3ns
+ email: owensk@google.com
diff --git a/kubernetes/common/music/charts/zookeeper/OWNERS b/kubernetes/common/music/charts/zookeeper/OWNERS
new file mode 100644
index 0000000000..dd9facde2a
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- lachie83
+- kow3ns
+reviewers:
+- lachie83
+- kow3ns
diff --git a/kubernetes/common/music/charts/zookeeper/README.md b/kubernetes/common/music/charts/zookeeper/README.md
new file mode 100644
index 0000000000..22bbac49dc
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/README.md
@@ -0,0 +1,140 @@
+# incubator/zookeeper
+
+This helm chart provides an implementation of the ZooKeeper [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) found in Kubernetes Contrib [Zookeeper StatefulSet](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper).
+
+## Prerequisites
+* Kubernetes 1.6+
+* PersistentVolume support on the underlying infrastructure
+* A dynamic provisioner for the PersistentVolumes
+* A familiarity with [Apache ZooKeeper 3.4.x](https://zookeeper.apache.org/doc/current/)
+
+## Chart Components
+This chart will do the following:
+
+* Create a fixed size ZooKeeper ensemble using a [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/).
+* Create a [PodDisruptionBudget](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-disruption-budget/) so `kubectl drain` will respect the Quorum size of the ensemble.
+* Create a [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/) to control the domain of the ZooKeeper ensemble.
+* Create a Service configured to connect to the available ZooKeeper instance on the configured client port.
+* Optionally apply a [Pod Anti-Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) to spread the ZooKeeper ensemble across nodes.
+* Optionally start JMX Exporter and Zookeeper Exporter containers inside Zookeeper pods.
+* Optionally create a job which creates Zookeeper chroots (e.g. `/kafka1`); a values sketch follows this list.
+
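+For example, a minimal values sketch that enables the chroots job (the
+`/kafka1` path is illustrative, not a chart default):
+
+```yaml
+jobs:
+  chroots:
+    enabled: true
+    config:
+      create:
+        - /kafka1
+```
+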
+## Installing the Chart
+You can install the chart with the release name `zookeeper` as below.
+
+```console
+$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
+$ helm install --name zookeeper incubator/zookeeper
+```
+
+If you do not specify a name, helm will select a name for you.
+
+### Installed Components
+You can use `helm status` to view all of the installed components.
+
+```console
+$ helm status zookeeper
+NAME: zookeeper
+LAST DEPLOYED: Wed Apr 11 17:09:48 2018
+NAMESPACE: default
+STATUS: DEPLOYED
+
+RESOURCES:
+==> v1beta1/PodDisruptionBudget
+NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE
+zookeeper N/A 1 1 2m
+
+==> v1/Service
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+zookeeper-headless ClusterIP None <none> 2181/TCP,3888/TCP,2888/TCP 2m
+zookeeper ClusterIP 10.98.179.165 <none> 2181/TCP 2m
+
+==> v1beta1/StatefulSet
+NAME DESIRED CURRENT AGE
+zookeeper 3 3 2m
+```
+
+1. `statefulsets/zookeeper` is the StatefulSet created by the chart.
+1. `po/zookeeper-<0|1|2>` are the Pods created by the StatefulSet. Each Pod has a single container running a ZooKeeper server.
+1. `svc/zookeeper-headless` is the Headless Service used to control the network domain of the ZooKeeper ensemble.
+1. `svc/zookeeper` is a Service that can be used by clients to connect to an available ZooKeeper server.
+
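+As a quick sanity check, assuming the default names above, you can ask each
+server for its current role:
+
+```console
+$ kubectl exec zookeeper-0 -- /opt/zookeeper/bin/zkServer.sh status
+```
+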
+## Configuration
+You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install --name my-release -f values.yaml incubator/zookeeper
+```
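+
+As a sketch, individual parameters can also be overridden at install time
+(both keys below exist in this chart's values.yaml):
+
+```console
+$ helm install --name zookeeper --set replicaCount=5,persistence.size=8Gi incubator/zookeeper
+```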
+
+## Default Values
+
+- You can find all user-configurable settings, their defaults and commentary about them in [values.yaml](values.yaml).
+
+## Deep Dive
+
+### Image Details
+The image used for this chart is based on Ubuntu 16.04 LTS. This image is larger than Alpine or BusyBox, but it provides glibc, rather than uClibc or musl, and a JVM release that is built against it. You can easily convert this chart to run against a smaller image with a JVM that is built against that image's libc. However, as far as we know, no Hadoop vendor supports, or has verified, ZooKeeper running on such a JVM.
+
+### JVM Details
+The Java Virtual Machine used for this chart is the OpenJDK 8u111 JRE (headless).
+
+### ZooKeeper Details
+The ZooKeeper version is the latest stable version (3.4.10). The distribution is installed into /opt/zookeeper-3.4.10. This directory is symbolically linked to /opt/zookeeper. Symlinks are created to simulate an RPM installation into /usr.
+
+### Failover
+You can test failover by killing the leader. Insert a key:
+```console
+$ kubectl exec zookeeper-0 -- /opt/zookeeper/bin/zkCli.sh create /foo bar;
+$ kubectl exec zookeeper-2 -- /opt/zookeeper/bin/zkCli.sh get /foo;
+```
+
+Watch existing members:
+```console
+$ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zk-${i} $(echo stats | nc <pod-name>-${i}.<headless-service-name>:2181 | grep Mode); sleep 1; done; done';
+
+zk-2 Mode: follower
+zk-0 Mode: follower
+zk-1 Mode: leader
+zk-2 Mode: follower
+```
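+
+With this chart's default names (release `zookeeper`, so pods `zookeeper-<i>`
+and headless service `zookeeper-headless`), the placeholders resolve to:
+
+```console
+$ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zk-${i} $(echo stats | nc zookeeper-${i}.zookeeper-headless:2181 | grep Mode); sleep 1; done; done'
+```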
+
+Delete Pods and wait for the StatefulSet controller to bring them back up:
+```console
+$ kubectl delete po -l app=zookeeper
+$ kubectl get po --watch-only
+NAME READY STATUS RESTARTS AGE
+zookeeper-0 0/1 Running 0 35s
+zookeeper-0 1/1 Running 0 50s
+zookeeper-1 0/1 Pending 0 0s
+zookeeper-1 0/1 Pending 0 0s
+zookeeper-1 0/1 ContainerCreating 0 0s
+zookeeper-1 0/1 Running 0 19s
+zookeeper-1 1/1 Running 0 40s
+zookeeper-2 0/1 Pending 0 0s
+zookeeper-2 0/1 Pending 0 0s
+zookeeper-2 0/1 ContainerCreating 0 0s
+zookeeper-2 0/1 Running 0 19s
+zookeeper-2 1/1 Running 0 41s
+```
+
+Check the previously inserted key:
+```console
+$ kubectl exec zookeeper-1 -- /opt/zookeeper/bin/zkCli.sh get /foo
+ionid = 0x354887858e80035, negotiated timeout = 30000
+
+WATCHER::
+
+WatchedEvent state:SyncConnected type:None path:null
+bar
+```
+
+### Scaling
+ZooKeeper can not be safely scaled in versions prior to 3.5.x. This chart currently uses 3.4.x. There are manual procedures for scaling a 3.4.x ensemble, but as noted in the [ZooKeeper 3.5.2 documentation](https://zookeeper.apache.org/doc/r3.5.2-alpha/zookeeperReconfig.html), these procedures require a rolling restart, are known to be error prone, and often result in data loss.
+
+While ZooKeeper 3.5.x does allow for dynamic ensemble reconfiguration (including scaling membership), the current status of the release is still alpha, and 3.5.x is therefore not recommended for production use.
+
+## Limitations
+* StatefulSet and PodDisruptionBudget are beta resources.
+* Only supports storage options that have backends for persistent volume claims.
diff --git a/kubernetes/common/music/charts/zookeeper/templates/NOTES.txt b/kubernetes/common/music/charts/zookeeper/templates/NOTES.txt
new file mode 100644
index 0000000000..4f7a27bd99
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/NOTES.txt
@@ -0,0 +1,7 @@
+Thank you for installing ZooKeeper on your Kubernetes cluster. More information
+about ZooKeeper can be found at https://zookeeper.apache.org/doc/current/
+
+Your connection string should look like:
+ {{ template "common.fullname" . }}-0.{{ template "common.fullname" . }}-headless:{{ .Values.service.ports.client.port }},{{ template "common.fullname" . }}-1.{{ template "common.fullname" . }}-headless:{{ .Values.service.ports.client.port }},...
+
+You can also use the client service {{ template "common.fullname" . }}:{{ .Values.service.ports.client.port }} to connect to an available ZooKeeper server.
diff --git a/kubernetes/common/music/charts/zookeeper/templates/_helpers.tpl b/kubernetes/common/music/charts/zookeeper/templates/_helpers.tpl
new file mode 100644
index 0000000000..1ef5353fa1
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "common.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
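+{{/*
+For example, with release name "zookeeper" and chart name "zookeeper", the
+release name already contains the chart name, so "common.fullname" should
+render to just "zookeeper".
+*/}}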
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "zookeeper.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/config-jmx-exporter.yaml b/kubernetes/common/music/charts/zookeeper/templates/config-jmx-exporter.yaml
new file mode 100644
index 0000000000..aeb9a2c031
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/config-jmx-exporter.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.exporters.jmx.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-jmx-exporter
+ labels:
+ app: {{ template "common.name" . }}
+ chart: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ config.yml: |-
+ hostPort: 127.0.0.1:{{ .Values.env.JMXPORT }}
+ lowercaseOutputName: {{ .Values.exporters.jmx.config.lowercaseOutputName }}
+ rules:
+{{ .Values.exporters.jmx.config.rules | toYaml | indent 6 }}
+ ssl: false
+ startDelaySeconds: {{ .Values.exporters.jmx.config.startDelaySeconds }}
+{{- end }}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/job-chroots.yaml b/kubernetes/common/music/charts/zookeeper/templates/job-chroots.yaml
new file mode 100644
index 0000000000..38592ddeac
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/job-chroots.yaml
@@ -0,0 +1,62 @@
+{{- if .Values.jobs.chroots.enabled }}
+{{- $root := . }}
+{{- $job := .Values.jobs.chroots }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ template "common.fullname" . }}-chroots
+ annotations:
+ "helm.sh/hook": post-install,post-upgrade
+ "helm.sh/hook-weight": "-5"
+ "helm.sh/hook-delete-policy": hook-succeeded
+ labels:
+ app: {{ template "common.name" . }}
+ chart: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ component: jobs
+ job: chroots
+spec:
+ activeDeadlineSeconds: {{ $job.activeDeadlineSeconds }}
+ backoffLimit: {{ $job.backoffLimit }}
+ completions: {{ $job.completions }}
+ parallelism: {{ $job.parallelism }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name }}
+ component: jobs
+ job: chroots
+ spec:
+ restartPolicy: {{ $job.restartPolicy }}
+ containers:
+ - name: main
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - /bin/bash
+ - -o
+ - pipefail
+ - -euc
+ {{- $port := .Values.service.ports.client.port }}
+ - >
+ sleep 15;
+ export SERVER={{ template "common.fullname" $root }}:{{ $port }};
+ {{- range $job.config.create }}
+ echo '==> {{ . }}';
+ echo '====> Create chroot if does not exist.';
+ zkCli.sh -server {{ template "common.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid'
+ || zkCli.sh -server {{ template "common.fullname" $root }}:{{ $port }} create {{ . }} "";
+ echo '====> Confirm chroot exists.';
+ zkCli.sh -server {{ template "common.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid';
+ echo '====> Chroot exists.';
+ {{- end }}
+ env:
+ {{- range $key, $value := $job.env }}
+ - name: {{ $key | upper | replace "." "_" }}
+ value: {{ $value | quote }}
+ {{- end }}
+ resources:
+{{ toYaml $job.resources | indent 12 }}
+{{- end -}}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/poddisruptionbudget.yaml b/kubernetes/common/music/charts/zookeeper/templates/poddisruptionbudget.yaml
new file mode 100644
index 0000000000..7a9f3fe105
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/poddisruptionbudget.yaml
@@ -0,0 +1,17 @@
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "common.fullname" . }}
+ labels:
+ app: {{ template "common.name" . }}
+ chart: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ component: server
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name }}
+ component: server
+{{ toYaml .Values.podDisruptionBudget | indent 2 }}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/service-headless.yaml b/kubernetes/common/music/charts/zookeeper/templates/service-headless.yaml
new file mode 100644
index 0000000000..70ebf6013e
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/service-headless.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "common.fullname" . }}-headless
+ labels:
+ app: {{ template "common.name" . }}
+ chart: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ clusterIP: None
+ ports:
+{{- range $key, $port := .Values.ports }}
+ - name: {{ $key }}
+ port: {{ $port.containerPort }}
+ targetPort: {{ $port.name }}
+ protocol: {{ $port.protocol }}
+{{- end }}
+ selector:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name }}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/service.yaml b/kubernetes/common/music/charts/zookeeper/templates/service.yaml
new file mode 100644
index 0000000000..6ac3066ecf
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/service.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.service.name }}
+ labels:
+ app: {{ template "common.name" . }}
+ chart: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ annotations:
+{{- with .Values.service.annotations }}
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{- range $key, $value := .Values.service.ports }}
+ - name: {{ $key }}
+{{ toYaml $value | indent 6 }}
+ {{- end }}
+ selector:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name }}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/statefulset.yaml b/kubernetes/common/music/charts/zookeeper/templates/statefulset.yaml
new file mode 100644
index 0000000000..1efd46657c
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/statefulset.yaml
@@ -0,0 +1,188 @@
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "common.fullname" . }}
+ labels:
+ app: {{ template "common.name" . }}
+ chart: {{ .Chart.Name }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ component: server
+spec:
+ serviceName: {{ template "common.fullname" . }}-headless
+ replicas: {{ .Values.replicaCount }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
+ selector:
+ matchLabels:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name }}
+ component: server
+ updateStrategy:
+{{ toYaml .Values.updateStrategy | indent 4 }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "common.name" . }}
+ release: {{ .Release.Name }}
+ component: server
+ {{- if .Values.podLabels }}
+ ## Custom pod labels
+ {{- range $key, $value := .Values.podLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ annotations:
+ {{- if .Values.podAnnotations }}
+ ## Custom pod annotations
+ {{- range $key, $value := .Values.podAnnotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ spec:
+{{- if .Values.schedulerName }}
+ schedulerName: "{{ .Values.schedulerName }}"
+{{- end }}
+ securityContext:
+{{ toYaml .Values.securityContext | indent 8 }}
+ containers:
+
+ - name: zookeeper
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - /bin/bash
+ - -xec
+ - zkGenConfig.sh && exec zkServer.sh start-foreground
+ ports:
+{{- range $key, $port := .Values.ports }}
+ - name: {{ $key }}
+{{ toYaml $port | indent 14 }}
+{{- end }}
+ livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 12 }}
+ readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 12 }}
+ env:
+ - name: ZK_REPLICAS
+ value: {{ .Values.replicaCount | quote }}
+ {{- range $key, $value := .Values.env }}
+ - name: {{ $key | upper | replace "." "_" }}
+ value: {{ $value | quote }}
+ {{- end }}
+ resources:
+{{ include "common.resources" . }}
+ volumeMounts:
+ - name: zookeeper-data
+ mountPath: /var/lib/zookeeper
+
+{{- if .Values.exporters.jmx.enabled }}
+ - name: jmx-exporter
+ image: "{{ .Values.exporters.jmx.image.repository }}:{{ .Values.exporters.jmx.image.tag }}"
+ imagePullPolicy: {{ .Values.exporters.jmx.image.pullPolicy }}
+ ports:
+ {{- range $key, $port := .Values.exporters.jmx.ports }}
+ - name: {{ $key }}
+{{ toYaml $port | indent 14 }}
+ {{- end }}
+ livenessProbe:
+{{ toYaml .Values.exporters.jmx.livenessProbe | indent 12 }}
+ readinessProbe:
+{{ toYaml .Values.exporters.jmx.readinessProbe | indent 12 }}
+ env:
+ - name: SERVICE_PORT
+ value: {{ .Values.exporters.jmx.ports.jmxxp.containerPort | quote }}
+ {{- with .Values.exporters.jmx.env }}
+ {{- range $key, $value := . }}
+ - name: {{ $key | upper | replace "." "_" }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ resources:
+{{ toYaml .Values.exporters.jmx.resources | indent 12 }}
+ volumeMounts:
+ - name: config-jmx-exporter
+ mountPath: /opt/jmx_exporter/config.yml
+ subPath: config.yml
+{{- end }}
+
+{{- if .Values.exporters.zookeeper.enabled }}
+ - name: zookeeper-exporter
+ image: "{{ .Values.exporters.zookeeper.image.repository }}:{{ .Values.exporters.zookeeper.image.tag }}"
+ imagePullPolicy: {{ .Values.exporters.zookeeper.image.pullPolicy }}
+ args:
+ - -bind-addr=:{{ .Values.exporters.zookeeper.ports.zookeeperxp.containerPort }}
+ - -metrics-path={{ .Values.exporters.zookeeper.path }}
+ - -zookeeper=localhost:{{ .Values.ports.client.containerPort }}
+ - -log-level={{ .Values.exporters.zookeeper.config.logLevel }}
+ - -reset-on-scrape={{ .Values.exporters.zookeeper.config.resetOnScrape }}
+ ports:
+ {{- range $key, $port := .Values.exporters.zookeeper.ports }}
+ - name: {{ $key }}
+{{ toYaml $port | indent 14 }}
+ {{- end }}
+ livenessProbe:
+{{ toYaml .Values.exporters.zookeeper.livenessProbe | indent 12 }}
+ readinessProbe:
+{{ toYaml .Values.exporters.zookeeper.readinessProbe | indent 12 }}
+ env:
+ {{- range $key, $value := .Values.exporters.zookeeper.env }}
+ - name: {{ $key | upper | replace "." "_" }}
+ value: {{ $value | quote }}
+ {{- end }}
+ resources:
+{{ toYaml .Values.exporters.zookeeper.resources | indent 12 }}
+{{- end }}
+
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- if (or .Values.exporters.jmx.enabled (not .Values.persistence.enabled)) }}
+ volumes:
+ {{- if .Values.exporters.jmx.enabled }}
+ - name: config-jmx-exporter
+ configMap:
+ name: {{ .Release.Name }}-jmx-exporter
+ {{- end }}
+ {{- if not .Values.persistence.enabled }}
+ - name: zookeeper-data
+ emptyDir: {}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: zookeeper-data
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ annotations:
+ volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass }}
+ spec:
+ storageClassName: {{ .Values.persistence.storageClass }}
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- end }}
diff --git a/kubernetes/common/music/charts/zookeeper/templates/volumes.yaml b/kubernetes/common/music/charts/zookeeper/templates/volumes.yaml
new file mode 100644
index 0000000000..b0c05fdbfd
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/templates/volumes.yaml
@@ -0,0 +1,25 @@
+{{ if .Values.persistence.enabled }}
+{{- $root := . -}}
+{{ range $i, $e := until (int $root.Values.replicaCount | default 3) }}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ $root.Release.Name }}-{{ $root.Values.service.name }}-{{ $i }}
+ namespace: {{ $root.Release.Namespace }}
+ labels:
+ type: {{ $root.Values.persistence.storageType }}
+ app: {{ $root.Values.service.name }}
+ chart: {{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}
+ release: {{ $root.Release.Name }}
+ heritage: {{ $root.Release.Service }}
+spec:
+ capacity:
+ storage: {{ $root.Values.persistence.size }}
+ accessModes:
+ - {{ $root.Values.persistence.accessMode }}
+ hostPath:
+ path: {{ $root.Values.persistence.mountPath }}/{{ $root.Release.Name }}/{{ $root.Values.persistence.mountSubPath }}-{{$i}}
+ persistentVolumeReclaimPolicy: {{ $root.Values.persistence.volumeReclaimPolicy }}
+{{ end }}
+{{ end }}
diff --git a/kubernetes/common/music/charts/zookeeper/values.yaml b/kubernetes/common/music/charts/zookeeper/values.yaml
new file mode 100644
index 0000000000..ea02e6151e
--- /dev/null
+++ b/kubernetes/common/music/charts/zookeeper/values.yaml
@@ -0,0 +1,284 @@
+## As weighted quorums are not supported, it is imperative that an odd number of replicas
+## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7.
+##
+## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set
+replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be 1, 3, 5, or 7.
+
+podDisruptionBudget:
+ maxUnavailable: 1 # Limits how many ZooKeeper pods may be unavailable due to voluntary disruptions.
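+ ## The map above is passed through verbatim to the PodDisruptionBudget spec,
+ ## so a minimum could be pinned instead (mutually exclusive with maxUnavailable), e.g.:
+ # minAvailable: 2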
+
+terminationGracePeriodSeconds: 1800 # Duration in seconds a ZooKeeper pod needs to terminate gracefully.
+
+## OnDelete requires you to manually delete each pod when making updates.
+## This approach is currently safer than RollingUpdate, because replication
+## may be incomplete when the replication source pod is killed.
+##
+## ref: http://blog.kubernetes.io/2017/09/kubernetes-statefulsets-daemonsets.html
+updateStrategy:
+ type: OnDelete # New pods are only created after you manually delete the old ones.
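+ ## With OnDelete, an update is rolled out by deleting one pod at a time and
+ ## waiting for its replacement to become Ready before the next, e.g.:
+ ## kubectl delete pod zookeeper-2 && kubectl get pod zookeeper-2 --watch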
+
+## refs:
+## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
+## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1
+image:
+ #repository: nexus3.onap.org:10001/library/zookeeper
+ #tag: 3.3
+ repository: gcr.io/google_samples/k8szk # Container image repository for zookeeper container.
+ tag: v3 # Container image tag for zookeeper container.
+ pullPolicy: IfNotPresent # Image pull criteria for zookeeper container.
+
+service:
+ name: zookeeper
+ type: ClusterIP # Exposes zookeeper on a cluster-internal IP.
+ annotations: {} # Arbitrary non-identifying metadata for zookeeper service.
+ ## AWS example for use with LoadBalancer service type.
+ # external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local
+ # service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
+ # service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ ports:
+ client:
+ port: 2181 # Service port number for client port.
+ targetPort: client # Service target port for client port.
+ protocol: TCP # Service port protocol for client port.
+
+
+ports:
+ client:
+ containerPort: 2181 # Port number for zookeeper container client port.
+ protocol: TCP # Protocol for zookeeper container client port.
+ election:
+ containerPort: 3888 # Port number for zookeeper container election port.
+ protocol: TCP # Protocol for zookeeper container election port.
+ server:
+ containerPort: 2888 # Port number for zookeeper container server port.
+ protocol: TCP # Protocol for zookeeper container server port.
+
+# Resource limit flavor; this chart defaults to large.
+flavor: large
+# Resource profiles for the different environments (small and large).
+resources:
+ small:
+ limits:
+ cpu: 1
+ memory: 1Gi
+ requests:
+ cpu: 500m
+ memory: 500Mi
+ large:
+ limits:
+ cpu: 3
+ memory: 2Gi
+ requests:
+ cpu: 2
+ memory: 1Gi
+ unlimited: {}
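+# `flavor` above selects one of these profiles (resolved through the shared
+# common.resources helper); it can presumably be overridden per install,
+# e.g. --set flavor=small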
+
+nodeSelector: {} # Node label-values required to run zookeeper pods.
+
+tolerations: [] # Node taint overrides for zookeeper pods.
+
+## Criteria by which pod label-values influence scheduling for zookeeper pods.
+affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ release: zookeeper
+
+podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods.
+
+podLabels: {} # Key/value pairs that are attached to zookeeper pods.
+
+livenessProbe:
+ exec:
+ command:
+ - zkOk.sh
+ initialDelaySeconds: 20
+
+readinessProbe:
+ exec:
+ command:
+ - zkOk.sh
+ initialDelaySeconds: 20
+
+securityContext:
+ fsGroup: 1000
+ #runAsUser: 1000
+
+persistence:
+ enabled: true
+ ## zookeeper data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
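+ ## For example (illustrative values, not chart defaults):
+ # storageClass: "-"    # disable dynamic provisioning
+ # storageClass: "gp2"  # explicitly request the AWS gp2 provisioner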
+ volumeReclaimPolicy: Retain
+ accessMode: ReadWriteOnce
+ mountPath: /dockerdata-nfs
+ mountSubPath: music/zookeeper
+ storageType: local
+ storageClass: ""
+ size: 4Gi
+
+## Exporters query apps for metrics and make those metrics available for
+## Prometheus to scrape.
+exporters:
+
+ jmx:
+ enabled: false
+ image:
+ repository: sscaling/jmx-prometheus-exporter
+ tag: 0.3.0
+ pullPolicy: IfNotPresent
+ config:
+ lowercaseOutputName: false
+ rules:
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
+ name: "zookeeper_$2"
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
+ name: "zookeeper_$3"
+ labels:
+ replicaId: "$2"
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
+ name: "zookeeper_$4"
+ labels:
+ replicaId: "$2"
+ memberType: "$3"
+ - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
+ name: "zookeeper_$4_$5"
+ labels:
+ replicaId: "$2"
+ memberType: "$3"
+ startDelaySeconds: 30
+ env: {}
+ resources: {}
+ path: /metrics
+ ports:
+ jmxxp:
+ containerPort: 9404
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: jmxxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: jmxxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+
+ zookeeper:
+ enabled: false
+ image:
+ repository: josdotso/zookeeper-exporter
+ tag: v1.1.2
+ pullPolicy: IfNotPresent
+ config:
+ logLevel: info
+ resetOnScrape: "true"
+ env: {}
+ resources: {}
+ path: /metrics
+ ports:
+ zookeeperxp:
+ containerPort: 9141
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: zookeeperxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: zookeeperxp
+ initialDelaySeconds: 30
+ periodSeconds: 15
+ timeoutSeconds: 60
+ failureThreshold: 8
+ successThreshold: 1
+
+env:
+
+ ## Options related to JMX exporter.
+ JMXAUTH: "false"
+ JMXDISABLE: "false"
+ JMXPORT: 1099
+ JMXSSL: "false"
+
+ ## The port on which the server will accept client requests.
+ ZK_CLIENT_PORT: 2181
+
+ ## The port on which the ensemble performs leader election.
+ ZK_ELECTION_PORT: 3888
+
+ ## The JVM heap size.
+ ZK_HEAP_SIZE: 2G
+
+ ## The number of Ticks that an ensemble member is allowed to take to perform
+ ## leader election.
+ ZK_INIT_LIMIT: 5
+
+ ## The log level for the ZooKeeper process's logger.
+ ## Choices are `TRACE,DEBUG,INFO,WARN,ERROR,FATAL`.
+ ZK_LOG_LEVEL: INFO
+
+ ## The maximum number of concurrent client connections that
+ ## a server in the ensemble will accept.
+ ZK_MAX_CLIENT_CNXNS: 60
+
+ ## The maximum session timeout that the ensemble will allow a client to request.
+ ## Upstream default is `20 * ZK_TICK_TIME`
+ ZK_MAX_SESSION_TIMEOUT: 40000
+
+ ## The minimum session timeout that the ensemble will allow a client to request.
+ ## Upstream default is `2 * ZK_TICK_TIME`.
+ ZK_MIN_SESSION_TIMEOUT: 4000
+
+ ## The delay, in hours, between ZooKeeper log and snapshot cleanups.
+ ZK_PURGE_INTERVAL: 0
+
+ ## The port on which the leader will send events to followers.
+ ZK_SERVER_PORT: 2888
+
+ ## The number of snapshots that the ZooKeeper process will retain if
+ ## `ZK_PURGE_INTERVAL` is set to a value greater than `0`.
+ ZK_SNAP_RETAIN_COUNT: 3
+
+ ## The number of Ticks by which a follower may lag behind the ensemble's leader.
+ ZK_SYNC_LIMIT: 10
+
+ ## The number of wall-clock milliseconds that corresponds to a Tick for the
+ ## ensemble's internal time.
+ ZK_TICK_TIME: 2000
+
+jobs:
+ chroots:
+ enabled: false
+ activeDeadlineSeconds: 300
+ backoffLimit: 5
+ completions: 1
+ config:
+ create: []
+ # - /kafka
+ # - /ureplicator
+ env: []
+ parallelism: 1
+ resources: {}
+ restartPolicy: Never