author     Todd Malsbary <todd.malsbary@intel.com>    2020-08-07 10:20:12 -0700
committer  Ritu Sood <ritu.sood@intel.com>            2020-09-23 20:00:03 +0000
commit     2cbcccc6d06324f3c91306ed73f262b631457a23 (patch)
tree       a9365925c2b1362c0cafec15cb79e9446e1c470c /deployments/helm/v2/emco/etcd/templates
parent     18c0ffea7babfcda74e4367413a331259c406c10 (diff)
Add playbooks for v2 emco chart.
Rename v2/onap4k8s to v2/emco, and rename sanity-check-for-v2.sh to emco.sh.
This allows --plugins emco to be passed to installer.sh in place of
--plugins onap4k8s.

Issue-ID: MULTICLOUD-1181
Signed-off-by: Todd <todd.malsbary@intel.com>
Change-Id: Idb427a8aa4c8aaff181965a540078c8cf6dd88aa
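For reference, the resulting installer invocation described above would look roughly like the following. This is a minimal sketch; only the --plugins flag change is taken from this commit message, and the location of installer.sh and any other arguments are omitted.

    # new plugin name introduced by this change
    ./installer.sh --plugins emco
    # previously
    ./installer.sh --plugins onap4k8s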
Diffstat (limited to 'deployments/helm/v2/emco/etcd/templates')
-rw-r--r--  deployments/helm/v2/emco/etcd/templates/pv.yaml            27
-rw-r--r--  deployments/helm/v2/emco/etcd/templates/service.yaml       37
-rw-r--r--  deployments/helm/v2/emco/etcd/templates/statefulset.yaml  236
3 files changed, 300 insertions, 0 deletions
diff --git a/deployments/helm/v2/emco/etcd/templates/pv.yaml b/deployments/helm/v2/emco/etcd/templates/pv.yaml
new file mode 100644
index 00000000..f0cf59ce
--- /dev/null
+++ b/deployments/helm/v2/emco/etcd/templates/pv.yaml
@@ -0,0 +1,27 @@
+{{ if .Values.persistence.enabled }}
+{{- $root := . -}}
+{{ range $i, $e := until (int $root.Values.replicaCount) }}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: {{ include "common.fullname" $root }}-data-{{ $i }}
+  namespace: {{ $root.Release.Namespace }}
+  labels:
+    type: {{ $root.Values.persistence.storageType }}
+    app: {{ include "common.fullname" $root }}
+    chart: {{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}
+    release: {{ $root.Release.Name }}
+    heritage: {{ $root.Release.Service }}
+spec:
+  capacity:
+    storage: {{ $root.Values.persistence.storage }}
+  accessModes:
+  - {{ $root.Values.persistence.accessMode }}
+  storageClassName: "{{ include "common.fullname" $root }}-data"
+  hostPath:
+    path: {{ $root.Values.persistence.mountPath }}/{{ $root.Release.Name }}/{{ $root.Values.persistence.mountSubPath }}-{{ $i }}
+  persistentVolumeReclaimPolicy: {{ $root.Values.persistence.volumeReclaimPolicy }}
+{{ end }}
+{{ end }}
+
diff --git a/deployments/helm/v2/emco/etcd/templates/service.yaml b/deployments/helm/v2/emco/etcd/templates/service.yaml
new file mode 100644
index 00000000..692faa9f
--- /dev/null
+++ b/deployments/helm/v2/emco/etcd/templates/service.yaml
@@ -0,0 +1,37 @@
+# Copyright 2019 Intel Corporation Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.servicename" . }}
+  annotations:
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+  labels:
+    heritage: "{{ .Release.Service }}"
+    release: "{{ .Release.Name }}"
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ include "common.name" . }}
+spec:
+  ports:
+  - name: {{ .Values.service.peerPortName }}
+    port: {{ .Values.service.peerInternalPort }}
+  - name: {{ .Values.service.clientPortName }}
+    port: {{ .Values.service.clientInternalPort }}
+  clusterIP: None
+  selector:
+    app: {{ include "common.name" . }}
+    release: "{{ .Release.Name }}"
+
diff --git a/deployments/helm/v2/emco/etcd/templates/statefulset.yaml b/deployments/helm/v2/emco/etcd/templates/statefulset.yaml
new file mode 100644
index 00000000..d0387f8e
--- /dev/null
+++ b/deployments/helm/v2/emco/etcd/templates/statefulset.yaml
@@ -0,0 +1,236 @@
+# Copyright © 2019 Intel Corporation Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "common.fullname" . }}
+  labels:
+    heritage: "{{ .Release.Service }}"
+    release: "{{ .Release.Name }}"
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    app: {{ include "common.name" . }}
+spec:
+  serviceName: {{ include "common.servicename" . }}
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      release: "{{ .Release.Name }}"
+      app: {{ include "common.name" . }}
+  template:
+    metadata:
+      labels:
+        release: "{{ .Release.Name }}"
+        app: {{ include "common.name" . }}
+    spec:
+{{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+      tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
+      containers:
+      - name: {{ include "common.fullname" . }}
+        image: "{{ .Values.repository }}/{{ .Values.image }}"
+        imagePullPolicy: "{{ .Values.pullPolicy }}"
+        ports:
+        - containerPort: {{ .Values.service.peerInternalPort }}
+          name: {{ .Values.service.peerPortName }}
+        - containerPort: {{ .Values.service.clientInternalPort }}
+          name: {{ .Values.service.clientPortName }}
+        {{- if eq .Values.liveness.enabled true }}
+        livenessProbe:
+          tcpSocket:
+            port: {{ .Values.service.clientInternalPort }}
+          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+          periodSeconds: {{ .Values.liveness.periodSeconds }}
+          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+        {{ end }}
+        resources:
+{{ include "common.resources" . | indent 10 }}
+        env:
+        - name: INITIAL_CLUSTER_SIZE
+          value: {{ .Values.replicaCount | quote }}
+        - name: SET_NAME
+          value: {{ include "common.fullname" . }}
+        - name: SERVICE_NAME
+          value: {{ include "common.servicename" . }}
+{{- if .Values.extraEnv }}
+{{ toYaml .Values.extraEnv | indent 8 }}
+{{- end }}
+        lifecycle:
+          preStop:
+            exec:
+              command:
+                - "/bin/sh"
+                - "-ec"
+                - |
+                  EPS=""
+                  for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
+                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
+                  done
+
+                  HOSTNAME=$(hostname)
+
+                  member_hash() {
+                      etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
+                  }
+
+                  SET_ID=${HOSTNAME##*[^0-9]}
+
+                  if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
+                      echo "Removing ${HOSTNAME} from etcd cluster"
+                      ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
+                      if [ $? -eq 0 ]; then
+                          # Remove everything, otherwise the cluster will no longer scale up
+                          rm -rf /var/run/etcd/*
+                      fi
+                  fi
+        command:
+          - "/bin/sh"
+          - "-ec"
+          - |
+            HOSTNAME=$(hostname)
+
+            # store the member id in the PVC for later member replacement
+            collect_member() {
+                while ! etcdctl member list &>/dev/null; do sleep 1; done
+                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
+                exit 0
+            }
+
+            eps() {
+                EPS=""
+                for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
+                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
+                done
+                echo ${EPS}
+            }
+
+            member_hash() {
+                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
+            }
+
+            # wait for the other pods to be up before trying to join, otherwise
+            # we get "no such host" errors when resolving the other members
+            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
+                while true; do
+                    echo "Waiting for ${SET_NAME}-${i}.${SERVICE_NAME} to come up"
+                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SERVICE_NAME} > /dev/null && break
+                    sleep 1s
+                done
+            done
+
+            # re-joining after failure?
+            if [[ -e /var/run/etcd/default.etcd && -f /var/run/etcd/member_id ]]; then
+                echo "Re-joining etcd member"
+                member_id=$(cat /var/run/etcd/member_id)
+
+                # re-join member
+                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SERVICE_NAME}:2380 | true
+                exec etcd --name ${HOSTNAME} \
+                    --listen-peer-urls http://0.0.0.0:2380 \
+                    --listen-client-urls http://0.0.0.0:2379 \
+                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
+                    --data-dir /var/run/etcd/default.etcd
+            fi
+
+            # etcd-SET_ID
+            SET_ID=${HOSTNAME##*[^0-9]}
+
+            # adding a new member to an existing cluster (assuming all initial pods are available)
+            if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
+                export ETCDCTL_ENDPOINT=$(eps)
+
+                # member already added?
+                MEMBER_HASH=$(member_hash)
+                if [ -n "${MEMBER_HASH}" ]; then
+                    # the member hash exists but etcd failed for some reason;
+                    # since the data dir has not been created yet, we can remove
+                    # the member and retrieve a new hash
+                    etcdctl member remove ${MEMBER_HASH}
+                fi
+
+                echo "Adding new member"
+                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SERVICE_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
+
+                if [ $? -ne 0 ]; then
+                    echo "Exiting"
+                    rm -f /var/run/etcd/new_member_envs
+                    exit 1
+                fi
+
+                cat /var/run/etcd/new_member_envs
+                source /var/run/etcd/new_member_envs
+
+                collect_member &
+
+                exec etcd --name ${HOSTNAME} \
+                    --listen-peer-urls http://0.0.0.0:2380 \
+                    --listen-client-urls http://0.0.0.0:2379 \
+                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
+                    --data-dir /var/run/etcd/default.etcd \
+                    --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
+                    --initial-cluster ${ETCD_INITIAL_CLUSTER} \
+                    --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
+            fi
+
+            PEERS=""
+            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
+                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SERVICE_NAME}:2380"
+            done
+
+            collect_member &
+
+            # join member
+            exec etcd --name ${HOSTNAME} \
+                --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
+                --listen-peer-urls http://0.0.0.0:2380 \
+                --listen-client-urls http://0.0.0.0:2379 \
+                --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
+                --initial-cluster-token etcd-cluster-1 \
+                --initial-cluster ${PEERS} \
+                --initial-cluster-state new \
+                --data-dir /var/run/etcd/default.etcd
+        volumeMounts:
+        - name: {{ include "common.fullname" . }}-data
+          mountPath: /var/run/etcd
+  {{- if .Values.persistence.enabled }}
+  volumeClaimTemplates:
+  - metadata:
+      name: {{ include "common.fullname" . }}-data
+    spec:
+      accessModes:
+      - "{{ .Values.persistence.accessMode }}"
+      resources:
+        requests:
+          # upstream recommended max is 700M
+          storage: "{{ .Values.persistence.storage }}"
+      storageClassName: {{ include "common.fullname" . }}-data
+  {{- else }}
+      volumes:
+      - name: {{ include "common.fullname" . }}-data
+  {{- if .Values.memoryMode }}
+        emptyDir:
+          medium: Memory
+  {{- else }}
+        emptyDir: {}
+  {{- end }}
+  {{- end }}
+
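For a quick local sanity check of the new templates, something like the following can be used. This is a hedged sketch: the chart path is taken from the diffstat above and the value keys (replicaCount, persistence.enabled) from the templates themselves, but the availability of helm and of the chart's dependencies (the "common" chart providing common.fullname and common.servicename) are assumptions.

    # Fetch the chart's dependencies, then render the templates with a few of
    # the values these files reference and inspect the output.
    helm dependency update deployments/helm/v2/emco/etcd
    helm template deployments/helm/v2/emco/etcd \
      --set replicaCount=3 \
      --set persistence.enabled=true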