Diffstat (limited to 'kubernetes/common')
11 files changed, 51 insertions, 58 deletions
diff --git a/kubernetes/common/cassandra/templates/statefulset.yaml b/kubernetes/common/cassandra/templates/statefulset.yaml
index 0c7a112bcb..4be3570851 100644
--- a/kubernetes/common/cassandra/templates/statefulset.yaml
+++ b/kubernetes/common/cassandra/templates/statefulset.yaml
@@ -74,7 +74,7 @@ spec:
           timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
           successThreshold: {{ .Values.liveness.successThreshold }}
           failureThreshold: {{ .Values.liveness.failureThreshold }}
-        {{ end -}}
+        {{ end }}
         readinessProbe:
           exec:
             command:
diff --git a/kubernetes/common/cassandra/values.yaml b/kubernetes/common/cassandra/values.yaml
index 17760a7858..f5fe589309 100644
--- a/kubernetes/common/cassandra/values.yaml
+++ b/kubernetes/common/cassandra/values.yaml
@@ -62,7 +62,7 @@ liveness:
   enabled: true

 readiness:
-  initialDelaySeconds: 60
+  initialDelaySeconds: 120
   periodSeconds: 10
   timeoutSeconds: 3
   successThreshold: 1
diff --git a/kubernetes/common/dgbuilder/values.yaml b/kubernetes/common/dgbuilder/values.yaml
index 944b63f212..4094801949 100644
--- a/kubernetes/common/dgbuilder/values.yaml
+++ b/kubernetes/common/dgbuilder/values.yaml
@@ -47,7 +47,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-dgbuilder-image:0.4.2
+image: onap/ccsdk-dgbuilder-image:0.4.4
 pullPolicy: Always

 # flag to enable debugging - application support required
diff --git a/kubernetes/common/etcd/templates/pv.yaml b/kubernetes/common/etcd/templates/pv.yaml
index eeaa64598d..65993e5f2a 100644
--- a/kubernetes/common/etcd/templates/pv.yaml
+++ b/kubernetes/common/etcd/templates/pv.yaml
@@ -19,19 +19,20 @@
 apiVersion: v1
 kind: PersistentVolume
 metadata:
-  name: {{ $root.Release.Name }}-{{ $root.Values.service.name }}-{{ $i }}
+  name: {{ include "common.fullname" $root }}-data-{{ $i }}
   namespace: {{ $root.Release.Namespace }}
   labels:
     type: {{ $root.Values.persistence.storageType }}
-    app: {{ $root.Values.service.name }}
+    app: {{ include "common.fullname" $root }}
     chart: {{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}
     release: {{ $root.Release.Name }}
     heritage: {{ $root.Release.Service }}
 spec:
   capacity:
-    storage: {{ $root.Values.persistence.size }}
+    storage: {{ $root.Values.persistence.storage }}
   accessModes:
     - {{ $root.Values.persistence.accessMode }}
+  storageClassName: "{{ include "common.fullname" $root }}-data"
   hostPath:
     path: {{ $root.Values.persistence.mountPath }}/{{ $root.Release.Name }}/{{ $root.Values.persistence.mountSubPath }}-{{ $i }}
   persistentVolumeReclaimPolicy: {{ $root.Values.persistence.volumeReclaimPolicy }}
diff --git a/kubernetes/common/etcd/templates/statefulset.yaml b/kubernetes/common/etcd/templates/statefulset.yaml
index ccc6b69971..7190c5bca0 100644
--- a/kubernetes/common/etcd/templates/statefulset.yaml
+++ b/kubernetes/common/etcd/templates/statefulset.yaml
@@ -11,18 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 apiVersion: apps/v1beta1
 kind: StatefulSet
 metadata:
-  name: {{ include "common.servicename" . }}
+  name: {{ include "common.fullname" . }}
   labels:
     heritage: "{{ .Release.Service }}"
     release: "{{ .Release.Name }}"
     chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
-    app: {{ template "common.name" . }}
+    app: {{ include "common.name" . }}
 spec:
-  serviceName: {{ include "common.servicename" . }}
+  serviceName: {{ include "common.servicename" .}}
   replicas: {{ .Values.replicaCount }}
   template:
     metadata:
@@ -45,7 +44,7 @@ spec:
 {{ toYaml .Values.tolerations | indent 8 }}
       {{- end }}
       containers:
-      - name: {{ include "common.servicename" . }}
+      - name: {{ include "common.fullname" . }}
        image: "{{ .Values.repository }}/{{ .Values.image }}"
        imagePullPolicy: "{{ .Values.pullPolicy }}"
        ports:
@@ -55,23 +54,20 @@ spec:
          name: {{ .Values.service.clientPortName }}
        {{- if eq .Values.liveness.enabled true }}
        livenessProbe:
-          exec:
-            command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy" ]
-          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
-          periodSeconds: {{ .Values.liveness.periodSeconds }}
-          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
-        {{ end -}}
-        readinessProbe:
-          exec:
-            command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy" ]
-          initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
-          periodSeconds: {{ .Values.readiness.periodSeconds }}
+          tcpSocket:
+            port: {{ .Values.service.clientInternalPort }}
+          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+          periodSeconds: {{ .Values.liveness.periodSeconds }}
+          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+        {{ end -}}
        resources:
{{ include "common.resources" . | indent 10 }}
        env:
        - name: INITIAL_CLUSTER_SIZE
          value: {{ .Values.replicaCount | quote }}
        - name: SET_NAME
+          value: {{ include "common.fullname" . }}
+        - name: SERVICE_NAME
          value: {{ include "common.servicename" . }}
{{- if .Values.extraEnv }}
{{ toYaml .Values.extraEnv | indent 8 }}
@@ -85,13 +81,13 @@ spec:
        - |
          EPS=""
          for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
-              EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
+              EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
          done

          HOSTNAME=$(hostname)

          member_hash() {
-              etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
+              etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
          }

          SET_ID=${HOSTNAME##*[^0-9]}
@@ -113,43 +109,43 @@ spec:
          # store member id into PVC for later member replacement
          collect_member() {
              while ! etcdctl member list &>/dev/null; do sleep 1; done
-              etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
+              etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
              exit 0
          }

          eps() {
              EPS=""
              for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
-                  EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
+                  EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
              done
              echo ${EPS}
          }

          member_hash() {
-              etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
+              etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
          }

          # we should wait for other pods to be up before trying to join
          # otherwise we got "no such host" errors when trying to resolve other members
          for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
              while true; do
-                  echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
-                  ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
+                  echo "Waiting for ${SET_NAME}-${i}.${SERVICE_NAME} to come up"
+                  ping -W 1 -c 1 ${SET_NAME}-${i}.${SERVICE_NAME} > /dev/null && break
                  sleep 1s
              done
          done

          # re-joining after failure?
-          if [ -e /var/run/etcd/default.etcd ]; then
+          if [[ -e /var/run/etcd/default.etcd && -f /var/run/etcd/member_id ]]; then
              echo "Re-joining etcd member"
              member_id=$(cat /var/run/etcd/member_id)

              # re-join member
-              ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380 | true
+              ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SERVICE_NAME}:2380 | true
              exec etcd --name ${HOSTNAME} \
                  --listen-peer-urls http://0.0.0.0:2380 \
                  --listen-client-urls http://0.0.0.0:2379\
-                  --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
+                  --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                  --data-dir /var/run/etcd/default.etcd
          fi
@@ -170,7 +166,7 @@ spec:
          fi

          echo "Adding new member"
-          etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
+          etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SERVICE_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs

          if [ $? -ne 0 ]; then
              echo "Exiting"
@@ -186,37 +182,37 @@ spec:
              exec etcd --name ${HOSTNAME} \
                  --listen-peer-urls http://0.0.0.0:2380 \
                  --listen-client-urls http://0.0.0.0:2379 \
-                  --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
+                  --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                  --data-dir /var/run/etcd/default.etcd \
-                  --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
+                  --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                  --initial-cluster ${ETCD_INITIAL_CLUSTER} \
                  --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
          fi

          PEERS=""
          for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
-              PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
+              PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SERVICE_NAME}:2380"
          done

          collect_member &

          # join member
          exec etcd --name ${HOSTNAME} \
-              --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
+              --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
              --listen-peer-urls http://0.0.0.0:2380 \
              --listen-client-urls http://0.0.0.0:2379 \
-              --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
+              --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
              --initial-cluster-token etcd-cluster-1 \
              --initial-cluster ${PEERS} \
              --initial-cluster-state new \
              --data-dir /var/run/etcd/default.etcd
        volumeMounts:
-        - name: {{ include "common.servicename" . }}-datadir
+        - name: {{ include "common.fullname" . }}-data
          mountPath: /var/run/etcd
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
  - metadata:
-      name: {{ include "common.servicename" . }}-data
+      name: {{ include "common.fullname" . }}-data
    spec:
      accessModes:
      - "{{ .Values.persistence.accessMode }}"
@@ -224,16 +220,10 @@ spec:
        requests:
          # upstream recommended max is 700M
          storage: "{{ .Values.persistence.storage }}"
-      {{- if .Values.persistence.storageClass }}
-      {{- if (eq "-" .Values.persistence.storageClass) }}
-      storageClassName: ""
-      {{- else }}
-      storageClassName: "{{ .Values.persistence.storageClass }}"
-      {{- end }}
-      {{- end }}
+      storageClassName: {{ include "common.fullname" . }}-data
  {{- else }}
      volumes:
-      - name: {{ include "common.servicename" . }}-datadir
+      - name: {{ include "common.fullname" . }}-data
  {{- if .Values.memoryMode }}
        emptyDir:
          medium: Memory
diff --git a/kubernetes/common/etcd/values.yaml b/kubernetes/common/etcd/values.yaml
index a999b0c530..7f53d22248 100644
--- a/kubernetes/common/etcd/values.yaml
+++ b/kubernetes/common/etcd/values.yaml
@@ -25,7 +25,7 @@ global:

 #repository: etcd
 repository: "k8s.gcr.io"
-image: "etcd-amd64:2.2.5"
+image: "etcd-amd64:3.2.24"
 pullPolicy: Always

 # default number of instances in the StatefulSet
@@ -38,16 +38,12 @@ affinity: {}
 # probe configuration parameters
 liveness:
   initialDelaySeconds: 90
-  periodSeconds: 10
-  timeoutSeconds: 10
+  periodSeconds: 30
+  timeoutSeconds: 5
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true

-readiness:
-  initialDelaySeconds: 90
-  periodSeconds: 10
-
 persistence:
   enabled: false
   ## etcd data Persistent Volume Storage Class
@@ -57,7 +53,7 @@ persistence:
   ## set, choosing the default provisioner.  (gp2 on AWS, standard on
   ## GKE, AWS & OpenStack)
   ##
-  storageClass: "-"
+  #storageClass: "-"
   accessMode: "ReadWriteOnce"
   storage: "1Gi"
   mountPath: /dockerdata-nfs
diff --git a/kubernetes/common/mariadb-galera/templates/statefulset.yaml b/kubernetes/common/mariadb-galera/templates/statefulset.yaml
index 6dc5a7aad6..b0b7174811 100644
--- a/kubernetes/common/mariadb-galera/templates/statefulset.yaml
+++ b/kubernetes/common/mariadb-galera/templates/statefulset.yaml
@@ -89,6 +89,7 @@ spec:
             - /usr/share/container-scripts/mysql/readiness-probe.sh
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
+          timeoutSeconds: {{ .Values.readiness.timeoutSeconds }}
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           exec:
diff --git a/kubernetes/common/mariadb-galera/values.yaml b/kubernetes/common/mariadb-galera/values.yaml
index e4c6550f2b..d91f4fd1bb 100644
--- a/kubernetes/common/mariadb-galera/values.yaml
+++ b/kubernetes/common/mariadb-galera/values.yaml
@@ -57,6 +57,7 @@ liveness:
 readiness:
   initialDelaySeconds: 15
   periodSeconds: 10
+  timeoutSeconds: 5

 ## Persist data to a persitent volume
 persistence:
diff --git a/kubernetes/common/music/charts/music-cassandra-job/templates/job.yaml b/kubernetes/common/music/charts/music-cassandra-job/templates/job.yaml
index 6ec282e44e..88f0c7465f 100644
--- a/kubernetes/common/music/charts/music-cassandra-job/templates/job.yaml
+++ b/kubernetes/common/music/charts/music-cassandra-job/templates/job.yaml
@@ -39,6 +39,8 @@ spec:
         command:
         - /root/ready.py
         args:
+        - --timeout
+        - "{{ .Values.global.readinessTimeout }}"
        - --container-name
        - music-cassandra
        env:
diff --git a/kubernetes/common/music/charts/music-cassandra-job/values.yaml b/kubernetes/common/music/charts/music-cassandra-job/values.yaml
index ff34cbd5bf..ac18195939 100644
--- a/kubernetes/common/music/charts/music-cassandra-job/values.yaml
+++ b/kubernetes/common/music/charts/music-cassandra-job/values.yaml
@@ -22,7 +22,9 @@ global:
   # readiness check
   readinessRepository: oomk8s
   readinessImage: readiness-check:2.0.0
-
+  # Set default to 4 hrs.
+  # On slow environments, delays this long have been seen.
+  readinessTimeout: 240
   # logging agent
   loggingRepository: docker.elastic.co
   loggingImage: beats/filebeat:5.5.0
diff --git a/kubernetes/common/network-name-gen/values.yaml b/kubernetes/common/network-name-gen/values.yaml
index 888a07a640..02c69ad1e5 100644
--- a/kubernetes/common/network-name-gen/values.yaml
+++ b/kubernetes/common/network-name-gen/values.yaml
@@ -54,7 +54,7 @@ mariadb-galera:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-apps-ms-neng:0.4.2
+image: onap/ccsdk-apps-ms-neng:0.4.4
 pullPolicy: IfNotPresent

 # application configuration
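
For reference, a minimal sketch of roughly how the reworked etcd liveness probe and naming environment variables could render after "helm template". The container name "onap-etcd", the service name "etcd", the 2379 client port, and the replica count are illustrative assumptions, not values taken from this change:

    # Hypothetical rendered snippet of the etcd StatefulSet container (illustrative names/values only)
    containers:
      - name: onap-etcd                      # assumed render of include "common.fullname"
        image: "k8s.gcr.io/etcd-amd64:3.2.24"
        livenessProbe:
          tcpSocket:
            port: 2379                       # .Values.service.clientInternalPort (assumed value)
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
        env:
          - name: INITIAL_CLUSTER_SIZE
            value: "3"                       # .Values.replicaCount (assumed value)
          - name: SET_NAME                   # StatefulSet name; pods are ${SET_NAME}-0, ${SET_NAME}-1, ...
            value: onap-etcd
          - name: SERVICE_NAME               # headless service; peers resolve as ${SET_NAME}-i.${SERVICE_NAME}
            value: etcd

The SET_NAME/SERVICE_NAME split matters because StatefulSet pod DNS takes the form <pod>.<governing-service>, so the bootstrap script needs both the pod-name prefix (now derived from common.fullname) and the service name (still common.servicename).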
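
Similarly, a hedged example of overriding the new music-cassandra-job readiness timeout from an enclosing chart's values file; the 480 figure is made up for illustration, the chart default being 240:

    # Hypothetical values override (illustrative number only)
    global:
      readinessTimeout: 480   # passed to /root/ready.py as --timeout by the job template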