Diffstat (limited to 'sdc-os-chef')
-rw-r--r--  sdc-os-chef/kubernetes/sdc/Chart.yaml                                          |   4
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-check-job-completion.yaml  |  82
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-environment-configmap.yaml |  99
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-be.yaml                   | 130
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-cs.yaml                   |  86
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-es.yaml                   |  63
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-fe.yaml                   | 107
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-kb.yaml                   |  72
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-pv-pvc.yaml               |  32
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-cs.yaml                   |  53
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-es.yaml                   |  47
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/secrets/sdc-cs-secret.yaml                |  14
-rw-r--r--  sdc-os-chef/kubernetes/sdc/templates/services/all-services.yaml                | 127
-rw-r--r--  sdc-os-chef/kubernetes/sdc/values.yaml                                         |  21
-rw-r--r--  sdc-os-chef/scripts/k8s/build_nsenter_exec.sh                                  |  22
-rw-r--r--  sdc-os-chef/scripts/k8s/deploy_k8s_sdc.sh                                      |  74
-rw-r--r--  sdc-os-chef/scripts/k8s/etc/bash_completion.d/nsenter                          |  63
-rwxr-xr-x  sdc-os-chef/scripts/k8s/get_helm.sh                                            | 230
-rw-r--r--  sdc-os-chef/scripts/k8s/install_helm.sh                                        |  20
-rw-r--r--  sdc-os-chef/scripts/k8s/install_kubectl.sh                                     |  19
-rw-r--r--  sdc-os-chef/scripts/k8s/install_minikube.sh                                    |  18
-rw-r--r--  sdc-os-chef/scripts/k8s/kubernetes_run.sh                                      |  77
22 files changed, 1460 insertions(+), 0 deletions(-)
diff --git a/sdc-os-chef/kubernetes/sdc/Chart.yaml b/sdc-os-chef/kubernetes/sdc/Chart.yaml
new file mode 100644
index 0000000000..668a9a641e
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: sdc
+version: 0.1.0
diff --git a/sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-check-job-completion.yaml b/sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-check-job-completion.yaml
new file mode 100644
index 0000000000..2561cae4ba
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-check-job-completion.yaml
@@ -0,0 +1,82 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: sdc-check-job-completion
+ namespace: onap-sdc
+data:
+ sdc_check_job_completion.py: |
+ #!/usr/bin/python
+ from __future__ import print_function
+ import time, argparse, logging, sys, os
+ import kubernetes.client
+ from kubernetes import client, config
+ from pprint import pprint
+
+ #extract env variables.
+ namespace = os.environ['NAMESPACE']
+ cert = os.environ['CERT']
+ host = os.environ['KUBERNETES_SERVICE_HOST']
+ token_path = os.environ['TOKEN']
+
+ with open(token_path, 'r') as token_file:
+ token = token_file.read().replace('\n', '')
+
+ client.configuration.api_key['authorization'] = token
+ client.configuration.api_key_prefix['authorization'] = 'Bearer'
+ client.configuration.host = "https://" + str(host)
+ client.configuration.ssl_ca_cert = cert
+
+ api_instance = client.BatchV1Api()
+
+ #setup logging
+ log = logging.getLogger(__name__)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+ handler.setLevel(logging.INFO)
+ log.addHandler(handler)
+ log.setLevel(logging.INFO)
+
+
+ def is_ready(job_name):
+ log.info( "[INFO] Checking if " + job_name + " is completed")
+ pretty = True
+ job_status = False
+
+ try:
+ api_response = api_instance.read_namespaced_job_status(job_name, namespace, pretty=pretty)
+ except Exception as e:
+ print("Exception when calling BatchV1Api->read_namespaced_job_status: %s\n" % e)
+
+ pprint(api_response)
+ if api_response.status.succeeded == 1:
+ job_status_type = api_response.status.conditions[0].type
+ if job_status_type == "Complete":
+ job_status = True
+
+ print("[DBG] jobStatus: " + unicode(job_status))
+ return job_status
+
+
+ def main(args):
+ for job_name in args:
+ timeout = time.time() + 60 * 10
+ while True:
+ ready = is_ready(job_name)
+ if ready is True :
+ break
+ elif time.time() > timeout:
+ log.warning( "timed out waiting for '" + job_name + "' to be ready")
+ exit(1)
+ else:
+ time.sleep(5)
+
+
+ if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Process some names.')
+ parser.add_argument('--job-name', action='append', required=True, help='A container name')
+ args = parser.parse_args()
+ arg_dict = vars(args)
+
+ for arg in arg_dict.itervalues():
+ main(arg)
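The script above polls every job passed with --job-name until its Kubernetes status carries a "Complete" condition, giving up after a 10-minute timeout per job. As a rough sketch (not part of the chart), assuming the kubernetes Python client is installed and that CERT and TOKEN point at the standard in-pod service-account files (KUBERNETES_SERVICE_HOST is injected automatically inside a pod), it could be exercised manually like this:

  export NAMESPACE=onap-sdc
  export CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  export TOKEN=/var/run/secrets/kubernetes.io/serviceaccount/token
  python sdc_check_job_completion.py --job-name sdc-config-cassandra --job-name sdc-config-elasticsearch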
diff --git a/sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-environment-configmap.yaml b/sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-environment-configmap.yaml
new file mode 100644
index 0000000000..966180022b
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/configmaps/sdc-environment-configmap.yaml
@@ -0,0 +1,99 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: sdc-environment
+ namespace: onap-sdc
+data:
+ AUTO.json: |
+ {
+ "name": "{{ .Values.env.name }}",
+ "description": "OpenSource-{{ .Values.env.name }}",
+ "cookbook_versions": {
+ "Deploy-SDandC": "= 1.0.0"
+ },
+ "json_class": "Chef::Environment",
+ "chef_type": "environment",
+
+ "default_attributes": {
+ "CS_VIP": "{{ .Values.env.vip }}",
+ "BE_VIP": "{{ .Values.env.vip }}",
+ "FE_VIP": "{{ .Values.env.vip }}",
+ "ES_VIP": "{{ .Values.env.vip }}",
+ "interfaces": {
+ "application": "eth0",
+ "private": "eth0"
+ },
+ "ECompP": {
+ "ecomp_rest_url": "http://portalapps.onap-portal:8989/ONAPPORTAL/auxapi",
+ "ueb_url_list": "dmaap.onap-message-router,dmaap.onap-message-router",
+ "app_secret": "XftIATw9Jr3VzAcPqt3NnJOu",
+ "app_key": "x9UfO7JsDn8BESVX",
+ "inbox_name": "ECOMP-PORTAL-INBOX",
+ "ecomp_redirect_url": "http://portalapps.onap-portal:8989/ONAPPORTAL/login.htm",
+ "app_topic_name": "ECOMP-PORTAL-OUTBOX-SDC1",
+ "decryption_key": "AGLDdG4D04BKm2IxIWEr8o=="
+ },
+ "UEB": {
+ "PublicKey": "iPIxkpAMI8qTcQj8",
+ "SecretKey": "Ehq3WyT4bkif4zwgEbvshGal",
+ "fqdn": ["dmaap.onap-message-router", "dmaap.onap-message-router"]
+ },
+ "Nodes": {
+ "CS": "{{ .Values.env.nodeCS }}",
+ "BE": "{{ .Values.env.nodeBE }}",
+ "FE": "{{ .Values.env.nodeFE }}",
+ "ES": "{{ .Values.env.nodeES }}"
+ },
+ "Designers": {
+ "DCAE": {
+ "dcae_host": "yyy",
+ "dcae_port": "yyy",
+ "dcae_path": "yyy",
+ "dcae_protocol": "yyy"
+ },
+ "WORKFLOW": {
+ "workflow_host": "yyy",
+ "workflow_port": "yyy",
+ "workflow_path": "yyy",
+ "workflow_protocol": "yyy"
+ }
+ }
+ },
+ "override_attributes": {
+ "FE": {
+ "http_port": "8181",
+ "https_port": "9443"
+ },
+ "BE": {
+ "http_port": "8080",
+ "https_port": "8443"
+ },
+ "elasticsearch": {
+ "cluster_name": "SDC-ES-",
+ "ES_path_home": "/usr/share/elasticsearch",
+ "ES_path_data": "/usr/share/elasticsearch/data",
+ "num_of_replicas": "0",
+ "num_of_shards": "1"
+ },
+
+ "cassandra": {
+ "concurrent_reads": "32",
+ "num_tokens": "256",
+ "data_dir": "/var/lib/cassandra/data",
+ "hinted_handoff_enabled": "true",
+ "cassandra_user": "asdc_user",
+ "cassandra_password": "Aa1234%^!",
+ "concurrent_writes": "32",
+ "cluster_name": "SDC-CS-",
+ "multithreaded_compaction": "false",
+ "cache_dir": "/var/lib/cassandra/saved_caches",
+ "log_file": "/var/lib/cassandra/log/system.log",
+ "phi_convict_threshold": "8",
+ "commitlog_dir": "/var/lib/cassandra/commitlog",
+ "socket_read_timeout": "20000",
+ "socket_connect_timeout": "20000",
+ "titan_connection_timeout": "10000"
+ }
+ }
+ }
diff --git a/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-be.yaml b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-be.yaml
new file mode 100644
index 0000000000..ab293c2444
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-be.yaml
@@ -0,0 +1,130 @@
+#{{ if not .Values.disableSdcSdcBe }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: sdc-be
+ name: sdc-be
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ replicas: 1
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 2
+ maxUnavailable: 0
+ selector:
+ matchLabels:
+ app: sdc-be
+ template:
+ metadata:
+ annotations:
+        checksum/config-environment: {{ include (print $.Template.BasePath "/configmaps/sdc-environment-configmap.yaml") . | sha256sum }}
+        checksum/config-check-job: {{ include (print $.Template.BasePath "/configmaps/sdc-check-job-completion.yaml") . | sha256sum }}
+ labels:
+ app: sdc-be
+ name: sdc-be
+ spec:
+ initContainers:
+ - name: sdc-be-readiness
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-es
+ - --container-name
+ - sdc-cs
+ - --container-name
+ - sdc-kb
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: sdc-job-completion
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ command:
+ - python
+ args:
+ - /root/readiness/sdc_check_job_completion.py
+ - --job-name
+ - sdc-config-cassandra
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - mountPath: /root/readiness
+ name: sdc-check-job-completion
+ containers:
+ - name: sdc-be
+ image: {{ .Values.image.sdcBackend }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ ports:
+ - containerPort: 8443
+ - containerPort: 8080
+ volumeMounts:
+ - mountPath: /usr/share/elasticsearch/data/
+ name: sdc-sdc-es-es
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ - mountPath: /etc/localtime
+ name: sdc-localtime
+ readOnly: true
+ - mountPath: /var/lib/jetty/logs
+ name: sdc-logs
+ - mountPath: /var/log/onap
+ name: sdc-logs-2
+ - mountPath: /tmp/logback.xml
+ name: sdc-logback
+ env:
+ - name: ENVNAME
+ value: AUTO
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ lifecycle:
+ postStart:
+ exec:
+ command: ["/bin/sh", "-c", "export LOG=wait_logback.log; touch $LOG; export SRC=/tmp/logback.xml; export DST=/var/lib/jetty/config/catalog-be/; while [ ! -e $DST ]; do echo 'Waiting for $DST...' >> $LOG; sleep 5; done; sleep 2; /bin/cp -f $SRC $DST; echo 'Done' >> $LOG"]
+ readinessProbe:
+ tcpSocket:
+ port: 8443
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ volumes:
+ - name: filebeat-conf
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+ - name: sdc-logs-2
+ emptyDir: {}
+ - name: sdc-data-filebeat
+ emptyDir: {}
+ - name: sdc-logback
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/sdc/be/logback.xml
+ - name: sdc-sdc-es-es
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/sdc-es/ES
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ - name: sdc-localtime
+ hostPath:
+ path: /etc/localtime
+ - name: sdc-logs
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/logs
+ - name: sdc-check-job-completion
+ configMap:
+ name: sdc-check-job-completion
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+#{{ end }}
diff --git a/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-cs.yaml b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-cs.yaml
new file mode 100644
index 0000000000..64d18370e8
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-cs.yaml
@@ -0,0 +1,86 @@
+#{{ if not .Values.disableSdcSdcCs }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: sdc-cs
+ name: sdc-cs
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ selector:
+ matchLabels:
+ app: sdc-cs
+ template:
+ metadata:
+ labels:
+ app: sdc-cs
+ name: sdc-cs
+ spec:
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-es
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: sdc-cs-readiness
+ containers:
+ - name: sdc-cs
+ image: {{ .Values.image.sdcCassandra }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ env:
+ - name: ENVNAME
+ value: AUTO
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: ES_HEAP_SIZE
+ value: "1024M"
+ - name: CS_PASSWORD
+ valueFrom:
+ secretKeyRef: {name: sdc-cs-secret, key: cs_password}
+ volumeMounts:
+ - mountPath: /var/lib/cassandra/
+ name: sdc-sdc-cs-cs
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ - mountPath: /etc/localtime
+ name: sdc-localtime
+ readOnly: true
+ - mountPath: /var/lib/jetty/logs
+ name: sdc-logs
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - /var/lib/ready/probe.sh
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ volumes:
+ - name: sdc-sdc-cs-cs
+ persistentVolumeClaim:
+ claimName: sdc-cs-db
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ - name: sdc-localtime
+ hostPath:
+ path: /etc/localtime
+ - name: sdc-logs
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/logs
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+#{{ end }}
diff --git a/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-es.yaml b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-es.yaml
new file mode 100644
index 0000000000..d673e40d2e
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-es.yaml
@@ -0,0 +1,63 @@
+#{{ if not .Values.disableSdcSdcEs }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: sdc-es
+ name: sdc-es
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ selector:
+ matchLabels:
+ app: sdc-es
+ template:
+ metadata:
+ labels:
+ app: sdc-es
+ name: sdc-es
+ spec:
+ containers:
+ - image: {{ .Values.image.sdcElasticsearch }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: sdc-es
+ env:
+ - name: ENVNAME
+ value: "AUTO"
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: ES_HEAP_SIZE
+ value: "1024M"
+ volumeMounts:
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ - mountPath: /etc/localtime
+ name: sdc-localtime
+ readOnly: true
+ - mountPath: /var/lib/jetty/logs
+ name: sdc-logs
+ ports:
+ - containerPort: 9200
+ - containerPort: 9300
+ readinessProbe:
+ httpGet:
+ path: "_cluster/health?wait_for_status=yellow&timeout=120s"
+ port: 9200
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 5
+ volumes:
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ - name: sdc-localtime
+ hostPath:
+ path: /etc/localtime
+ - name: sdc-logs
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/logs
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+#{{ end }}
diff --git a/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-fe.yaml b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-fe.yaml
new file mode 100644
index 0000000000..c21efdcb8f
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-fe.yaml
@@ -0,0 +1,107 @@
+#{{ if not .Values.disableSdcSdcFe }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: sdc-fe
+ name: sdc-fe
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ selector:
+ matchLabels:
+ app: sdc-fe
+ template:
+ metadata:
+ labels:
+ app: sdc-fe
+ name: sdc-fe
+ spec:
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-es
+ - --container-name
+ - sdc-cs
+ - --container-name
+ - sdc-kb
+ - --container-name
+ - sdc-be
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: sdc-fe-readiness
+ volumes:
+# - name: filebeat-conf
+# hostPath:
+# path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+ - name: sdc-logs-2
+ emptyDir: {}
+ - name: sdc-data-filebeat
+ emptyDir: {}
+# - name: sdc-logback
+# hostPath:
+# path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/sdc/fe/logback.xml
+# - name: sdc-sdc-es-es
+# hostPath:
+# path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/sdc-es/ES
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ - name: sdc-localtime
+ hostPath:
+ path: /etc/localtime
+# - name: sdc-logs
+# hostPath:
+# path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/logs
+# - name: sdc-fe-config
+# hostPath:
+# path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/sdc-fe/FE_2_setup_configuration.rb
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+ containers:
+ - name: sdc-fe
+ env:
+ - name: ENVNAME
+ value: AUTO
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ image: {{ .Values.image.sdcFrontend }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ volumeMounts:
+# - mountPath: /usr/share/elasticsearch/data/
+# name: sdc-sdc-es-es
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ - mountPath: /etc/localtime
+ name: sdc-localtime
+ readOnly: true
+# - mountPath: /var/lib/jetty/logs
+# name: sdc-logs
+# - mountPath: /var/log/onap
+# name: sdc-logs-2
+# - mountPath: /root/chef-solo/cookbooks/sdc-catalog-fe/recipes/FE_2_setup_configuration.rb
+# name: sdc-fe-config
+# - mountPath: /tmp/logback.xml
+# name: sdc-logback
+ lifecycle:
+ postStart:
+ exec:
+ command: ["/bin/sh", "-c", "export LOG=wait_logback.log; touch $LOG; export SRC=/tmp/logback.xml; export DST=/var/lib/jetty/config/catalog-fe/; while [ ! -e $DST ]; do echo 'Waiting for $DST...' >> $LOG; sleep 5; done; sleep 2; /bin/cp -f $SRC $DST; echo 'Done' >> $LOG"]
+ ports:
+ - containerPort: 9443
+ - containerPort: 8181
+ readinessProbe:
+ tcpSocket:
+ port: 8181
+ initialDelaySeconds: 5
+ periodSeconds: 10
+#{{ end }}
diff --git a/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-kb.yaml b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-kb.yaml
new file mode 100644
index 0000000000..5c7f4a4614
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-kb.yaml
@@ -0,0 +1,72 @@
+#{{ if not .Values.disableSdcSdcKb }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: sdc-kb
+ name: sdc-kb
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ selector:
+ matchLabels:
+ app: sdc-kb
+ template:
+ metadata:
+ labels:
+ app: sdc-kb
+ name: sdc-kb
+ spec:
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-es
+ - --container-name
+ - sdc-cs
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: sdc-kb-readiness
+ containers:
+ - env:
+ - name: ENVNAME
+ value: AUTO
+ - name: ELASTICSEARCH_URL
+ value: http://sdc-es:9200
+ image: {{ .Values.image.sdcKibana }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: sdc-kb
+ volumeMounts:
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ - mountPath: /etc/localtime
+ name: sdc-localtime
+ readOnly: true
+ - mountPath: /var/lib/jetty/logs
+ name: sdc-logs
+ ports:
+ - containerPort: 5601
+ readinessProbe:
+ tcpSocket:
+ port: 5601
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ volumes:
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ - name: sdc-localtime
+ hostPath:
+ path: /etc/localtime
+ - name: sdc-logs
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/logs
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+#{{ end }}
diff --git a/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-pv-pvc.yaml b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-pv-pvc.yaml
new file mode 100644
index 0000000000..c8ce9531c2
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/deployments/sdc-pv-pvc.yaml
@@ -0,0 +1,32 @@
+#{{ if not .Values.disableSdcSdcCs }}
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: "{{ .Values.nsPrefix }}-sdc-cs-db"
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+ labels:
+ name: "{{ .Values.nsPrefix }}-sdc-cs-db"
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/sdc-cs/CS
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: sdc-cs-db
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: "{{ .Values.nsPrefix }}-sdc-cs-db"
+#{{ end }}
\ No newline at end of file
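The PersistentVolume above is hostPath-backed, so the directory must already exist on the Kubernetes node before the chart is installed. A preparation sketch, assuming the default nsPrefix of "onap" from values.yaml and the single-node minikube setup used by the scripts below:

  sudo mkdir -p /dockerdata-nfs/onap/sdc/sdc-cs/CS
  sudo mkdir -p /dockerdata-nfs/onap/sdc/logs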
diff --git a/sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-cs.yaml b/sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-cs.yaml
new file mode 100644
index 0000000000..e371b43377
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-cs.yaml
@@ -0,0 +1,53 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: sdc-config-cassandra
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+ labels:
+ app: sdc-config-cassandra
+spec:
+ template:
+ metadata:
+ name: sdc-cs-init
+ spec:
+ initContainers:
+ - name: sdc-init-cs-readiness
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-cs
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ containers:
+ - name: sdc-config-cs
+ image: {{ .Values.image.sdcCassandraInit }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ env:
+ - name: ENVNAME
+ value: "AUTO"
+ - name: SDC_USER
+ valueFrom:
+ secretKeyRef: {name: sdc-cs-secret, key: sdc_user}
+ - name: SDC_PASSWORD
+ valueFrom:
+ secretKeyRef: {name: sdc-cs-secret, key: sdc_password}
+ volumes:
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+ restartPolicy: Never
diff --git a/sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-es.yaml b/sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-es.yaml
new file mode 100644
index 0000000000..e0f6466636
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/jobs/sdc-config-es.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: sdc-config-elasticsearch
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+ labels:
+ app: sdc-config-elasticsearch
+spec:
+ template:
+ metadata:
+ name: sdc-es-init
+ spec:
+ initContainers:
+ - name: sdc-init-es-readiness
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-es
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ containers:
+ - name: sdc-config-es
+ image: {{ .Values.image.sdcElasticsearchInit }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - mountPath: /root/chef-solo/environments/
+ name: sdc-environments
+ env:
+ - name: ENVNAME
+ value: "AUTO"
+ volumes:
+ - name: sdc-environments
+ configMap:
+ name: sdc-environment
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+ restartPolicy: Never
diff --git a/sdc-os-chef/kubernetes/sdc/templates/secrets/sdc-cs-secret.yaml b/sdc-os-chef/kubernetes/sdc/templates/secrets/sdc-cs-secret.yaml
new file mode 100644
index 0000000000..2e63c60538
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/secrets/sdc-cs-secret.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: sdc-cs-secret
+ namespace: onap-sdc
+type: Opaque
+data:
+ #application user
+ sdc_user: YXNkY191c2Vy
+ sdc_password: QWExMjM0JV4h
+ #default user:
+ cs_password: b25hcDEyMyNAIQ==
+
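These values are plain base64, not encrypted; they can be regenerated or inspected with standard tooling, for example:

  echo -n 'asdc_user' | base64        # -> YXNkY191c2Vy
  echo 'QWExMjM0JV4h' | base64 -d     # decodes sdc_password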
diff --git a/sdc-os-chef/kubernetes/sdc/templates/services/all-services.yaml b/sdc-os-chef/kubernetes/sdc/templates/services/all-services.yaml
new file mode 100644
index 0000000000..4100dc24a4
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/templates/services/all-services.yaml
@@ -0,0 +1,127 @@
+#{{ if not .Values.disableSdcSdcEs }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: sdc-es
+ name: sdc-es
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ ports:
+ - name: sdc-es-port-9200
+ port: 9200
+ - name: sdc-es-port-9300
+ port: 9300
+ selector:
+ app: sdc-es
+ clusterIP: None
+#{{ end }}
+#{{ if not .Values.disableSdcSdcCs }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: sdc-cs
+ name: sdc-cs
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ ports:
+ - name: sdc-cs-port-9042
+ port: 9042
+ - name: sdc-cs-port-9160
+ port: 9160
+ selector:
+ app: sdc-cs
+ clusterIP: None
+#{{ end }}
+#{{ if not .Values.disableSdcSdcKb }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: sdc-kb
+ name: sdc-kb
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ ports:
+ - name: sdc-kb-port-5601
+ port: 5601
+ selector:
+ app: sdc-kb
+ clusterIP: None
+#{{ end }}
+#{{ if not .Values.disableSdcSdcBe }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: sdc-be
+ name: sdc-be
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "sdc",
+ "version": "v1",
+ "url": "/sdc/v1",
+ "protocol": "REST",
+ "port": "8080",
+ "visualRange":"1"
+ },
+ {
+ "serviceName": "sdc-deprecated",
+ "version": "v1",
+ "url": "/sdc/v1",
+ "protocol": "REST",
+ "port": "8080",
+ "visualRange":"1",
+ "path":"/sdc/v1"
+ }
+ ]'
+spec:
+ ports:
+ - name: sdc-be-port-8443
+ nodePort: {{ .Values.nodePortPrefix }}04
+ port: 8443
+ - name: sdc-be-port-8080
+ nodePort: {{ .Values.nodePortPrefix }}05
+ port: 8080
+ selector:
+ app: sdc-be
+ type: NodePort
+#{{ end }}
+#{{ if not .Values.disableSdcSdcFe }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: sdc-fe
+ name: sdc-fe
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "sdc-gui",
+ "version": "v1",
+ "url": "/sdc1",
+ "protocol": "UI",
+ "port": "8181",
+ "visualRange":"0|1"
+ }
+ ]'
+spec:
+ ports:
+ - name: sdc-fe-port-9443
+ nodePort: {{ .Values.nodePortPrefix }}07
+ port: 9443
+ - name: sdc-fe-port-8181
+ nodePort: {{ .Values.nodePortPrefix }}06
+ port: 8181
+ selector:
+ app: sdc-fe
+ type: NodePort
+#{{ end }}
\ No newline at end of file
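With the default nodePortPrefix of 302 from values.yaml, the templated NodePorts resolve to 30204/30205 for sdc-be and 30207/30206 for sdc-fe. A quick smoke test of the frontend UI port, assuming the minikube host set up by the scripts below, might look like:

  curl -s -o /dev/null -w '%{http_code}\n' http://$(minikube ip):30206/sdc1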
diff --git a/sdc-os-chef/kubernetes/sdc/values.yaml b/sdc-os-chef/kubernetes/sdc/values.yaml
new file mode 100644
index 0000000000..a6950b262b
--- /dev/null
+++ b/sdc-os-chef/kubernetes/sdc/values.yaml
@@ -0,0 +1,21 @@
+nsPrefix: onap
+pullPolicy: IfNotPresent
+nodePortPrefix: 302
+image:
+ readiness: oomk8s/readiness-check:1.0.0
+ sdcKibana: nexus3.onap.org:10001/onap/sdc-kibana:1.2-STAGING-latest
+ sdcFrontend: nexus3.onap.org:10001/onap/sdc-frontend:1.2-STAGING-latest
+ sdcElasticsearch: nexus3.onap.org:10001/onap/sdc-elasticsearch:1.2.0-STAGING-latest
+ sdcCassandra: nexus3.onap.org:10001/onap/sdc-cassandra:1.2.0-STAGING-latest
+ sdcBackend: nexus3.onap.org:10001/onap/sdc-backend:1.2-STAGING-latest
+ sdcElasticsearchInit: nexus3.onap.org:10001/onap/sdc-init-elasticsearch:1.2-STAGING-latest
+ sdcCassandraInit: vulpe03/cqlsh
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
+
+env:
+ name: AUTO
+ vip: sdc-cs.onap-sdc
+ nodeCS: sdc-cs.onap-sdc
+ nodeES: sdc-es.onap-sdc
+ nodeBE: sdc-be.onap-sdc
+  nodeFE: sdc-fe.onap-sdc
\ No newline at end of file
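Any of these defaults can be overridden at install time with --set; a hypothetical example that forces image re-pulls would be:

  sudo helm install sdc --name onap-sdc --set pullPolicy=Always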
diff --git a/sdc-os-chef/scripts/k8s/build_nsenter_exec.sh b/sdc-os-chef/scripts/k8s/build_nsenter_exec.sh
new file mode 100644
index 0000000000..7ee1196d98
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/build_nsenter_exec.sh
@@ -0,0 +1,22 @@
+# Ubuntu 14.04 does not ship nsenter - building it directly on the host would require installing build tools etc.
+# To keep the system clean, nsenter is built inside a container and the resulting binary is then copied to the host.
+# Note - it is also possible to run nsenter from a container (not tried here): https://github.com/jpetazzo/nsenter
+
+# start a container
+docker run --name nsenter -it ubuntu:14.04 bash
+
+## inside the container
+apt-get update
+apt-get install git build-essential libncurses5-dev libslang2-dev gettext zlib1g-dev libselinux1-dev debhelper lsb-release pkg-config po-debconf autoconf automake autopoint libtool bison
+
+git clone git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git util-linux
+cd util-linux/
+
+./autogen.sh
+./configure --without-python --disable-all-programs --enable-nsenter
+make
+
+## from a different shell - on the host
+echo "[Action required] From a different shell on the host, run the following:"
+docker cp nsenter:/util-linux/nsenter /usr/local/bin/
+docker cp nsenter:/util-linux/bash-completion/nsenter /etc/bash_completion.d/nsenter
\ No newline at end of file
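Once the binary has been copied out of the container, a quick sanity check on the host (assuming /usr/local/bin is on the PATH) could be:

  nsenter --version
  sudo nsenter --target 1 --mount --uts --ipc --net --pid -- uname -a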
diff --git a/sdc-os-chef/scripts/k8s/deploy_k8s_sdc.sh b/sdc-os-chef/scripts/k8s/deploy_k8s_sdc.sh
new file mode 100644
index 0000000000..9a7b57747b
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/deploy_k8s_sdc.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+set -x
+
+check_status()
+{
+ local rc=$1
+ shift
+ local comment="$@"
+ if [ ${rc} != 0 ]; then
+ echo "[ERR] Failure detected - ${comment}. Aborting !"
+ exit 255
+ fi
+}
+
+
+# Should be removed once private docker images (from the maven build) are available:
+echo "[INFO] ONAP Docker login"
+sudo docker login -u docker -p docker nexus3.onap.org:10001
+check_status $? "Onap docker registry login"
+
+# Verify the kube-system pods are running:
+# kube-addon-manager, kube-dns, kubernetes-dashboard, storage-provisioner, tiller-deploy
+echo "[INFO] Wait for Kubernetes Service ..."
+cd ../../kubernetes
+status=0
+while [ ${status} -ne 5 ]
+do
+ status=$(sudo kubectl get pods --namespace kube-system -o json \
+ | jq -r '
+ .items[]
+ | select(.status.phase == "Running" and
+ ([ .status.conditions[] | select(.type == "Ready" and .status == "True") ]
+ | length ) == 1 )
+ | .metadata.namespace + "/" + .metadata.name
+ ' \
+ | wc -l )
+ sleep 3
+done
+
+# Create namespace
+echo "[INFO] Check Namespace existence"
+exist_namespace=$( sudo kubectl get namespaces | grep onap-sdc | grep Active | wc -l )
+if [ ${exist_namespace} -eq 0 ]; then
+ sudo kubectl create namespace onap-sdc
+ check_status $? "Create namespace"
+fi
+
+echo "[INFO] Running helm init"
+sudo helm init
+check_status $? "Helm init"
+
+set -x
+
+printf "[INFO] Wait for helm to get ready\n"
+helm_health=1
+while [ ${helm_health} -ne 0 ]
+do
+ sudo helm version | grep "Server" >/dev/null 2>&1
+ helm_health=$?
+ sleep 5
+done
+
+# Remove previous chart
+exist_chart=$( sudo helm ls onap-sdc -q | wc -l )
+if [ ${exist_chart} -ne 0 ];then
+ echo "[INFO] Delete the existing onap-sdc chart"
+ sudo helm del --purge onap-sdc
+ check_status $? "Delete chart"
+fi
+
+# Install updated chart
+echo "[INFO] Create onap-sdc deployment"
+sudo helm install sdc --name onap-sdc
+check_status $? "Install chart"
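After the chart is installed, the rollout can be followed with helm and kubectl; typical checks (not part of the script) would be:

  sudo helm status onap-sdc
  sudo kubectl get pods --namespace onap-sdc -o wide
  sudo kubectl get jobs --namespace onap-sdc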
diff --git a/sdc-os-chef/scripts/k8s/etc/bash_completion.d/nsenter b/sdc-os-chef/scripts/k8s/etc/bash_completion.d/nsenter
new file mode 100644
index 0000000000..ad56f06e48
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/etc/bash_completion.d/nsenter
@@ -0,0 +1,63 @@
+_nsenter_module()
+{
+ local cur prev OPTS
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+ case $prev in
+ '-S'|'--uid')
+ COMPREPLY=( $(compgen -W "uid" -- $cur) )
+ return 0
+ ;;
+ '-G'|'--gid')
+ COMPREPLY=( $(compgen -W "gid" -- $cur) )
+ return 0
+ ;;
+ '-t'|'--target')
+ local PIDS
+ PIDS=$(cd /proc && echo [0-9]*)
+ COMPREPLY=( $(compgen -W "$PIDS" -- $cur) )
+ return 0
+ ;;
+ '-h'|'--help'|'-V'|'--version')
+ return 0
+ ;;
+ esac
+ case $cur in
+ '=')
+ # FIXME: --root and --wd should use get only
+ # directories as compgen output. If $cur is
+ # overwrote the same way as below in case segment
+ # for $prev the command-line will get mangled.
+ cur=${cur#=}
+ ;;
+ -*)
+ OPTS="
+ --all
+ --target
+ --mount=
+ --uts=
+ --ipc=
+ --net=
+ --pid=
+ --cgroup=
+ --user=
+ --setuid
+ --setgid
+ --preserve-credentials
+ --root=
+ --wd=
+ --no-fork
+ --help
+ --version
+ "
+ COMPREPLY=( $(compgen -W "${OPTS[*]}" -- $cur) )
+ return 0
+ ;;
+ esac
+ local IFS=$'\n'
+ compopt -o filenames
+ COMPREPLY=( $(compgen -f -- $cur) )
+ return 0
+}
+complete -F _nsenter_module nsenter
diff --git a/sdc-os-chef/scripts/k8s/get_helm.sh b/sdc-os-chef/scripts/k8s/get_helm.sh
new file mode 100755
index 0000000000..79e9f35203
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/get_helm.sh
@@ -0,0 +1,230 @@
+#!/usr/bin/env bash
+
+# Copyright 2016 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The install script is based off of the MIT-licensed script from glide,
+# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
+
+PROJECT_NAME="helm"
+
+: ${HELM_INSTALL_DIR:="/usr/local/bin"}
+
+# initArch discovers the architecture for this system.
+initArch() {
+ ARCH=$(uname -m)
+ case $ARCH in
+ armv5*) ARCH="armv5";;
+ armv6*) ARCH="armv6";;
+ armv7*) ARCH="armv7";;
+ aarch64) ARCH="arm64";;
+ x86) ARCH="386";;
+ x86_64) ARCH="amd64";;
+ i686) ARCH="386";;
+ i386) ARCH="386";;
+ esac
+}
+
+# initOS discovers the operating system for this system.
+initOS() {
+ OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
+
+ case "$OS" in
+ # Minimalist GNU for Windows
+ mingw*) OS='windows';;
+ esac
+}
+
+# runs the given command as root (detects if we are root already)
+runAsRoot() {
+ local CMD="$*"
+
+ if [ $EUID -ne 0 ]; then
+ CMD="sudo $CMD"
+ fi
+
+ $CMD
+}
+
+# verifySupported checks that the os/arch combination is supported for
+# binary builds.
+verifySupported() {
+ local supported="darwin-386\ndarwin-amd64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nwindows-386\nwindows-amd64"
+ if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
+ echo "No prebuilt binary for ${OS}-${ARCH}."
+ echo "To build from source, go to https://github.com/kubernetes/helm"
+ exit 1
+ fi
+
+ if ! type "curl" > /dev/null && ! type "wget" > /dev/null; then
+ echo "Either curl or wget is required"
+ exit 1
+ fi
+}
+
+# checkDesiredVersion checks if the desired version is available.
+checkDesiredVersion() {
+ # Use the GitHub releases webpage for the project to find the desired version for this project.
+ local release_url="https://github.com/kubernetes/helm/releases/${DESIRED_VERSION:-latest}"
+ if type "curl" > /dev/null; then
+ TAG=$(curl -SsL $release_url | awk '/\/tag\//' | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
+ elif type "wget" > /dev/null; then
+ TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
+ fi
+ if [ "x$TAG" == "x" ]; then
+ echo "Cannot determine ${DESIRED_VERSION} tag."
+ exit 1
+ fi
+}
+
+# checkHelmInstalledVersion checks which version of helm is installed and
+# if it needs to be changed.
+checkHelmInstalledVersion() {
+ if [[ -f "${HELM_INSTALL_DIR}/${PROJECT_NAME}" ]]; then
+ local version=$(helm version | grep '^Client' | cut -d'"' -f2)
+ if [[ "$version" == "$TAG" ]]; then
+ echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
+ return 0
+ else
+ echo "Helm ${TAG} is available. Changing from version ${version}."
+ return 1
+ fi
+ else
+ return 1
+ fi
+}
+
+# downloadFile downloads the latest binary package and also the checksum
+# for that binary.
+downloadFile() {
+ HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
+ DOWNLOAD_URL="https://kubernetes-helm.storage.googleapis.com/$HELM_DIST"
+ CHECKSUM_URL="$DOWNLOAD_URL.sha256"
+ HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
+ HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
+ HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
+ echo "Downloading $DOWNLOAD_URL"
+ if type "curl" > /dev/null; then
+ curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
+ elif type "wget" > /dev/null; then
+ wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
+ fi
+ if type "curl" > /dev/null; then
+ curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
+ elif type "wget" > /dev/null; then
+ wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
+ fi
+}
+
+# installFile verifies the SHA256 for the file, then unpacks and
+# installs it.
+installFile() {
+ HELM_TMP="$HELM_TMP_ROOT/$PROJECT_NAME"
+ local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
+ local expected_sum=$(cat ${HELM_SUM_FILE})
+ if [ "$sum" != "$expected_sum" ]; then
+ echo "SHA sum of $HELM_TMP does not match. Aborting."
+ exit 1
+ fi
+
+ mkdir -p "$HELM_TMP"
+ tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
+ HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/$PROJECT_NAME"
+ echo "Preparing to install into ${HELM_INSTALL_DIR}"
+ runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR"
+}
+
+# fail_trap is executed if an error occurs.
+fail_trap() {
+ result=$?
+ if [ "$result" != "0" ]; then
+ if [[ -n "$INPUT_ARGUMENTS" ]]; then
+ echo "Failed to install $PROJECT_NAME with the arguments provided: $INPUT_ARGUMENTS"
+ help
+ else
+ echo "Failed to install $PROJECT_NAME"
+ fi
+ echo -e "\tFor support, go to https://github.com/kubernetes/helm."
+ fi
+ cleanup
+ exit $result
+}
+
+# testVersion tests the installed client to make sure it is working.
+testVersion() {
+ set +e
+ echo "$PROJECT_NAME installed into $HELM_INSTALL_DIR/$PROJECT_NAME"
+ HELM="$(which $PROJECT_NAME)"
+ if [ "$?" = "1" ]; then
+ echo "$PROJECT_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
+ exit 1
+ fi
+ set -e
+ echo "Run '$PROJECT_NAME init' to configure $PROJECT_NAME."
+}
+
+# help provides possible cli installation arguments
+help () {
+ echo "Accepted cli arguments are:"
+ echo -e "\t[--help|-h ] ->> prints this help"
+ echo -e "\t[--version|-v <desired_version>] . When not defined it defaults to latest"
+ echo -e "\te.g. --version v2.4.0 or -v latest"
+}
+
+# cleanup temporary files to avoid https://github.com/kubernetes/helm/issues/2977
+cleanup() {
+ rm -rf "$HELM_TMP_ROOT"
+}
+
+# Execution
+
+#Stop execution on any error
+trap "fail_trap" EXIT
+set -e
+
+# Parsing input arguments (if any)
+export INPUT_ARGUMENTS="${@}"
+set -u
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ '--version'|-v)
+ shift
+ if [[ $# -ne 0 ]]; then
+ export DESIRED_VERSION="${1}"
+ else
+ echo -e "Please provide the desired version. e.g. --version v2.4.0 or -v latest"
+ exit 0
+ fi
+ ;;
+ '--help'|-h)
+ help
+ exit 0
+ ;;
+ *) exit 1
+ ;;
+ esac
+ shift
+done
+set +u
+
+initArch
+initOS
+verifySupported
+checkDesiredVersion
+if ! checkHelmInstalledVersion; then
+ downloadFile
+ installFile
+fi
+testVersion
+cleanup
diff --git a/sdc-os-chef/scripts/k8s/install_helm.sh b/sdc-os-chef/scripts/k8s/install_helm.sh
new file mode 100644
index 0000000000..a3681d033a
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/install_helm.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+curl_status=$(curl -w '%{http_code}\n' https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get -o get_helm.sh)
+
+echo $curl_status
+
+if [ ${curl_status} != 200 ]; then
+ echo "[ERROR] Download get_helm failed - $curl_status"
+ exit -1
+fi
+
+chmod 700 get_helm.sh
+
+echo "[INFO] Running get helm"
+./get_helm.sh --version v2.7.2
+
+if [ $? != 0 ]; then
+ echo "[ERROR] failed to run get_helm"
+fi
+
diff --git a/sdc-os-chef/scripts/k8s/install_kubectl.sh b/sdc-os-chef/scripts/k8s/install_kubectl.sh
new file mode 100644
index 0000000000..8d1229b527
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/install_kubectl.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+kubectl_version=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
+
+echo "[INFO] kubectl version - ${kubectl_version}"
+
+curl_status=$(curl -w '%{http_code}\n' -LO https://storage.googleapis.com/kubernetes-release/release/${kubectl_version}/bin/linux/amd64/kubectl)
+
+if [ $curl_status != 200 ] ; then
+ echo "[ERROR] Download kubectl failed - $curl_status"
+ exit -1
+fi
+
+chmod +x ./kubectl
+
+sudo mv ./kubectl /usr/local/bin/kubectl
+
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+
diff --git a/sdc-os-chef/scripts/k8s/install_minikube.sh b/sdc-os-chef/scripts/k8s/install_minikube.sh
new file mode 100644
index 0000000000..b0f0d53cae
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/install_minikube.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+echo "[INFO] minikube version - v0.24.1"
+
+curl_status=$(curl -w '%{http_code}\n' -Lo minikube https://storage.googleapis.com/minikube/releases/v0.24.1/minikube-linux-amd64)
+
+if [ $curl_status != 200 ] ; then
+ echo "[ERROR] Download minikube failed - $curl_status"
+ exit -1
+fi
+
+chmod +x minikube
+
+sudo mv minikube /usr/local/bin/
+
+export CHANGE_MINIKUBE_NONE_USER=true
+
+sudo minikube start --vm-driver=none
diff --git a/sdc-os-chef/scripts/k8s/kubernetes_run.sh b/sdc-os-chef/scripts/k8s/kubernetes_run.sh
new file mode 100644
index 0000000000..fd9de2e181
--- /dev/null
+++ b/sdc-os-chef/scripts/k8s/kubernetes_run.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+####################
+# Functions #
+####################
+
+status()
+{
+ local rc=$1
+ if [ ${rc} != 0 ]; then
+ echo "[ERR] Failure detected. Aborting !"
+ exit 255
+ else
+ echo "[INFO] Done "
+ fi
+}
+
+print_header()
+{
+ header=$*
+ echo ""
+ echo "-------------------------"
+ echo " ${header}"
+ echo "-------------------------"
+ echo ""
+ }
+
+####################
+# Main #
+####################
+clear
+
+####################
+# kubectl #
+####################
+print_header "Kubelet - Install ..."
+sh ./install_kubectl.sh
+status $?
+
+
+####################
+# minikube #
+####################
+print_header "Minikube - Install ..."
+sh ./install_minikube.sh
+status $?
+
+
+####################
+# dependencies #
+####################
+print_header "Dependency - Install ..."
+echo "[INFO] Install - nsenter"
+# Use pre compiled nsenter:
+sudo cp bin/nsenter /usr/local/bin/nsenter
+sudo cp etc/bash_completion.d/nsenter /etc/bash_completion.d/nsenter
+
+## In order to build the nsenter use the below instructions:
+##./build_nsenter_exec.sh
+echo "[INFO] Install - socat"
+sudo apt-get install -y socat jq
+
+####################
+# helm #
+####################
+print_header "Helm - Install ..."
+sh ./install_helm.sh
+status $?
+
+
+####################
+# K8s #
+####################
+print_header "SDC - Deploy Pods ..."
+sh ./deploy_k8s_sdc.sh
+status $?
+