-rw-r--r--  kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh  18
-rw-r--r--  kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh  18
-rw-r--r--  kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh  18
-rw-r--r--  kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh  18
-rw-r--r--  kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json  49
-rw-r--r--  kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json  22
-rw-r--r--  kubernetes/consul/templates/consul-server-deployment.yaml  2
-rw-r--r--  kubernetes/consul/values.yaml  2
-rwxr-xr-x  kubernetes/oneclick/createAll.bash  4
-rwxr-xr-x  kubernetes/oneclick/deleteAll.bash  2
-rw-r--r--  kubernetes/oneclick/setenv.bash  4
-rw-r--r--  kubernetes/oneclick/vnfsdk.sh  7
-rw-r--r--  kubernetes/vnfsdk/Chart.yaml  4
-rw-r--r--  kubernetes/vnfsdk/templates/all-services.yaml  30
-rw-r--r--  kubernetes/vnfsdk/templates/postgres-deployment.yaml  24
-rw-r--r--  kubernetes/vnfsdk/templates/refrepo-deployment.yaml  54
-rw-r--r--  kubernetes/vnfsdk/values.yaml  10
17 files changed, 279 insertions, 7 deletions
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh
new file mode 100644
index 0000000000..f460a3f116
--- /dev/null
+++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.onap-sdc:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component BE) and check to see if
+## the BE component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "BE" | grep "UP")
+
+if [ -z "$READY" ]; then
+ echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+ echo "Produces response: $HEALTH_CHECK_RESPONSE"
+ echo "Application is not in an available state"
+ return 2
+else
+ echo "Application is available."
+ return 0
+fi
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh
new file mode 100644
index 0000000000..31cd8d3fa6
--- /dev/null
+++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.onap-sdc:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component CASSANDRA) and check to see if
+## the CASSANDRA component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "CASSANDRA" | grep "UP")
+
+if [ -z "$READY" ]; then
+ echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+ echo "Produces response: $HEALTH_CHECK_RESPONSE"
+ echo "Application is not in an available state"
+ return 2
+else
+ echo "Application is available."
+ return 0
+fi
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh
new file mode 100644
index 0000000000..6a1d035ecd
--- /dev/null
+++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.onap-sdc:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component FE) and check to see if
+## the FE component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "FE" | grep "UP")
+
+if [ -z "$READY" ]; then
+ echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+ echo "Produces response: $HEALTH_CHECK_RESPONSE"
+ echo "Application is not in an available state"
+ return 2
+else
+ echo "Application is available."
+ return 0
+fi
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh
new file mode 100644
index 0000000000..6993402653
--- /dev/null
+++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.onap-sdc:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component TITAN) and check to see if
+## the TITAN component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "TITAN" | grep "UP")
+
+if [ -z "$READY" ]; then
+ echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+ echo "Produces response: $HEALTH_CHECK_RESPONSE"
+ echo "Application is not in an available state"
+ return 2
+else
+ echo "Application is available."
+ return 0
+fi
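The four sdc-*-script.sh checks above share one pattern: fetch the SDC health check response, drop the ON_BOARDING block so a component name is not matched twice, then grep for that component's UP status. A minimal sketch of the filter chain run by hand is shown below; the sample payload is an illustrative stand-in, not the exact shape of the SDC response.

    # Stand-in payload; the real /sdc1/rest/healthCheck response is larger.
    SAMPLE='{"componentsInfo":[{"healthCheckComponent":"BE","healthCheckStatus":"UP"}]}'
    # Same chain as the scripts: strip ON_BOARDING, find BE, confirm UP.
    echo "$SAMPLE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "BE" | grep "UP"
    # Non-empty output (exit status 0) is what the scripts treat as "available".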
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json
new file mode 100644
index 0000000000..849e199589
--- /dev/null
+++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json
@@ -0,0 +1,49 @@
+{
+ "service": {
+ "name": "Health Check: SDC",
+ "checks": [
+ {
+ "id": "sdc-fe-healthcheck",
+ "name": "SDC Front End Health Check",
+ "script": "/consul/config/scripts/sdc-fe-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-be-healthcheck",
+ "name": "SDC Back End Health Check",
+ "script": "/consul/config/scripts/sdc-be-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-titan-healthcheck",
+ "name": "SDC Titan Health Check",
+ "script": "/consul/config/scripts/sdc-titan-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-cs-healthcheck",
+ "name": "SDC Cassandra Health Check",
+ "script": "/consul/config/scripts/sdc-cs-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "sdc-catalog-healthcheck",
+ "name": "SDC Catalog Health Check",
+ "http": "https://sdc-be.onap-sdc:8443/asdc/v1/catalog/services",
+ "header": {
+ "Authorization": ["Basic dmlkOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU="],
+ "X-ECOMP-InstanceID": ["VID"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
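Once the consul agent loads sdc-health.json, the script and HTTP checks above appear in its catalog. A quick way to confirm they registered, assuming the agent's HTTP API is reachable on its default port 8500:

    # Lists every check the local agent knows about, including the sdc-* ones.
    curl -s http://localhost:8500/v1/agent/checks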
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json
new file mode 100644
index 0000000000..fc26d2e192
--- /dev/null
+++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json
@@ -0,0 +1,22 @@
+{
+ "service": {
+ "name": "Health Check: SDNC",
+ "checks": [
+ {
+ "id": "odl-api-healthcheck",
+ "name": "SDNC API Health Check",
+ "http": "http://sdnhost.onap-sdnc:8282/restconf/operations/SLI-API:healthcheck",
+ "method": "POST",
+ "header": {
+ "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+ "Cache-Control": ["no-cache"],
+ "Content-Type": ["application/json"],
+ "Accept": ["application/json"]
+ },
+ "tls_skip_verify": true,
+ "interval": "15s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
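Because the SDNC check is a plain HTTP POST, it can also be reproduced by hand with the same URL and headers as the definition above (the name sdnhost.onap-sdnc only resolves where the cluster DNS is visible):

    # Manual run of the odl-api-healthcheck defined above.
    curl -s -X POST \
      -H "Authorization: Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==" \
      -H "Content-Type: application/json" -H "Accept: application/json" \
      http://sdnhost.onap-sdnc:8282/restconf/operations/SLI-API:healthcheck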
diff --git a/kubernetes/consul/templates/consul-server-deployment.yaml b/kubernetes/consul/templates/consul-server-deployment.yaml
index 3e6dcba865..e914b6b198 100644
--- a/kubernetes/consul/templates/consul-server-deployment.yaml
+++ b/kubernetes/consul/templates/consul-server-deployment.yaml
@@ -26,5 +26,5 @@ spec:
name: consul-server-config
volumes:
- hostPath:
- path: {{ .Values.rootHostPath }}/{{ .Values.rootHostPath }}/{{ .Values.nsPrefix }}/consul/consul-server-config
+ path: {{ .Values.rootHostPath }}/{{ .Values.nsPrefix }}/consul/consul-server-config
name: consul-server-config
diff --git a/kubernetes/consul/values.yaml b/kubernetes/consul/values.yaml
index 2b713dc278..89f3ecd0a7 100644
--- a/kubernetes/consul/values.yaml
+++ b/kubernetes/consul/values.yaml
@@ -1,4 +1,4 @@
-nsPrefix: "inf"
+nsPrefix: "onap"
nodePortPrefix: 302
consuldockerTag: "latest"
rootHostPath: "/dockerdata-nfs"
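Taken together, the two consul changes above drop the duplicated rootHostPath segment and move the namespace prefix to onap, so the server config volume resolves to a single path. With the values shown here that is /dockerdata-nfs/onap/consul/consul-server-config, which can be checked on the node:

    # On a Kubernetes node (assuming the default rootHostPath above):
    ls /dockerdata-nfs/onap/consul/consul-server-config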
diff --git a/kubernetes/oneclick/createAll.bash b/kubernetes/oneclick/createAll.bash
index c8810aa615..2963a40c20 100755
--- a/kubernetes/oneclick/createAll.bash
+++ b/kubernetes/oneclick/createAll.bash
@@ -13,8 +13,8 @@ Usage: $0 [PARAMs]
-i [INSTANCE] : ONAP deployment instance # (default: 1)
-a [APP] : Specify a specific ONAP component (default: all)
from the following choices:
- sdc, aai ,mso, message-router, robot,
- vid, sdnc, portal, policy, appc, multicloud, clamp, consul
+ sdc, aai ,mso, message-router, robot, vid,
+ sdnc, portal, policy, appc, multicloud, clamp, consul, vnfsdk
EOF
}
diff --git a/kubernetes/oneclick/deleteAll.bash b/kubernetes/oneclick/deleteAll.bash
index 8f44f125d5..f3b8da7238 100755
--- a/kubernetes/oneclick/deleteAll.bash
+++ b/kubernetes/oneclick/deleteAll.bash
@@ -28,7 +28,7 @@ Usage: $0 [PARAMs]
-a [APP] : Specify a specific ONAP component (default: all)
from the following choices:
sdc, aai ,mso, message-router, robot,
- vid, sdnc, portal, policy, appc, multicloud, clamp
+ vid, sdnc, portal, policy, appc, multicloud, clamp, consul, vnfsdk
EOF
}
diff --git a/kubernetes/oneclick/setenv.bash b/kubernetes/oneclick/setenv.bash
index 6dfd948924..16472e7522 100644
--- a/kubernetes/oneclick/setenv.bash
+++ b/kubernetes/oneclick/setenv.bash
@@ -1,7 +1,7 @@
#!/bin/bash
# Deploying MSB first and kube2msb last will ensure all the ONAP services can be registered to MSB
-HELM_APPS=('consul' 'msb' 'mso' 'message-router' 'sdnc' 'vid' 'robot' 'portal' 'policy' 'appc' 'aai' 'sdc' 'dcaegen2' 'log' 'cli' 'multicloud' 'clamp' 'kube2msb')
+HELM_APPS=('consul' 'msb' 'mso' 'message-router' 'sdnc' 'vid' 'robot' 'portal' 'policy' 'appc' 'aai' 'sdc' 'dcaegen2' 'log' 'cli' 'multicloud' 'clamp' 'vnfsdk' 'kube2msb')
ONAP_DOCKER_REGISTRY=${ONAP_DOCKER_REGISTRY:-nexus3.onap.org:10001}
ONAP_DOCKER_USER=${ONAP_DOCKER_USER:-docker}
ONAP_DOCKER_PASS=${ONAP_DOCKER_PASS:-docker}
@@ -12,4 +12,4 @@ ONAP_DOCKER_MAIL=${ONAP_DOCKER_MAIL:-$USERNAME@$USERDOMAIN}
OPENSTACK_PRIVATE_KEY_PATH=${OPENSTACK_PRIVATE_KEY_PATH:-~/.ssh/onap_rsa}
# dcaegen2 bootstrap configuration input yaml file. Start from the sample, and set your environments real values:
# example: export DCAEGEN2_CONFIG_INPUT_FILE_PATH=/tmp/dcae-parameters.yaml
-DCAEGEN2_CONFIG_INPUT_FILE_PATH=${DCAEGEN2_CONFIG_INPUT_FILE_PATH:-../dcaegen2/dcae-parameters-sample.yaml}
\ No newline at end of file
+DCAEGEN2_CONFIG_INPUT_FILE_PATH=${DCAEGEN2_CONFIG_INPUT_FILE_PATH:-../dcaegen2/dcae-parameters-sample.yaml}
diff --git a/kubernetes/oneclick/vnfsdk.sh b/kubernetes/oneclick/vnfsdk.sh
new file mode 100644
index 0000000000..c2853f7e1e
--- /dev/null
+++ b/kubernetes/oneclick/vnfsdk.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
+
+printf "VNFSDK....\n"
+
+$KUBECTL_CMD/postgres-deployment.yaml
+$KUBECTL_CMD/refrepo-deployment.yaml
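vnfsdk.sh follows the other oneclick component scripts: one kubectl prefix built from the namespace prefix, component name, and action, reused for both deployment files. Assuming the same argument order as those scripts, a create call expands roughly to:

    # ./vnfsdk.sh onap vnfsdk create   (argument order assumed from the other
    # oneclick component scripts) runs:
    kubectl --namespace onap-vnfsdk create -f ../vnfsdk/postgres-deployment.yaml
    kubectl --namespace onap-vnfsdk create -f ../vnfsdk/refrepo-deployment.yaml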
diff --git a/kubernetes/vnfsdk/Chart.yaml b/kubernetes/vnfsdk/Chart.yaml
new file mode 100644
index 0000000000..2dee27b360
--- /dev/null
+++ b/kubernetes/vnfsdk/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: A Helm chart for Kubernetes
+name: vnfsdk
+version: 1.1.0
diff --git a/kubernetes/vnfsdk/templates/all-services.yaml b/kubernetes/vnfsdk/templates/all-services.yaml
new file mode 100644
index 0000000000..1f11729667
--- /dev/null
+++ b/kubernetes/vnfsdk/templates/all-services.yaml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: postgres
+ namespace: {{ .Values.nsPrefix }}-vnfsdk
+ labels:
+ app: postgres
+spec:
+ ports:
+ - name: postgres-port
+ port: 5432
+ selector:
+ app: postgres
+ clusterIP: None
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: refrepo
+ name: refrepo
+ namespace: {{ .Values.nsPrefix }}-vnfsdk
+spec:
+ ports:
+ - name: "refrepo-port"
+ port: 8702
+ nodePort: {{ .Values.nodePortPrefix }}97
+ selector:
+ app: refrepo
+ type: NodePort
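refrepo is exposed as a NodePort; with nodePortPrefix 302 from values.yaml the template renders nodePort 30297, so the reference repository answers on that port of any cluster node. A quick reachability check (the node address is a placeholder):

    # 302 + "97" -> node port 30297; <node-ip> stands for any cluster node.
    curl -s -o /dev/null -w "%{http_code}\n" http://<node-ip>:30297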
diff --git a/kubernetes/vnfsdk/templates/postgres-deployment.yaml b/kubernetes/vnfsdk/templates/postgres-deployment.yaml
new file mode 100644
index 0000000000..76721d1eae
--- /dev/null
+++ b/kubernetes/vnfsdk/templates/postgres-deployment.yaml
@@ -0,0 +1,24 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: postgres
+ namespace: {{ .Values.nsPrefix }}-vnfsdk
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: postgres
+ template:
+ metadata:
+ labels:
+ app: postgres
+ name: postgres
+ spec:
+ hostname: postgres
+ containers:
+ - args:
+ image: {{ .Values.image.postgresImage }}:{{ .Values.image.postgresVersion }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: "postgres"
+ imagePullSecrets:
+ - name: {{ .Values.nsPrefix }}-docker-registry-key
diff --git a/kubernetes/vnfsdk/templates/refrepo-deployment.yaml b/kubernetes/vnfsdk/templates/refrepo-deployment.yaml
new file mode 100644
index 0000000000..3accb96967
--- /dev/null
+++ b/kubernetes/vnfsdk/templates/refrepo-deployment.yaml
@@ -0,0 +1,54 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: refrepo
+ name: refrepo
+ namespace: "{{ .Values.nsPrefix }}-vnfsdk"
+spec:
+ selector:
+ matchLabels:
+ app: refrepo
+ template:
+ metadata:
+ labels:
+ app: refrepo
+ name: refrepo
+ annotations:
+ pod.beta.kubernetes.io/init-containers: '[
+ {
+ "args": [
+ "--container-name",
+ "postgres"
+ ],
+ "command": [
+ "/root/ready.py"
+ ],
+ "env": [
+ {
+ "name": "NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ],
+ "image": "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}",
+ "imagePullPolicy": "{{ .Values.pullPolicy }}",
+ "name": "vnfsdk-readiness"
+ }
+ ]'
+ spec:
+ containers:
+ - image: {{ .Values.image.refrepoImage }}:{{ .Values.image.refrepoVersion }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: refrepo
+ readinessProbe:
+ tcpSocket:
+ port: 8702
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ imagePullSecrets:
+ - name: {{ .Values.nsPrefix }}-docker-registry-key
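The readiness gate here uses the legacy pod.beta.kubernetes.io/init-containers annotation, as other OOM charts of this vintage do: the vnfsdk-readiness init container blocks until the postgres pod is ready, so a freshly created refrepo pod sits in an Init state until then. That can be watched with:

    # refrepo stays in Init:0/1 until postgres in the same namespace is Ready.
    kubectl --namespace onap-vnfsdk get pods -l app=refrepo -w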
diff --git a/kubernetes/vnfsdk/values.yaml b/kubernetes/vnfsdk/values.yaml
new file mode 100644
index 0000000000..e445ef6232
--- /dev/null
+++ b/kubernetes/vnfsdk/values.yaml
@@ -0,0 +1,10 @@
+nsPrefix: onap
+pullPolicy: Always
+nodePortPrefix: 302
+image:
+ readiness: oomk8s/readiness-check
+ readinessVersion: 1.0.0
+ postgresImage: nexus3.onap.org:10001/onap/refrepo/postgres
+ postgresVersion: latest
+ refrepoImage: nexus3.onap.org:10001/onap/refrepo
+ refrepoVersion: 1.0-STAGING-latest
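With the chart, values, and oneclick wiring above, the VNF SDK component can be deployed or removed on its own through the existing scripts; the -a flag comes from the usage text above, and -n is assumed to be the namespace-prefix flag the oneclick scripts already use:

    # Deploy, then later remove, only the vnfsdk component (flags assumed as above).
    ./createAll.bash -n onap -a vnfsdk
    ./deleteAll.bash -n onap -a vnfsdk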