19 files changed, 531 insertions, 29 deletions
diff --git a/kubernetes/aai/templates/all-services.yaml b/kubernetes/aai/templates/all-services.yaml index 5d1c09e111..0cf62d0fec 100644 --- a/kubernetes/aai/templates/all-services.yaml +++ b/kubernetes/aai/templates/all-services.yaml @@ -150,6 +150,25 @@ metadata: "enable_ssl": true, "visualRange": "1", "path": "/aai/v11/network" + }, + { + "serviceName": "aai-externalSystem", + "version": "v11", + "url": "/aai/v11/external-system", + "protocol": "REST", + "port": "8443", + "enable_ssl": true, + "visualRange": "1" + }, + { + "serviceName": "aai-externalSystem-deprecated", + "version": "v11", + "url": "/aai/v11/external-system", + "protocol": "REST", + "port": "8443", + "enable_ssl": true, + "visualRange": "1", + "path": "/aai/v11/external-system" } ]' spec: diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json new file mode 100644 index 0000000000..07828431fc --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json @@ -0,0 +1,21 @@ +{ + "service": { + "name": "A&AI HBase Health Check", + "checks": [ + { + "id": "hbase-aai", + "name": "HBase Health Check", + "http": "http://hbase.onap-aai:8080/status/cluster", + "method": "GET", + "header": { + "Cache-Control": ["no-cache"], + "Content-Type": ["application/json"], + "Accept": ["application/json"] + }, + "tls_skip_verify": true, + "interval": "15s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json new file mode 100644 index 0000000000..52ffdc0b6d --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "Health Check: APPC-SDN-CTL-DB-01", + "checks": [ + { + "id": "sdnctldb01.onap-appc", + "name": "APPC SDNCTLDB01 Health Check", + "tcp": "sdnctldb01.onap-appc:3306", + "interval": "10s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json new file mode 100644 index 0000000000..d6ac666ff4 --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "Health Check: APPC-SDN-CTL-DB-02", + "checks": [ + { + "id": "sdnctldb02.onap-appc", + "name": "APPC SDNCTLDB02 Health Check", + "tcp": "sdnctldb02.onap-appc:3306", + "interval": "10s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json new file mode 100644 index 0000000000..a5738b3277 --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json @@ -0,0 +1,39 @@ +{ + "service": { + "name": "Health Check: MSB", + "checks": [ + { + "id": "msb-eag.onap-msb", + "name": "MSB eag Health Check", + "http": "http://msb-eag.onap-msb:80/iui/microservices/default.html", + "method": "HEAD", + "tls_skip_verify": true, + "interval": "15s", + "timeout": "1s" + }, + { + "id": "msb-iag.onap-msb", + "name": "MSB iag 
Health Check", + "http": "http://msb-iag.onap-msb:80/iui/microservices/default.html", + "method": "HEAD", + "tls_skip_verify": true, + "interval": "15s", + "timeout": "1s" + }, + { + "id": "msb-consul.onap-msb", + "name": "MSB consul Health Check", + "tcp": "msb-consul.onap-msb:8500", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "msb-discovery.onap-msb", + "name": "MSB discovery Health Check", + "tcp": "msb-discovery.onap-msb:10081", + "interval": "15s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json new file mode 100644 index 0000000000..710f4a8499 --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json @@ -0,0 +1,15 @@ +{ + "service": { + "name": "Health Check: MSO - MariaDb", + "checks": [ + { + "id": "mso-mariadb", + "name": "MSO Mariadb Health Check", + "script": "/consul/config/scripts/mso-mariadb-script.sh", + "interval": "10s", + "timeout": "1s" + } + ] + + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh new file mode 100755 index 0000000000..84b22206e0 --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh @@ -0,0 +1,14 @@ +NAME=$(/consul/config/bin/kubectl -n onap-mso get pod | grep -o "mariadb[^[:space:]]*") + + if [ -n "$NAME" ]; then + if /consul/config/bin/kubectl -n onap-mso exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then + echo Success. mariadb process is running. 2>&1 + exit 0 + else + echo Failed. mariadb process is not running. 2>&1 + exit 1 + fi + else + echo Failed. mariadb container is offline. 2>&1 + exit 1 + fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh new file mode 100755 index 0000000000..6bb07f80eb --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh @@ -0,0 +1,13 @@ +SDNC_DBHOST_POD=$(/consul/config/bin/kubectl -n onap-sdnc get pod | grep -o "sdnc-dbhost-[^[:space:]]*") +if [ -n "$SDNC_DBHOST_POD" ]; then + if /consul/config/bin/kubectl -n onap-sdnc exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then + echo Success. SDNC DBHost is running. 2>&1 + exit 0 + else + echo Failed. SDNC DBHost is not running. 2>&1 + exit 1 + fi +else + echo Failed. SDNC DBHost is offline. 
2>&1 + exit 1 +fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json new file mode 100644 index 0000000000..28d711b72f --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "Health Check: SDNC - DB Host", + "checks": [ + { + "id": "sdnc-dbhost-healthcheck", + "name": "SDNC DBHOST Health Check", + "script": "/consul/config/scripts/sdnc-dbhost-script.sh", + "interval": "10s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json new file mode 100644 index 0000000000..3ecc1b3f53 --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json @@ -0,0 +1,21 @@ +{ + "service": { + "name": "Health Check: SDNC Portal", + "checks": [ + { + "id": "sdnc-portal", + "name": "SDNC Portal Health Check", + "http": "http://sdnc-portal.onap-sdnc:8843/login", + "method": "HEAD", + "header": { + "Cache-Control": ["no-cache"], + "Content-Type": ["application/json"], + "Accept": ["application/json"] + }, + "tls_skip_verify": true, + "interval": "15s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json new file mode 100644 index 0000000000..321852e805 --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "Health Check: SDNC-SDN-CTL-DB-01", + "checks": [ + { + "id": "sdnctldb01.onap-sdnc", + "name": "SDNC SDNCTLDB01 Health Check", + "tcp": "sdnctldb01.onap-sdnc:3306", + "interval": "10s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json new file mode 100644 index 0000000000..106a2069bb --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "Health Check: SDNC-SDN-CTL-DB-02", + "checks": [ + { + "id": "sdnctldb02.onap-sdnc", + "name": "SDNC SDNCTLDB02 Health Check", + "tcp": "sdnctldb02.onap-sdnc:3306", + "interval": "10s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json new file mode 100644 index 0000000000..c7f83a8bfe --- /dev/null +++ b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json @@ -0,0 +1,112 @@ +{ + "service": { + "name": "Health Check: VFC", + "checks": [ + { + "id": "vfc-catalog.onap-vfc", + "name": "VFC catalog Health Check", + "tcp": "vfc-catalog.onap-vfc:8806", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-emsdriver.onap-vfc", + "name": "VFC emsdriver Health Check", + "tcp": "vfc-emsdriver.onap-vfc:8206", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-gvnfmdriver.onap-vfc", + "name": "VFC gvnfmdriver Health 
Check", + "tcp": "vfc-gvnfmdriver.onap-vfc:8484", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-hwvnfmdriver.onap-vfc", + "name": "VFC hwvnfmdriver Health Check", + "tcp": "vfc-hwvnfmdriver.onap-vfc:8482", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-jujudriver.onap-vfc", + "name": "VFC jujudriver Health Check", + "tcp": "vfc-jujudriver.onap-vfc:8483", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-nokiavnfmdriver.onap-vfc", + "name": "VFC nokiavnfmdriver Health Check", + "tcp": "vfc-nokiavnfmdriver.onap-vfc:8486", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-nslcm.onap-vfc", + "name": "VFC nslcm Health Check", + "tcp": "vfc-nslcm.onap-vfc:8403", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-resmgr.onap-vfc", + "name": "VFC resmgr Health Check", + "tcp": "vfc-resmgr.onap-vfc:8480", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-vnflcm.onap-vfc", + "name": "VFC vnflcm Health Check", + "tcp": "vfc-vnflcm.onap-vfc:8801", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-vnfmgr.onap-vfc", + "name": "VFC vnfmgr Health Check", + "tcp": "vfc-vnfmgr.onap-vfc:8803", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-vnfres.onap-vfc", + "name": "VFC vnfres Health Check", + "tcp": "vfc-vnfres.onap-vfc:8802", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-workflow.onap-vfc", + "name": "VFC workflow Health Check", + "tcp": "vfc-workflow.onap-vfc:10550", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-workflowengineactiviti.onap-vfc", + "name": "VFC workflow-engine Health Check", + "tcp": "vfc-workflowengineactiviti.onap-vfc:8080", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-ztesdncdriver.onap-vfc", + "name": "VFC ztesdncdriver Health Check", + "tcp": "vfc-ztesdncdriver.onap-vfc:8411", + "interval": "15s", + "timeout": "1s" + }, + { + "id": "vfc-ztevnfmdriver.onap-vfc", + "name": "VFC ztevnfmdriver Health Check", + "tcp": "vfc-ztevnfmdriver.onap-vfc:8410", + "interval": "15s", + "timeout": "1s" + } + ] + } +} diff --git a/kubernetes/config/prepull_docker.sh b/kubernetes/config/prepull_docker.sh new file mode 100755 index 0000000000..e456c3e016 --- /dev/null +++ b/kubernetes/config/prepull_docker.sh @@ -0,0 +1,145 @@ +#!/bin/bash + +#function to provide help +#desc: this function provide help menu +#argument: -h for help, -p for path, -r for repository +#calling syntax: options + +options() { + cat <<EOF +Usage: $0 [PARAMs] +-h : help +-l (Location) : path for searching values.yaml + [in case no path is provided then is will scan current directories for values.yml] +-r (Repository) : name of image repository + [format [repository name/url]:(port)] + [in case no repository is provided then defualt image repository will be nexus3.onap.org:10001] +-u (User) : user name for login + [in case no user name is provided then default user will be docker] +-p (Password) : password for login + [in case no password is provided then default user will be docker] +EOF +} + +#function to parse yaml file +#desc: this function convert yaml file to dotted notion +#argument: yaml file +#calling syntax: parse_yaml <yaml_file_name> + +function parse_yaml { + local prefix=$2 + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\):|\1|" \ + -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if 
(i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])(".")} + printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3); + } + }' +} + +#algorithmic steps +#start +#scan all values.yaml files +#parse yaml file into dotted format +#for each lines check there is image tag in line +#store image name and check next line for version information +#if in next line version is not present as a subtag then call docker pull with imageName +#if version is present in next line then call docker pull with imageName and imageVersion +#end + + +#start processing for finding images and version +IMAGE_TEXT="image" +IMAGE_VERSION_TEXT="Version" +LOCATION="." +VALUES_FILE_NAME="values.yaml" +IMAGE_REPOSITORY="nexus3.onap.org:10001" +USER_NAME="docker" +PASSWORD="docker" + +#scan for options menu +while getopts ":h:l:r:u:p:" PARAM; do + case $PARAM in + h) + options + exit 1 + ;; + l) + LOCATION=${OPTARG} + ;; + r) + IMAGE_REPOSITORY=${OPTARG} + ;; + u) + USER_NAME=${OPTARG} + ;; + p) + PASSWORD=${OPTARG} + ;; + ?) + options + exit + ;; + esac +done + + +#docker login to nexus repo +echo docker login -u $USER_NAME -p $PASSWORD $IMAGE_REPOSITORY +docker login -u $USER_NAME -p $PASSWORD $IMAGE_REPOSITORY + +#scan all values.yaml files recursively +for filename in `find $LOCATION -name $VALUES_FILE_NAME` +do + imageNameWithVersion=" "; + #parse yaml files + for line in `parse_yaml $filename` + do + #find all image subtag inside converted values.yaml file's lines + if echo $line | grep -q $IMAGE_TEXT ; then + #find imageName inside line + imageName=`echo $line | awk -F "=" '{print $2}'` + #remove attional prefix and postfix + imageNameFinal=`echo "$imageName" | sed -e 's/^"//' -e 's/"$//' ` + + #check if line contain Version as a subtag in lines if yes then call docker pull with version + if echo $line | grep -q $IMAGE_VERSION_TEXT ; then + echo docker pull "$imageNameWithVersion":"$imageNameFinal" + docker pull $imageNameWithVersion:$imageNameFinal & + imageNameWithVersion=" " + else + #check Version is not in subtag and old scanned value is present then call docker pull without version + if [ "$imageNameWithVersion" != " " ]; then + echo docker pull "$imageNameWithVersion" + docker pull $imageNameWithVersion & + imageNameWithVersion=$imageNameFinal + else + imageNameWithVersion=$imageNameFinal + fi + fi + + + fi + + + done +done +# complete processing +echo "finished launching pulls" +#MAX_WAIT_INTERVALS=300 +INTERVAL_COUNT=300 +while [ $(ps -ef | grep docker | grep pull | grep -v $0 | wc -l) -gt 0 ]; do + sleep 10 + INTERVAL_COUNT=$((INTERVAL_COUNT - 1)) + echo "waiting for last pull" + if [ "$INTERVAL_COUNT" -eq 0 ]; then + break + fi +done + diff --git a/kubernetes/oneclick/deleteAll.bash b/kubernetes/oneclick/deleteAll.bash index b41679b061..53f1d1a9cb 100755 --- a/kubernetes/oneclick/deleteAll.bash +++ b/kubernetes/oneclick/deleteAll.bash @@ -43,6 +43,8 @@ usage() { Usage: $0 [PARAMs] -u : Display usage -n [NAMESPACE] : Kubernetes namespace (required) +-c : kubectl context (default: current context) +-y : Skip interactive confirmation (default: no) -a [APP] : Specify a specific ONAP component (default: all) from the following choices: sdc, aai ,mso, message-router, robot, vid, aaf, uui @@ -56,8 +58,10 @@ NS= INCL_SVC=false APP= WAIT_TERMINATE=true +SKIP_INTERACTIVE_CONFIRMATION=no +KUBECTL_CONTEXT= -while getopts ":n:u:s:a:N" PARAM; do +while getopts ":c:n:u:s:a:yN" PARAM; do case $PARAM in u) usage @@ -76,6 +80,12 @@ while getopts ":n:u:s:a:N" PARAM; do N) 
WAIT_TERMINATE=false ;; + y) + SKIP_INTERACTIVE_CONFIRMATION=yes + ;; + c) + KUBECTL_CONTEXT=${OPTARG} + ;; ?) usage exit @@ -88,6 +98,29 @@ if [[ -z $NS ]]; then exit 1 fi +if [[ "$SKIP_INTERACTIVE_CONFIRMATION" != yes ]]; then + current_kubectl_context=$(kubectl config get-contexts |grep "*" |awk '{print $2}') + if test "$KUBECTL_CONTEXT" != "$current_kubectl_context"; then + printf "Current kubectl context does not match context specified:\x1b[31m $current_kubectl_context\x1b[0m\n" + if [ ! -z "$KUBECTL_CONTEXT" -a "$KUBECTL_CONTEXT" != " " ]; then + read -p "Do you wish to switch context to $KUBECTL_CONTEXT and continue?" yn + case $yn in + [Yy]* ) kubectl config use-context $KUBECTL_CONTEXT;; + * ) printf "Skipping delete...\n"; exit;; + esac + else + printf "You are about to delete deployment from:\x1b[31m $current_kubectl_context\x1b[0m\n" + read -p "To continue enter context name: " response + + if test "$response" != "$current_kubectl_context" + then + printf "Your response does not match current context! Skipping delete ...\n" + exit 1 + fi + fi + fi +fi + if [[ ! -z "$APP" ]]; then HELM_APPS=($APP) fi diff --git a/kubernetes/sdnc/values.yaml b/kubernetes/sdnc/values.yaml index d04ff84cbd..5272b653b9 100644 --- a/kubernetes/sdnc/values.yaml +++ b/kubernetes/sdnc/values.yaml @@ -1,5 +1,5 @@ nsPrefix: onap -pullPolicy: Always +pullPolicy: IfNotPresent nodePortPrefix: 302 image: readiness: oomk8s/readiness-check:1.0.0 diff --git a/kubernetes/vfc/templates/all-services.yaml b/kubernetes/vfc/templates/all-services.yaml index 98a0c4b0a1..ff9eea4587 100755 --- a/kubernetes/vfc/templates/all-services.yaml +++ b/kubernetes/vfc/templates/all-services.yaml @@ -84,36 +84,36 @@ spec: app: vfc-gvnfmdriver type: NodePort #{{ end }} -#{{ if not .Values.disableVfcVfcZtevmanagerdriver }} +#{{ if not .Values.disableVfcVfcztevnfmdriver }} --- apiVersion: v1 kind: Service metadata: labels: - app: vfc-ztevmanagerdriver - name: vfc-ztevmanagerdriver + app: vfc-ztevnfmdriver + name: vfc-ztevnfmdriver namespace: "{{ .Values.nsPrefix }}-vfc" annotations: msb.onap.org/service-info: '[ { - "serviceName": "ztevmanagerdriver", + "serviceName": "ztevnfmdriver", "version": "v1", - "url": "/api/ztevmanagerdriver/v1", + "url": "/api/ztevnfmdriver/v1", "protocol": "REST", - "port": "{{.Values.ztevmanagerdriver.port}}", + "port": "{{.Values.ztevnfmdriver.port}}", "visualRange":"1" } ]' spec: ports: - - name: vfc-ztevmanagerdriver - port: {{.Values.ztevmanagerdriver.port}} - nodePort: {{.Values.ztevmanagerdriver.nodePort}} + - name: vfc-ztevnfmdriver + port: {{.Values.ztevnfmdriver.port}} + nodePort: {{.Values.ztevnfmdriver.nodePort}} selector: - app: vfc-ztevmanagerdriver + app: vfc-ztevnfmdriver type: NodePort #{{ end }} -#{{ if not .Values.disableVfcVfcHwvnfmdriver }} +#{{ if not .Values.disableVfcVfcNokiavnfmdriver }} --- apiVersion: v1 kind: Service @@ -139,10 +139,10 @@ spec: port: {{.Values.nokiavnfmdriver.port}} nodePort: {{.Values.nokiavnfmdriver.nodePort}} selector: - app: vfc-ztevmanagerdriver + app: vfc-nokiavnfmdriver type: NodePort #{{ end }} -#{{ if not .Values.disableVfcNokiaVnfmDriver }} +#{{ if not .Values.disableVfcVfcHwvnfmdriver }} --- apiVersion: v1 kind: Service @@ -164,7 +164,7 @@ metadata: ]' spec: ports: - - name: vfc-ztevmanagerdriver + - name: vfc-hwvnfmdriver port: {{.Values.hwvnfmdriver.port}} nodePort: {{.Values.hwvnfmdriver.nodePort}} selector: diff --git a/kubernetes/vfc/templates/vfc-ztevmanagerdriver-deployment.yaml b/kubernetes/vfc/templates/vfc-ztevmanagerdriver-deployment.yaml 
index 5559cd05ac..aed359eceb 100755 --- a/kubernetes/vfc/templates/vfc-ztevmanagerdriver-deployment.yaml +++ b/kubernetes/vfc/templates/vfc-ztevmanagerdriver-deployment.yaml @@ -1,33 +1,33 @@ -#{{ if not .Values.disableVfcVfcZtevmanagerdriver }} +#{{ if not .Values.disableVfcVfcztevnfmdriver }} apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: vfc-ztevmanagerdriver + name: vfc-ztevnfmdriver namespace: "{{ .Values.nsPrefix }}-{{ .Chart.Name }}" spec: - replicas: {{ .Values.ztevmanagerdriver.replicas }} + replicas: {{ .Values.ztevnfmdriver.replicas }} selector: matchLabels: - app: vfc-ztevmanagerdriver + app: vfc-ztevnfmdriver template: metadata: labels: - app: vfc-ztevmanagerdriver - name: vfc-ztevmanagerdriver + app: vfc-ztevnfmdriver + name: vfc-ztevnfmdriver spec: - hostname: vfc-ztevmanagerdriver + hostname: vfc-ztevnfmdriver containers: - args: - image: {{.Values.ztevmanagerdriver.image}} - name: "vfc-ztevmanagerdriver" + image: {{.Values.ztevnfmdriver.image}} + name: "vfc-ztevnfmdriver" env: - name: MSB_ADDR value: {{ .Values.msbaddr }} ports: - - containerPort: {{ .Values.ztevmanagerdriver.port }} + - containerPort: {{ .Values.ztevnfmdriver.port }} readinessProbe: tcpSocket: - port: {{ .Values.ztevmanagerdriver.port }} + port: {{ .Values.ztevnfmdriver.port }} initialDelaySeconds: 5 periodSeconds: 10 imagePullPolicy: "{{ .Values.pullPolicy }}" diff --git a/kubernetes/vfc/values.yaml b/kubernetes/vfc/values.yaml index fe5acd634e..46aa9b71f8 100644 --- a/kubernetes/vfc/values.yaml +++ b/kubernetes/vfc/values.yaml @@ -16,8 +16,8 @@ gvnfmdriver: port: 8484 nodePort: 30484 replicas: 1 -ztevmanagerdriver: - image: nexus3.onap.org:10001/onap/vfc/ztevmanagerdriver:v1.0.2 +ztevnfmdriver: + image: nexus3.onap.org:10001/onap/vfc/ztevnfmdriver:v1.0.2 port: 8410 nodePort: 30410 replicas: 1 |
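
A note on the aai/templates/all-services.yaml hunk: the two new aai-externalSystem entries only extend the msb.onap.org/service-info annotation, so the quickest post-deploy check is to confirm they were rendered onto the AAI front-end service. A minimal sketch (the service's metadata.name sits outside this hunk, so the grep is kept name-agnostic):

# a match here means both the aai-externalSystem entry and its -deprecated twin
# landed in the rendered annotation
kubectl -n onap-aai get svc -o json | grep aai-externalSystem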
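
The new Consul check definitions (aai-hbase-health.json, the appc/sdnc sdnctldb checks, msb-health.json, vfc-health.json, the mso and sdnc script checks) can be sanity-checked before they are baked into the config image by pointing a throwaway dev agent at the directory. A sketch, assuming a local consul binary contemporary with these files (they use the legacy "script" field rather than the newer "args" form); the script checks will show as critical locally because /consul/config/bin/kubectl only exists inside the consul container, but registration itself is still verifiable:

CFG=kubernetes/config/docker/init/src/config/consul/consul-agent-config

# dev-mode agent; script checks must be explicitly enabled for the *-script.sh checks to load
consul agent -dev -enable-script-checks -config-dir "$CFG" &
sleep 5

# every check defined in the JSON files should appear here with a status
curl -s http://127.0.0.1:8500/v1/agent/checks | python -m json.tool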
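
The two script checks reduce to a kubectl exec against the database pod, so the same probe can be run by hand when a check flaps. The manual equivalent of mso-mariadb-script.sh (pod lookup and namespace taken from the script; MYSQL_ROOT_PASSWORD stays single-quoted so it expands inside the container, not on the workstation):

POD=$(kubectl -n onap-mso get pod | grep -o "mariadb[^[:space:]]*")
kubectl -n onap-mso exec "$POD" -- \
  bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' \
  && echo "mariadb healthy" || echo "mariadb unhealthy"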
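
parse_yaml in prepull_docker.sh flattens a values.yaml into dotted key="value" lines, which is what the image/Version matching in the loop below it relies on. Pasting the function into a shell and feeding it a toy file (hypothetical content, output shape inferred from the awk in the function) illustrates this:

cat > /tmp/values.yaml <<'EOF'
image:
  readiness: oomk8s/readiness-check:1.0.0
mso:
  image: nexus3.onap.org:10001/openecomp/mso
  imageVersion: 1.1.1
EOF

parse_yaml /tmp/values.yaml
# image.readiness="oomk8s/readiness-check:1.0.0"
# mso.image="nexus3.onap.org:10001/openecomp/mso"
# mso.imageVersion="1.1.1"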
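
Typical invocation of the new prepull script, per its own options() help; -r/-u/-p default to nexus3.onap.org:10001 and docker/docker, so pointing -l at the chart tree is usually enough. Prepulling is also what makes the sdnc values.yaml switch from pullPolicy: Always to IfNotPresent pay off, since the kubelet can then reuse images already on the node:

# pull every image referenced by values.yaml files under ./kubernetes in the background,
# then wait for the pulls to drain
./kubernetes/config/prepull_docker.sh -l ./kubernetes -r nexus3.onap.org:10001 -u docker -p docker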
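
The deleteAll.bash additions guard against deleting into the wrong cluster; two invocations that exercise the new paths (the context name onap-prod is illustrative):

# non-interactive delete: -y skips the kubectl context confirmation entirely
./deleteAll.bash -n onap -y

# explicit target cluster: if onap-prod is not the current kubectl context,
# the script offers to switch to it before deleting anything
./deleteAll.bash -n onap -c onap-prod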
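
Because the ztevmanagerdriver to ztevnfmdriver rename changes Kubernetes object names as well as the values key, an in-place upgrade leaves the old service and deployment behind. A quick post-upgrade check (namespace onap-vfc follows the nsPrefix default above):

# only the renamed objects should remain, exposing 8410/30410
kubectl -n onap-vfc get deploy,svc | grep ztev
kubectl -n onap-vfc get svc vfc-ztevnfmdriver \
  -o jsonpath='{.metadata.annotations.msb\.onap\.org/service-info}'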