Diffstat (limited to 'kubernetes/contrib/tools')
-rwxr-xr-x  kubernetes/contrib/tools/oomstat.py              256
-rwxr-xr-x  kubernetes/contrib/tools/registry-initialize.sh  152
-rwxr-xr-x  kubernetes/contrib/tools/rke/rke_setup.sh        353
3 files changed, 0 insertions, 761 deletions
diff --git a/kubernetes/contrib/tools/oomstat.py b/kubernetes/contrib/tools/oomstat.py
deleted file mode 100755
index 464290d3f6..0000000000
--- a/kubernetes/contrib/tools/oomstat.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Copyright (c) 2018 Orange
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Provides utilities to display resource stats for oom (sub)modules
-"""
-
-import os
-import sys
-import getopt
-from fnmatch import fnmatch as match
-import yaml
-
-def info(thing):
- if thing:
- sys.stderr.write("{}\n".format(thing))
-
-try:
- from tabulate import tabulate
-except ImportError as e:
- info("Warning: cannot import tabulate module (): {}".format(str(e)))
- def tabulate(lines, headers, tablefmt=None):
- ''' basic tabulate function '''
- fmt = ""
- nbco = len(headers)
- lenco = map(len, headers)
- for line in lines:
- for i in range(nbco):
- lenco[i] = max(lenco[i], len(str(line[i])))
-
- fmt = map(lambda n: "{{:<{}}}".format(n), map(lambda i: i+2, lenco))
- fmt = " ".join(fmt)
- sep = map(lambda x: '-'*(x+2), lenco)
-
- output = [fmt.format(*headers), fmt.format(*sep)]
- for line in lines:
- output.append(fmt.format(*line))
- return "\n".join(output)
-
-
-def values(root='.'):
- ''' Get the list of values.yaml files '''
- a = []
- for dirname, dirnames, filenames in os.walk(root):
- for filename in filenames:
- if filename == 'values.yaml':
- a.append((dirname, filename))
-
- if '.git' in dirnames:
- # don't go into any .git directories.
- dirnames.remove('.git')
- return a
-
-
-def keys(dic, prefix=None):
- ''' recursively traverse the specified dict to collect existing keys '''
- result = []
- if dic:
- for k, v in dic.items():
- if prefix:
- k = '.'.join((prefix, k))
- if isinstance(v, dict):
- result += keys(v, k)
- else:
- result.append(k)
- return result
-
-
-class Project:
- '''
- class to access oom (sub)module (aka project) resources
- '''
-
- def __init__(self, dirname, filename):
- self.dirname = os.path.normpath(dirname)
- self.name = self.explicit()
- self.filename = os.path.join(dirname, filename)
- self.resources = None
- self.load()
-
- def load(self):
- ''' load resources from yaml description '''
- with open(self.filename, 'r') as istream:
- try:
- v = yaml.load(istream)
- if v:
- self.resources = v.get('resources', None)
- except Exception as e:
- print(e)
- raise
-
- def explicit(self):
- ''' return an explicit name for the project '''
- path = []
- head, name = os.path.split(self.dirname)
- if not name:
- return head
- while head:
- head, tail = os.path.split(head)
- if tail:
- path.append(tail)
- else:
- path.append(head)
- head = None
- path.reverse()
- index = path.index('charts') if 'charts' in path else None
- if index:
- name = os.path.join(path[index-1], name)
- return name
-
- def __contains__(self, key):
- params = self.resources
- if key:
- for k in key.split('.'):
- if params and k in params:
- params = params[k]
- else:
- return False
- return True
-
- def __getitem__(self, key):
- params = self.resources
- for k in key.split('.'):
- if k in params:
- params = params[k]
- if params != self.resources:
- return params
-
- def get(self, key, default="-"):
- """ mimic dict method """
- if key in self:
- return self[key]
- return default
-
- def keys(self):
- """ mimic dict method """
- return keys(self.resources)
-
-
-#
-#
-#
-
-def usage(status=None):
- """ usage doc """
- arg0 = os.path.basename(os.path.abspath(sys.argv[0]))
- print("""Usage: {} [options] <root-directory>""".format(arg0))
- print((
- "\n"
- "Options:\n"
- "-h, --help Show this help message and exit\n"
- "-t, --table <format> Use the specified format to display the result table.\n"
- " Valid formats are those from the python `tabulate'\n"
- " module. When not available, a basic builtin tabular\n"
- " function is used and this field has no effect\n"
- "-f, --fields Comma separated list of resources fields to display.\n"
- " You may use wildcard patterns, eg small.*. Implicit\n"
- " value is *, ie all available fields will be used\n"
- "Examples:\n"
- " # {0} /opt/oom/kubernetes\n"
- " # {0} -f small.\\* /opt/oom/kubernetes\n"
- " # {0} -f '*requests.*' -t fancy_grid /opt/oom/kubernetes\n"
- " # {0} -f small.requests.cpu,small.requests.memory /opt/oom/kubernetes\n"
- ).format(arg0))
- if status is not None:
- sys.exit(status)
-
-
-def getopts():
- """ read options from cmdline """
- opts, args = getopt.getopt(sys.argv[1:],
- "hf:t:",
- ["help", "fields=", "table="])
- if len(args) != 1:
- usage(1)
-
- root = args[0]
- table = None
- fields = ['*']
- patterns = []
-
- for opt, arg in opts:
- if opt in ("-h", '--help'):
- usage(0)
- elif opt in ("-f", "--fields"):
- fields = arg.split(',')
- elif opt in ("-t", "--table"):
- table = arg
-
- return root, table, fields, patterns
-
-
-def main():
- """ main """
- try:
- root, table, fields, patterns = getopts()
- except getopt.GetoptError as e:
- print("Error: {}".format(e))
- usage(1)
-
- if not os.path.isdir(root):
- info("Cannot open {}: Not a directory".format(root))
- return
-
- # find projects
- projects = []
- for dirname, filename in values(root):
- projects.append(Project(dirname, filename))
- if not projects:
- info("No projects found in {} directory".format(root))
- return
-
- # check if we want to use pattern matching (wildcard only)
- if fields and reduce(lambda x, y: x or y,
- map(lambda string: '*' in string, fields)):
- patterns = fields
- fields = []
-
- # if fields are not specified or patterns are used, discover available fields
- # and use them (sort for readability)
- if patterns or not fields:
- avail = sorted(set(reduce(lambda x, y: x+y,
- map(lambda p: p.keys(), projects))))
- if patterns:
- for pattern in patterns:
- fields += filter(lambda string: match(string, pattern), avail)
- else:
- fields = avail
-
- # collect values for each project
- results = map(lambda project: [project.name] + map(project.get,
- fields),
- projects)
-
- # and then print
- if results:
- headers = ['project'] + fields
- print(tabulate(sorted(results), headers, tablefmt=table))
-
-
-main()
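
For reference, the removed oomstat.py needed only PyYAML, with tabulate optional for nicer tables; a minimal setup-and-run sketch, assuming pip is available for the script's interpreter and using the /opt/oom/kubernetes checkout path from the usage examples above:

    pip install PyYAML tabulate        # tabulate is optional; the script falls back to a builtin formatter
    ./oomstat.py -f '*requests.*' -t fancy_grid /opt/oom/kubernetes
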
diff --git a/kubernetes/contrib/tools/registry-initialize.sh b/kubernetes/contrib/tools/registry-initialize.sh
deleted file mode 100755
index 798f375509..0000000000
--- a/kubernetes/contrib/tools/registry-initialize.sh
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/sh -x
-
-# Copyright (c) 2021 AT&T. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pre-requisite
-# 1. Chart packages available under local directory provided as input/argument
-# 2. helm client installed with push plugin
-# 3. ONAP chartmuseum service deployed
-
-usage()
-{
- echo "Chart Base directory or helm chart from local repo must be provided as input!!"
- echo "Usage: registry-initialize.sh -d chartdirectory \
-<-n namespace override> <-r helmrelease override> <-p chart name prefix> | <-h helm charts from local repo>"
- exit 1
-}
-
-if [ $# -eq 0 ]; then
- usage
-fi
-
-# defaults
-NAMESPACE=onap
-RLS_NAME=onap
-LOGIN=""
-PASSWORD=""
-PREF=""
-HELM_REPO=local
-
-while getopts ":d:n:r:p:h:c:" opt; do
- case $opt in
- d) BASEDIR="$OPTARG"
- ;;
- n) NAMESPACE="$OPTARG"
- ;;
- r) RLS_NAME="$OPTARG"
- ;;
- p) PREF="$OPTARG"
- ;;
- h) HELM_CHART="$OPTARG"
- ;;
- c) HELM_REPO="$OPTARG"
- ;;
- \?) echo "Invalid option -$OPTARG" >&2
- usage
- ;;
- esac
-done
-
-
-if [ -z "$BASEDIR" ] && [ -z "$HELM_CHART" ] ; then
- echo "Chart base directory provided $BASEDIR and helm chart from local repo is empty"
- exit
-fi
-
-if [ -n "$BASEDIR" ] && [ -n "$HELM_CHART" ] ; then
- echo "Both chart base directory $BASEDIR and helm chart from local repo $HELM_CHART cannot be used at the same time "
- exit
-fi
-
-if [ -n "$BASEDIR" ]; then
- if [ "$(find $BASEDIR -maxdepth 1 -name '*tgz' -print -quit)" ]; then
- echo "$BASEDIR valid"
- else
- echo "No chart package on $BASEDIR provided"
- exit
- fi
-fi
-
-if [ -n "$HELM_CHART" ]; then
- tmp_location=$(mktemp -d)
- helm pull $HELM_REPO/$HELM_CHART -d $tmp_location
- if [ $? -eq 0 ]; then
- echo "Helm chart $HELM_CHART has been pulled out from in $HELM_REPO repo"
- BASEDIR=$tmp_location
- else
- echo "No chart package $HELM_CHART on $HELM_REPO repo"
- exit
- fi
-fi
-
-if [ -z "$PREF" ] && [ -z "$HELM_CHART" ] ; then
- PREF=dcae
-fi
-
-LOGIN=$(kubectl -n "$NAMESPACE" get secret \
- "${RLS_NAME}-chartmuseum-registrycred" \
- -o jsonpath='{.data.login}' | base64 -d)
-
-PASSWORD=$(kubectl -n "$NAMESPACE" get secret \
- "${RLS_NAME}-chartmuseum-registrycred" \
- -o jsonpath='{.data.password}' | base64 -d)
-
-if [ -z "$LOGIN" ] || [ -z "$PASSWORD" ]; then
- echo "Login/Password credential for target registry cannot be retrieved"
- exit 1
-fi
-
-# Expose cluster port via port-forwarding
-kubectl -n $NAMESPACE port-forward service/chart-museum 27017:80 &
-if [ $? -ne 0 ]; then
- echo "Error in port forwarding; registry cannot be added!!"
- exit 1
-fi
-
-sleep 5
-
-# Add chartmuseum repo as helm repo
-# Credentials should match config defined in
-# oom\kubernetes\platform\components\chartmuseum\values.yaml
-helm repo add k8s-registry http://127.0.0.1:27017 --username "$LOGIN" \
- --password "$PASSWORD"
-if [ $? -ne 0 ]; then
- echo "registry cannot be added!!"
- pkill -f "port-forward service/chart-museum"
- exit 1
-fi
-
-# Initial scope is pushing only dcae charts
-# can be expanded to include all onap charts if required
-for file in $BASEDIR/$PREF*tgz; do
- # use helm plugin to push charts
- helm cm-push -f $file k8s-registry
- if [ $? -eq 0 ]; then
- echo "$file uploaded to registry successfully"
- else
- echo "registry upload failed!!"
- pkill -f "port-forward service/chart-museum"
- helm repo remove k8s-registry
- exit 1
- fi
-done
-
-echo "All Helm charts successfully uploaded into internal repository"
-
-# Remove the port-forwarding process
-pkill -f "port-forward service/chart-museum"
-
-# Remove helm registry from local
-helm repo remove k8s-registry
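
For context, the removed registry-initialize.sh expected the chartmuseum push plugin and a directory of pre-packaged charts; a sketch of the end-to-end flow, where the staging directory and chart source path are illustrative only:

    helm plugin install https://github.com/chartmuseum/helm-push    # provides the `helm cm-push` command
    mkdir -p /tmp/onap-charts
    helm package ./dcaegen2-services -d /tmp/onap-charts             # hypothetical chart source path
    ./registry-initialize.sh -d /tmp/onap-charts -n onap -r onap -p dcae
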
diff --git a/kubernetes/contrib/tools/rke/rke_setup.sh b/kubernetes/contrib/tools/rke/rke_setup.sh
deleted file mode 100755
index a8938a96ee..0000000000
--- a/kubernetes/contrib/tools/rke/rke_setup.sh
+++ /dev/null
@@ -1,353 +0,0 @@
-#!/bin/sh
-
-#############################################################################
-# Copyright © 2019 Bell.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#############################################################################
-#
-# This installation is for an RKE install of kubernetes
-# after this run the standard oom install
-# this installation can be run on any ubuntu 16.04/18.04 or RHEL 7.6 (root only) host - VM, physical, or cloud (azure/aws)
-# https://wiki.onap.org/display/DW/OOM+RKE+Kubernetes+Deployment
-# source from https://jira.onap.org/browse/OOM-1598
-#
-# master/dublin
-# RKE 0.1.16 Kubernetes 1.11.6, kubectl 1.11.6, Helm 2.9.1, Docker 18.06
-# 20190428 RKE 0.2.1, Kubernetes 1.13.5, kubectl 1.13.5, Helm 2.12.3, Docker 18.09.5
-# single node install, HA pending
-
-usage() {
-cat <<EOF
-Usage: $0 [PARAMs]
-example
-sudo ./rke_setup.sh -b master -s rke.onap.cloud -e onap -l amdocs -v true
--u : Display usage
--b [branch] : branch = master or dublin (required)
--s [server] : server = IP or DNS name (required)
--e [environment] : use the default (onap)
--k [key] : ssh key name
--l [username] : login username account (use ubuntu for example)
-EOF
-}
-
-install_onap() {
- #constants
- PORT=8880
- KUBERNETES_VERSION=
- RKE_VERSION=0.2.1
- KUBECTL_VERSION=1.13.5
- HELM_VERSION=2.12.3
- DOCKER_VERSION=18.09
-
- # copy your private ssh key and cluster.yml file to the vm
- # on your dev machine
- #sudo cp ~/.ssh/onap_rsa .
- #sudo chmod 777 onap_rsa
- #scp onap_rsa ubuntu@192.168.241.132:~/
- # on this vm
- #sudo chmod 400 onap_rsa
- #sudo cp onap_rsa ~/.ssh
- # make sure the public key is set up correctly in
- # sudo vi ~/.ssh/authorized_keys
-
- echo "please supply your ssh key as provided by the -k keyname - it must be be chmod 400 and chown user:user in ~/.ssh/"
- echo "The RKE version specific cluster.yaml is already integrated in this script for 0.2.1 no need for below generation..."
- echo "rke config --name cluster.yml"
- echo "specifically"
- echo "address: $SERVER"
- echo "user: $USERNAME"
- echo "ssh_key_path: $SSHPATH_PREFIX/$SSHKEY"
-
- RKETOOLS=
- HYPERCUBE=
- POD_INFRA_CONTAINER=
- RKETOOLS=0.1.27
- HYPERCUBE=1.13.5-rancher1
- POD_INFRA_CONTAINER=rancher/pause:3.1
-
- cat > cluster.yml <<EOF
-# generated from rke_setup.sh
-nodes:
-- address: $SERVER
- port: "22"
- internal_address: ""
- role:
- - controlplane
- - worker
- - etcd
- hostname_override: ""
- user: $USERNAME
- docker_socket: /var/run/docker.sock
- ssh_key: ""
- ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
- ssh_cert: ""
- ssh_cert_path: ""
- labels: {}
-services:
- etcd:
- image: ""
- extra_args: {}
- extra_binds: []
- extra_env: []
- external_urls: []
- ca_cert: ""
- cert: ""
- key: ""
- path: ""
- snapshot: null
- retention: ""
- creation: ""
- backup_config: null
- kube-api:
- image: ""
- extra_args: {}
- extra_binds: []
- extra_env: []
- service_cluster_ip_range: 10.43.0.0/16
- service_node_port_range: ""
- pod_security_policy: false
- always_pull_images: false
- kube-controller:
- image: ""
- extra_args: {}
- extra_binds: []
- extra_env: []
- cluster_cidr: 10.42.0.0/16
- service_cluster_ip_range: 10.43.0.0/16
- scheduler:
- image: ""
- extra_args: {}
- extra_binds: []
- extra_env: []
- kubelet:
- image: ""
- extra_args:
- max-pods: 900
- extra_binds: []
- extra_env: []
- cluster_domain: cluster.local
- infra_container_image: ""
- cluster_dns_server: 10.43.0.10
- fail_swap_on: false
- kubeproxy:
- image: ""
- extra_args: {}
- extra_binds: []
- extra_env: []
-network:
- plugin: canal
- options: {}
-authentication:
- strategy: x509
- sans: []
- webhook: null
-system_images:
- etcd: rancher/coreos-etcd:v3.2.24-rancher1
- alpine: rancher/rke-tools:v$RKETOOLS
- nginx_proxy: rancher/rke-tools:v$RKETOOLS
- cert_downloader: rancher/rke-tools:v$RKETOOLS
- kubernetes_services_sidecar: rancher/rke-tools:v$RKETOOLS
- kubedns: rancher/k8s-dns-kube-dns:1.15.0
- dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
- kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
- kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
- kubernetes: rancher/hyperkube:v$HYPERCUBE
- flannel: rancher/coreos-flannel:v0.10.0-rancher1
- flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
- calico_node: rancher/calico-node:v3.4.0
- calico_cni: rancher/calico-cni:v3.4.0
- calico_controllers: ""
- calico_ctl: rancher/calico-ctl:v2.0.0
- canal_node: rancher/calico-node:v3.4.0
- canal_cni: rancher/calico-cni:v3.4.0
- canal_flannel: rancher/coreos-flannel:v0.10.0
- wave_node: weaveworks/weave-kube:2.5.0
- weave_cni: weaveworks/weave-npc:2.5.0
- pod_infra_container: $POD_INFRA_CONTAINER
- ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
- ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
- metrics_server: rancher/metrics-server:v0.3.1
-ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
-ssh_cert_path: ""
-ssh_agent_auth: false
-authorization:
- mode: rbac
- options: {}
-ignore_docker_version: false
-kubernetes_version: "$KUBERNETES_VERSION"
-private_registries: []
-ingress:
- provider: ""
- options: {}
- node_selector: {}
- extra_args: {}
-cluster_name: ""
-cloud_provider:
- name: ""
-prefix_path: ""
-addon_job_timeout: 0
-bastion_host:
- address: ""
- port: ""
- user: ""
- ssh_key: ""
- ssh_key_path: ""
- ssh_cert: ""
- ssh_cert_path: ""
-monitoring:
- provider: ""
- options: {}
-restore:
- restore: false
- snapshot_name: ""
-dns: null
-EOF
-
-
-
- echo "Installing on ${SERVER} for ${BRANCH}: RKE: ${RKE_VERSION} Kubectl: ${KUBECTL_VERSION} Helm: ${HELM_VERSION} Docker: ${DOCKER_VERSION} username: ${USERNAME}"
- sudo echo "127.0.0.1 ${SERVER}" >> /etc/hosts
- echo "Install docker - If you must install as non-root - comment out the docker install below - run it separately, run the user mod, logout/login and continue this script"
- curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
- sudo usermod -aG docker $USERNAME
-
- echo "Install RKE"
- sudo wget https://github.com/rancher/rke/releases/download/v$RKE_VERSION/rke_linux-amd64
- mv rke_linux-amd64 rke
- sudo chmod +x rke
- sudo mv ./rke /usr/local/bin/rke
-
- echo "Install make - required for beijing+ - installed via yum groupinstall Development Tools in RHEL"
- # ubuntu specific
- sudo apt-get install make -y
-
- sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
- sudo chmod +x ./kubectl
- sudo mv ./kubectl /usr/local/bin/kubectl
- sudo mkdir ~/.kube
- wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
- sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
- sudo mv linux-amd64/helm /usr/local/bin/helm
-
- echo "Bringing RKE up - using supplied cluster.yml"
- sudo rke up
- echo "wait 2 extra min for the cluster"
- sleep 60
- echo "1 more min"
- sleep 60
- echo "copy kube_config_cluter.yaml generated - to ~/.kube/config"
- sudo cp kube_config_cluster.yml ~/.kube/config
- # avoid using sudo for kubectl
- sudo chmod 777 ~/.kube/config
- echo "Verify all pods up on the kubernetes system - will return localhost:8080 until a host is added"
- echo "kubectl get pods --all-namespaces"
- kubectl get pods --all-namespaces
- echo "install tiller/helm"
- kubectl -n kube-system create serviceaccount tiller
- kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
- helm init --service-account tiller
- kubectl -n kube-system rollout status deploy/tiller-deploy
- echo "upgrade server side of helm in kubernetes"
- if [ "$USERNAME" = "root" ]; then
- helm version
- else
- sudo helm version
- fi
- echo "sleep 30"
- sleep 30
- if [ "$USERNAME" = "root" ]; then
- helm init --upgrade
- else
- sudo helm init --upgrade
- fi
- echo "sleep 30"
- sleep 30
- echo "verify both versions are the same below"
- if [ "$USERNAME" = "root" ]; then
- helm version
- else
- sudo helm version
- fi
- echo "start helm server"
- if [ "$USERNAME" = "root" ]; then
- helm serve &
- else
- sudo helm serve &
- fi
- echo "sleep 30"
- sleep 30
- echo "add local helm repo"
- if [ "$USERNAME" = "root" ]; then
- helm repo add local http://127.0.0.1:8879
- helm repo list
- else
- sudo helm repo add local http://127.0.0.1:8879
- sudo helm repo list
- fi
- echo "To enable grafana dashboard - do this after running cd.sh which brings up onap - or you may get a 302xx port conflict"
- echo "kubectl expose -n kube-system deployment monitoring-grafana --type=LoadBalancer --name monitoring-grafana-client"
- echo "to get the nodeport for a specific VM running grafana"
- echo "kubectl get services --all-namespaces | grep graf"
- sudo docker version
- helm version
- kubectl version
- kubectl get services --all-namespaces
- kubectl get pods --all-namespaces
- echo "finished!"
-}
-
-BRANCH=
-SERVER=
-ENVIRON=
-VALIDATE=false
-USERNAME=ubuntu
-SSHPATH_PREFIX=~/.ssh
-
-while getopts ":b:s:e:u:l:k:v" PARAM; do
- case $PARAM in
- u)
- usage
- exit 1
- ;;
- b)
- BRANCH=${OPTARG}
- ;;
- e)
- ENVIRON=${OPTARG}
- ;;
- s)
- SERVER=${OPTARG}
- ;;
- l)
- USERNAME=${OPTARG}
- ;;
- k)
- SSHKEY=${OPTARG}
- ;;
- v)
- VALIDATE=${OPTARG}
- ;;
- ?)
- usage
- exit
- ;;
- esac
-done
-
-if [ -z $BRANCH ]; then
- usage
- exit 1
-fi
-
-install_onap $BRANCH $SERVER $ENVIRON $USERNAME $SSHPATH_PREFIX $SSHKEY $VALIDATE
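
The header of the removed rke_setup.sh notes that the standard OOM install follows once RKE and Helm are up; a rough Helm 2-era sketch of that follow-on step, with the release name illustrative only:

    git clone -b master https://gerrit.onap.org/r/oom
    cd oom/kubernetes
    make all                                   # package the charts into the local repo served by `helm serve`
    helm install local/onap --name dev --namespace onap
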