Diffstat (limited to 'bootstrap/vagrant-onap/lib')
-rwxr-xr-x  bootstrap/vagrant-onap/lib/_composed_functions | 11
-rwxr-xr-x  bootstrap/vagrant-onap/lib/_onap_functions | 6
-rwxr-xr-x  bootstrap/vagrant-onap/lib/aai | 35
-rwxr-xr-x  bootstrap/vagrant-onap/lib/appc | 23
-rwxr-xr-x  bootstrap/vagrant-onap/lib/ccsdk | 32
-rwxr-xr-x  bootstrap/vagrant-onap/lib/commons | 100
-rwxr-xr-x  bootstrap/vagrant-onap/lib/config/env-vars | 79
-rwxr-xr-x  bootstrap/vagrant-onap/lib/dcae | 59
-rw-r--r--  bootstrap/vagrant-onap/lib/files/all-in-one | 585
-rw-r--r--  bootstrap/vagrant-onap/lib/files/globals.yml | 2
-rw-r--r--  bootstrap/vagrant-onap/lib/files/kolla-build.conf | 5
-rw-r--r--  bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py | 40
-rw-r--r--  bootstrap/vagrant-onap/lib/files/passwords.yml | 216
-rwxr-xr-x  bootstrap/vagrant-onap/lib/functions | 223
-rwxr-xr-x  bootstrap/vagrant-onap/lib/mr | 11
-rwxr-xr-x  bootstrap/vagrant-onap/lib/msb | 50
-rwxr-xr-x  bootstrap/vagrant-onap/lib/mso | 32
-rwxr-xr-x  bootstrap/vagrant-onap/lib/multicloud | 47
-rwxr-xr-x  bootstrap/vagrant-onap/lib/oom | 220
-rwxr-xr-x  bootstrap/vagrant-onap/lib/openstack | 71
-rwxr-xr-x  bootstrap/vagrant-onap/lib/policy | 25
-rwxr-xr-x  bootstrap/vagrant-onap/lib/portal | 17
-rwxr-xr-x  bootstrap/vagrant-onap/lib/robot | 38
-rwxr-xr-x  bootstrap/vagrant-onap/lib/sdc | 45
-rwxr-xr-x  bootstrap/vagrant-onap/lib/sdnc | 19
-rwxr-xr-x  bootstrap/vagrant-onap/lib/vfc | 28
-rwxr-xr-x  bootstrap/vagrant-onap/lib/vid | 27
-rwxr-xr-x [-rw-r--r--]  bootstrap/vagrant-onap/lib/vnfsdk | 23
-rwxr-xr-x [-rw-r--r--]  bootstrap/vagrant-onap/lib/vvp | 23
29 files changed, 1605 insertions(+), 487 deletions(-)
diff --git a/bootstrap/vagrant-onap/lib/_composed_functions b/bootstrap/vagrant-onap/lib/_composed_functions
index f46499766..9f2d0a1d8 100755
--- a/bootstrap/vagrant-onap/lib/_composed_functions
+++ b/bootstrap/vagrant-onap/lib/_composed_functions
@@ -1,16 +1,5 @@
#!/bin/bash
-# compile_src() - Function that compiles the java source code thru maven
-function compile_src {
- local src_folder=$1
- pushd $src_folder
- if [ -f pom.xml ]; then
- install_maven
- mvn clean install -U -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none
- fi
- popd
-}
-
# build_docker_image() - Build Docker container image from source code
function build_docker_image {
local src_folder=$1
diff --git a/bootstrap/vagrant-onap/lib/_onap_functions b/bootstrap/vagrant-onap/lib/_onap_functions
index cedd6f0a3..960b298ef 100755
--- a/bootstrap/vagrant-onap/lib/_onap_functions
+++ b/bootstrap/vagrant-onap/lib/_onap_functions
@@ -15,7 +15,7 @@ function create_configuration_files {
# docker_openecomp_login() - Login to OpenECOMP Docker Hub
function docker_openecomp_login {
install_docker
- docker login -u $nexus_username -p $nexus_password $nexus_docker_repo
+ docker login -u ${nexus_username:-docker} -p ${nexus_password:-docker} ${nexus_docker_repo:-nexus3.onap.org:10001}
}
# pull_openecomp_image() - Pull Docker container image from a Docker Registry Hub
@@ -23,7 +23,7 @@ function pull_openecomp_image {
local image=$1
local tag=$2
docker_openecomp_login
- pull_docker_image $nexus_docker_repo/openecomp/${image}:${docker_version-latest} $tag
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/openecomp/${image}:${docker_version-latest} $tag
docker logout
}
@@ -32,7 +32,7 @@ function pull_onap_image {
local image=$1
local tag=$2
docker_openecomp_login
- pull_docker_image $nexus_docker_repo/onap/${image}:${docker_version-latest} $tag
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/onap/${image}:${docker_version-latest} $tag
docker logout
}
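
The replacements above lean on two slightly different bash default expansions; a minimal standalone sketch of the difference (the variable values are illustrative only):

#!/bin/bash
# ${var:-default} substitutes when var is unset OR empty.
# ${var-default}  substitutes only when var is unset; an empty value is kept.
unset nexus_docker_repo
docker_version=""
echo "${nexus_docker_repo:-nexus3.onap.org:10001}"   # prints nexus3.onap.org:10001
echo "${docker_version-latest}"                      # prints an empty line (set but empty)
docker_version="v1.0.0"
echo "${docker_version-latest}"                      # prints v1.0.0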
diff --git a/bootstrap/vagrant-onap/lib/aai b/bootstrap/vagrant-onap/lib/aai
index d267e020d..7a68fbbf4 100755
--- a/bootstrap/vagrant-onap/lib/aai
+++ b/bootstrap/vagrant-onap/lib/aai
@@ -1,16 +1,8 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
hbase_version=1.2.3
-aai_src_folder=$git_src_folder/aai
-aai_repos=("aai/aai-common" "aai/aai-config" "aai/aai-data" "aai/aai-service" \
-"aai/babel" "aai/champ" "aai/data-router" "aai/esr-gui" "aai/esr-server" \
-"aai/gizmo" "aai/logging-service" "aai/model-loader" "aai/resources" \
-"aai/rest-client" "aai/router-core" "aai/search-data-service" \
-"aai/test-config" "aai/traversal" "aai/sparky-fe" "aai/sparky-be")
# install_hadoop() - Function that installs Hadoop
function install_hadoop {
@@ -53,18 +45,12 @@ function install_haproxy {
service haproxy restart
}
-# clone_all_aai_repos() - Function that clones AAI source repo.
-function clone_all_aai_repos {
- for repo in ${aai_repos[@]}; do
- clone_repo $repo $aai_src_folder${repo#*aai}
- done
-}
# compile_aai_repos() - Function that compiles AAI source repo.
function compile_aai_repos {
- local repos=("aai/aai-common" "aai/resources" "aai/logging-service" "aai/traversal")
+ local repos="aai/aai-common aai/resources aai/logging-service aai/traversal"
if [[ "$compile_repo" == "True" ]]; then
- repos=("${aai_repos[@]}")
+ repos="${repos[aai]}"
fi
for repo in ${repos[@]}; do
@@ -103,17 +89,12 @@ function start_aai_microservices {
done
}
-# _pull_hbase_image() - Pull HBase container image from a Docker Registry Hub
-function _pull_hbase_image {
- docker_openecomp_login
- docker pull $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
-}
-
# install_hbase() - Install HBase Service
function install_hbase {
docker rm -f hbase
- _pull_hbase_image
- docker run -d --net=host --name="hbase" $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
+ docker_openecomp_login
+ docker pull $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
+ run_docker_image -d --net=host --name="hbase" $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
}
# install_ajsc() - Install ASJC Java service container
@@ -131,7 +112,7 @@ EOL
pull_openecomp_image ajsc-aai
fi
- docker run --env-file /etc/ajsc-aai.conf --name=aai-service --net=host -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt -it -d $nexus_docker_repo/openecomp/ajsc-aai:$docker_version
+ run_docker_image --env-file /etc/ajsc-aai.conf --name=aai-service --net=host -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt -it -d $nexus_docker_repo/openecomp/ajsc-aai:$docker_version
}
# install_model_loader() Install Model Loader
@@ -160,7 +141,7 @@ EOL
pull_openecomp_image model-loader
ARGS+="--name=model-loader-service -it -d --env-file /etc/model-loader.conf $nexus_docker_repo/openecomp/model-loader:$docker_version"
fi
- docker run ${ARGS}
+ run_docker_image ${ARGS}
}
# _wait_for_sdc() - Function that determines if SDC is up and running
@@ -184,7 +165,7 @@ function init_aai {
install_haproxy
if [[ "$clone_repo" == "True" ]]; then
- clone_all_aai_repos
+ clone_repos "aai"
if [[ "$compile_repo" == "True" ]]; then
compile_aai_repos
fi
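
The clone_repos helper used above is added later in this change (lib/functions) and resolves each destination from the maps in lib/config/env-vars; a minimal sketch of the same expansion, with the maps trimmed down for illustration:

#!/bin/bash
declare -A src_folders=( ["aai"]="/opt/aai" )
declare -A repos=( ["aai"]="aai/aai-common aai/resources aai/traversal" )
project="aai"
for repo in ${repos[$project]}; do
    # ${repo#*aai} strips through the first "aai", so "aai/resources"
    # lands in /opt/aai/resources and "aai/aai-common" in /opt/aai/aai-common.
    echo "clone_repo $repo ${src_folders[$project]}${repo#*$project}"
done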
diff --git a/bootstrap/vagrant-onap/lib/appc b/bootstrap/vagrant-onap/lib/appc
index 4d93f54e5..ad01ca53e 100755
--- a/bootstrap/vagrant-onap/lib/appc
+++ b/bootstrap/vagrant-onap/lib/appc
@@ -1,27 +1,8 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/sdnc
source /var/onap/functions
-appc_src_folder=$git_src_folder/appc
-appc_repos=("appc" "appc/deployment")
-
-# clone_all_appc_repos() - Function that clones APPC source repo.
-function clone_all_appc_repos {
- for repo in ${appc_repos[@]}; do
- clone_repo $repo $appc_src_folder${repo#*appc}
- done
-}
-
-# compile_all_appc_repos() - Function that compiles APPC source repo.
-function compile_all_appc_repos {
- for repo in ${appc_repos[@]}; do
- compile_src $appc_src_folder${repo#*appc}
- done
-}
-
# _build_appc_images() - Function that creates APPC images from source code.
function _build_appc_images {
get_sdnc_images
@@ -49,9 +30,9 @@ function install_appc {
# init_appc() - Function that initialize APPC services
function init_appc {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_appc_repos
+ clone_repos "appc"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_appc_repos
+ compile_repos "appc"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/ccsdk b/bootstrap/vagrant-onap/lib/ccsdk
index 03987ea7e..112839c69 100755
--- a/bootstrap/vagrant-onap/lib/ccsdk
+++ b/bootstrap/vagrant-onap/lib/ccsdk
@@ -1,35 +1,10 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-ccsdk_src_folder=$git_src_folder/ccsdk
-ccsdk_repos=("ccsdk" "ccsdk/dashboard" "ccsdk/distribution" "ccsdk/parent"
-"ccsdk/platform/blueprints" "ccsdk/platform/nbapi" "ccsdk/platform/plugins"
-"ccsdk/sli" "ccsdk/sli/adaptors" "ccsdk/sli/core" "ccsdk/sli/northbound"
-"ccsdk/sli/plugins" "ccsdk/storage" "ccsdk/storage/esaas" "ccsdk/storage/pgaas"
-"ccsdk/utils")
-
-# clone_ccsdk_repos() - Function that clones the CCSDK repositories
-function clone_ccsdk_repos {
- for repo in ${ccsdk_repos[@]}; do
- clone_repo $repo $ccsdk_src_folder${repo#ccsdk}
- done
-}
-
-# compile_ccsdk_repos() -
-function compile_ccsdk_repos {
- install_package unzip
- for repo in ${ccsdk_repos[@]}; do
- compile_src $ccsdk_src_folder${repo#ccsdk}
- done
-}
-
# _build_ccsdk_images() - Build CCSDK Docker images from source code
function _build_ccsdk_images {
install_package unzip
- clone_repo ccsdk/distribution $ccsdk_src_folder/distribution
compile_src $ccsdk_src_folder/distribution
for image in ubuntu opendaylight odlsli dgbuilder-docker; do
build_docker_image $ccsdk_src_folder/distribution/$image docker
@@ -50,9 +25,12 @@ function get_ccsdk_images {
# init_ccsdk() - Function that initialize Multi Cloud services
function init_ccsdk {
if [[ "$clone_repo" == "True" ]]; then
- clone_ccsdk_repos
+ clone_repos "ccsdk"
if [[ "$compile_repo" == "True" ]]; then
- compile_ccsdk_repos
+ compile_repos "ccsdk"
fi
fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_ccsdk_images
+ fi
}
diff --git a/bootstrap/vagrant-onap/lib/commons b/bootstrap/vagrant-onap/lib/commons
index 26e2cc26a..5d0c69108 100755
--- a/bootstrap/vagrant-onap/lib/commons
+++ b/bootstrap/vagrant-onap/lib/commons
@@ -1,26 +1,27 @@
#!/bin/bash
-set -o xtrace
-
# update_repos() - Function that updates linux repositories
function update_repos {
+ echo "Updating repositories list..."
if [ -f /var/onap/files/sources.list ]; then
cp /var/onap/files/sources.list /etc/apt/sources.list
fi
- if [ -f /var/onap/files/proxyrc ]; then
- source /var/onap/files/proxyrc
- cp /var/onap/files/proxyrc /etc/profile.d/proxy.sh
-
- if [ -f /etc/apt/apt.conf ]; then
- echo "Acquire::http::Proxy \"${http_proxy}\";" >> /etc/apt/apt.conf
- echo "Acquire::https::Proxy \"${https_proxy}\";" >> /etc/apt/apt.conf
- fi
- if [ -d /etc/apt/apt.conf.d ] & [ ! -f /etc/apt/apt.conf.d/70proxy.conf ]; then
- echo "Acquire::http::Proxy \"${http_proxy}\";" >> /etc/apt/apt.conf.d/70proxy.conf
- echo "Acquire::https::Proxy \"${https_proxy}\";" >> /etc/apt/apt.conf.d/70proxy.conf
- fi
- fi
- apt-get update -qq -y
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ zypper -n ref
+ ;;
+ ubuntu|debian)
+ if [[ "$debug" == "False" ]]; then
+ apt-get update > /dev/null
+ else
+ apt-get update
+ fi
+ ;;
+ rhel|centos|fedora)
+ yum updateinfo
+ ;;
+ esac
}
# is_package_installed() - Function to tell if a package is installed
@@ -28,35 +29,88 @@ function is_package_installed {
if [[ -z "$@" ]]; then
return 1
fi
- dpkg -s "$@" > /dev/null 2> /dev/null
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ dpkg -s "$@" > /dev/null
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# install_packages() - Install a list of packages
function install_packages {
local package=$@
- update_repos
- apt-get install -y -qq $package
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get install -y -qq $package
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# install_package() - Install specific package if doesn't exist
function install_package {
local package=$1
+
if ! is_package_installed $package; then
- update_repos
- apt-get install -y -qq $package
+ echo "Installing $package..."
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ zypper install -y $package
+ ;;
+ ubuntu|debian)
+ if [[ "$debug" == "False" ]]; then
+ apt-get install -y -qq -o=Dpkg::Use-Pty=0 $package
+ else
+ apt-get install -y $package
+ fi
+ ;;
+ rhel|centos|fedora)
+ PKG_MANAGER=$(which dnf || which yum)
+ ${PKG_MANAGER} -y install $package
+ ;;
+ esac
fi
}
# uninstall_packages() - Uninstall a list of packages
function uninstall_packages {
local packages=$@
- apt-get purge -y -qq $packages
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get purge -y -qq $packages
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# uninstall_package() - Uninstall specific package if exists
function uninstall_package {
local package=$1
if is_package_installed $package; then
- apt-get purge -y -qq $package
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get purge -y -qq $package
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
fi
}
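
The distro dispatch introduced here is repeated by most helpers in this change; a standalone sketch of just the detection step (the echo actions are placeholders):

#!/bin/bash
# ID comes from /etc/os-release (e.g. ubuntu, centos, opensuse-leap);
# ${ID,,} lower-cases it so the case patterns match consistently.
source /etc/os-release || source /usr/lib/os-release
case ${ID,,} in
    *suse)              echo "use zypper" ;;
    ubuntu|debian)      echo "use apt-get" ;;
    rhel|centos|fedora) echo "use yum or dnf" ;;
    *)                  echo "unsupported distribution: $ID" ;;
esac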
diff --git a/bootstrap/vagrant-onap/lib/config/env-vars b/bootstrap/vagrant-onap/lib/config/env-vars
new file mode 100755
index 000000000..a55557ae7
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/config/env-vars
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+# Source code destination folder
+git_src_folder=/opt
+
+declare -A src_folders
+src_folders=(
+["aai"]="$git_src_folder/aai"
+["appc"]="$git_src_folder/appc"
+["ccsdk"]="$git_src_folder/ccsdk"
+["dcae"]="$git_src_folder/dcae"
+["mr"]="$git_src_folder/dcae/message-router"
+["msb"]="$git_src_folder/msb"
+["mso"]="$git_src_folder/mso"
+["multicloud"]="$git_src_folder/multicloud"
+["oom"]="$git_src_folder/oom"
+["policy"]="$git_src_folder/policy"
+["portal"]="$git_src_folder/portal"
+["robot"]="$git_src_folder/testsuite"
+["sdc"]="$git_src_folder/sdc"
+["sdnc"]="$git_src_folder/openecomp/sdnc"
+["vfc"]="$git_src_folder/vfc"
+["vid"]="$git_src_folder/vid"
+["vnfsdk"]="$git_src_folder/vnfsdk"
+["vvp"]="$git_src_folder/vvp"
+)
+
+# Repositories list
+declare -A repos
+repos=(
+["aai"]="aai/aai-common aai/aai-config aai/aai-data aai/aai-service \
+aai/babel aai/champ aai/data-router aai/esr-gui aai/esr-server aai/gizmo \
+aai/logging-service aai/model-loader aai/resources aai/rest-client \
+aai/router-core aai/search-data-service aai/test-config aai/traversal \
+aai/sparky-fe aai/sparky-be"
+["appc"]="appc appc/deployment"
+["ccsdk"]="ccsdk ccsdk/dashboard ccsdk/distribution ccsdk/parent \
+ccsdk/platform/blueprints ccsdk/platform/nbapi \
+ccsdk/platform/plugins ccsdk/sli ccsdk/sli/adaptors ccsdk/sli/core \
+ccsdk/sli/northbound ccsdk/sli/plugins ccsdk/storage \
+ccsdk/storage/esaas ccsdk/storage/pgaas ccsdk/utils"
+["dcae"]="dcae dcae/apod dcae/apod/analytics dcae/apod/buildtools \
+dcae/apod/cdap dcae/collectors dcae/collectors/ves dcae/controller \
+dcae/controller/analytics dcae/dcae-inventory dcae/demo \
+dcae/demo/startup dcae/demo/startup/aaf dcae/demo/startup/controller \
+dcae/demo/startup/message-router dcae/dmaapbc dcae/operation \
+dcae/operation/utils dcae/orch-dispatcher dcae/pgaas dcae/utils \
+dcae/utils/buildtools"
+["msb"]="msb/apigateway msb/discovery msb/java-sdk msb/swagger-sdk"
+["mso"]="mso mso/chef-repo mso/docker-config mso/libs mso/mso-config"
+["multicloud"]="multicloud multicloud/framework multicloud/openstack \
+multicloud/openstack/vmware multicloud/openstack/windriver \
+multicloud/azure"
+["oom"]="oom oom/registrator"
+["policy"]="policy/api policy/common policy/docker \
+policy/drools-applications policy/drools-pdp policy/engine \
+policy/gui policy/pap policy/pdp"
+["portal"]="portal portal/sdk ecompsdkos ui/dmaapbc"
+["robot"]="testsuite testsuite/heatbridge testsuite/properties \
+testsuite/python-testing-utils"
+["sdc"]="sdc sdc/jtosca sdc/sdc-distribution-client \
+sdc/sdc-docker-base sdc/sdc-titan-cassandra sdc/sdc-tosca \
+sdc/sdc-vnfdesign sdc/sdc-workflow-designer sdc/sdc_common"
+["sdnc"]="sdnc/adaptors sdnc/architecture sdnc/core sdnc/features \
+sdnc/northbound sdnc/oam sdnc/parent sdnc/plugins"
+["vfc"]="vfc/gvnfm vfc/gvnfm/vnflcm vfc/gvnfm/vnfmgr \
+vfc/gvnfm/vnfres vfc/nfvo vfc/nfvo/catalog vfc/nfvo/driver \
+vfc/nfvo/driver/ems vfc/nfvo/driver/sfc vfc/nfvo/driver/vnfm \
+vfc/nfvo/driver/vnfm/gvnfm vfc/nfvo/driver/vnfm/svnfm vfc/nfvo/lcm \
+vfc/nfvo/resmanagement vfc/nfvo/wfengine"
+["vid"]="vid vid/asdcclient"
+["vnfsdk"]="vnfsdk/compliance vnfsdk/functest vnfsdk/lctest \
+vnfsdk/model vnfsdk/pkgtools vnfsdk/refrepo vnfsdk/validation"
+["vvp"]="vvp/ansible-ice-bootstrap vvp/cms vvp/devkit \
+vvp/documentation vvp/engagementmgr vvp/gitlab vvp/image-scanner \
+vvp/jenkins vvp/portal vvp/postgresql vvp/test-engine \
+vvp/validation-scripts"
+)
+
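
A short sketch of how the rest of this change consumes these maps once /var/onap/config/env-vars has been sourced (the sdnc key is an arbitrary example):

#!/bin/bash
source /var/onap/config/env-vars
echo "sdnc checkouts live under ${src_folders[sdnc]}"
for repo in ${repos[sdnc]}; do
    echo "would clone https://git.onap.org/$repo"
done
# ${!repos[@]} iterates the project keys themselves.
echo "projects defined: ${!repos[@]}"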
diff --git a/bootstrap/vagrant-onap/lib/dcae b/bootstrap/vagrant-onap/lib/dcae
index 78ca8de06..d6fdd89b2 100755
--- a/bootstrap/vagrant-onap/lib/dcae
+++ b/bootstrap/vagrant-onap/lib/dcae
@@ -1,17 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-dcae_src_folder=$git_src_folder/dcae
-dcae_repos=("dcae" "dcae/apod" "dcae/apod/analytics" "dcae/apod/buildtools"
-"dcae/apod/cdap" "dcae/collectors" "dcae/collectors/ves" "dcae/controller"
-"dcae/controller/analytics" "dcae/dcae-inventory" "dcae/demo"
-"dcae/demo/startup" "dcae/demo/startup/aaf" "dcae/demo/startup/controller"
-"dcae/demo/startup/message-router" "dcae/dmaapbc" "dcae/operation"
-"dcae/operation/utils" "dcae/orch-dispatcher" "dcae/pgaas" "dcae/utils" "dcae/utils/buildtools")
-
# _create_config_file() - Creates a configuration yaml file for the controller
function _create_config_file {
cat > $dcae_src_folder/controller/config.yaml << EOL
@@ -41,50 +31,13 @@ GIT-MR-REPO: http://gerrit.onap.org/r/dcae/demo/startup/message-router.git
EOL
}
-# _build_docker_image() - Function that clones, compiles and build a Docker image
-function _build_docker_image {
- local src_folder=$1
- local name=$2
- install_docker
-
- pushd $src_folder
- # NOTE: Workaround for dmmapbc images
- sed -i '/LocalKey/d' Dockerfile
- local docker_build="docker build -t $name -f ./Dockerfile ."
- if [ $http_proxy ]; then
- docker_build+=" --build-arg http_proxy=$http_proxy"
- fi
- if [ $https_proxy ]; then
- docker_build+=" --build-arg https_proxy=$https_proxy"
- fi
- eval $docker_build
- popd
-}
-
-# clone_all_dcae_repos() - Function that clones DCAE source repo.
-function clone_all_dcae_repos {
- for repo in ${dcae_repos[@]}; do
- clone_repo $repo $dcae_src_folder${repo#*dcae}
- done
-}
-
-# compile_all_dcae_repos() - Function that compiles DCAE source repo.
-function compile_all_dcae_repos {
- for repo in ${dcae_repos[@]}; do
- compile_src $dcae_src_folder${repo#*dcae}
- done
-}
-
# _build_dcae_images() Function that builds DCAE docker images from source code.
function _build_dcae_images {
if [[ "$compile_repo" != "True" ]]; then
- compile_src $dcae_src_folder/dmaapbc
- compile_src $dcae_src_folder/orch-dispatcher
- compile_src $dcae_src_folder/demo
- compile_src $dcae_src_folder/dcae-inventory
+ compile_repos "dcae"
fi
- _build_docker_image $dcae_src_folder/dmaapbc openecomp/dcae-dmaapbc
- _build_docker_image $dcae_src_folder/orch-dispatcher dcae/orch-dispatcher
+ build_docker_image $dcae_src_folder/dmaapbc openecomp/dcae-dmaapbc
+ build_docker_image $dcae_src_folder/orch-dispatcher dcae/orch-dispatcher
pushd $dcae_src_folder/demo
bash dcae-demo-controller/src/main/docker-build/build.sh
@@ -118,15 +71,15 @@ function install_dcae {
make up
fi
popd
- # docker run -p 8080:8080 -d -v <some local directory>/config.yml:/opt/config.yml --name dcae-inventory <docker registry>/dcae-inventory:<version>
+ # run_docker_image -p 8080:8080 -d -v <some local directory>/config.yml:/opt/config.yml --name dcae-inventory <docker registry>/dcae-inventory:<version>
}
# init_dcae() - Function that initialize DCAE Controller services
function init_dcae {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_dcae_repos
+ clone_repos "dcae"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_dcae_repos
+ compile_repos "dcae"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/files/all-in-one b/bootstrap/vagrant-onap/lib/files/all-in-one
new file mode 100644
index 000000000..efdb2bfce
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/all-in-one
@@ -0,0 +1,585 @@
+# These initial groups are the only groups required to be modified. The
+# additional groups are for more control of the environment.
+[control]
+localhost ansible_connection=local
+
+[network]
+localhost ansible_connection=local
+
+[compute]
+localhost ansible_connection=local
+
+[storage]
+localhost ansible_connection=local
+
+[monitoring]
+localhost ansible_connection=local
+
+[deployment]
+localhost ansible_connection=local
+
+# You can explicitly specify which hosts run each project by updating the
+# groups in the sections below. Common services are grouped together.
+[chrony-server:children]
+haproxy
+
+[chrony:children]
+network
+compute
+storage
+monitoring
+
+[collectd:children]
+compute
+
+[baremetal:children]
+control
+
+[grafana:children]
+monitoring
+
+[etcd:children]
+control
+compute
+
+[karbor:children]
+control
+
+[kibana:children]
+control
+
+[telegraf:children]
+compute
+control
+monitoring
+network
+storage
+
+[elasticsearch:children]
+control
+
+[haproxy:children]
+network
+
+[hyperv]
+#hyperv_host
+
+[hyperv:vars]
+#ansible_user=user
+#ansible_password=password
+#ansible_port=5986
+#ansible_connection=winrm
+#ansible_winrm_server_cert_validation=ignore
+
+[mariadb:children]
+control
+
+[rabbitmq:children]
+control
+
+[outward-rabbitmq:children]
+control
+
+[qdrouterd:children]
+control
+
+[mongodb:children]
+control
+
+[keystone:children]
+control
+
+[glance:children]
+control
+
+[nova:children]
+control
+
+[neutron:children]
+network
+
+[openvswitch:children]
+network
+compute
+manila-share
+
+[opendaylight:children]
+network
+
+[cinder:children]
+control
+
+[cloudkitty:children]
+control
+
+[freezer:children]
+control
+
+[memcached:children]
+control
+
+[horizon:children]
+control
+
+[swift:children]
+control
+
+[barbican:children]
+control
+
+[heat:children]
+control
+
+[murano:children]
+control
+
+[ceph:children]
+control
+
+[ironic:children]
+control
+
+[influxdb:children]
+monitoring
+
+[magnum:children]
+control
+
+[sahara:children]
+control
+
+[solum:children]
+control
+
+[mistral:children]
+control
+
+[manila:children]
+control
+
+[panko:children]
+control
+
+[gnocchi:children]
+control
+
+[ceilometer:children]
+control
+
+[aodh:children]
+control
+
+[congress:children]
+control
+
+[tacker:children]
+control
+
+# Tempest
+[tempest:children]
+control
+
+[senlin:children]
+control
+
+[vmtp:children]
+control
+
+[trove:children]
+control
+
+[watcher:children]
+control
+
+[rally:children]
+control
+
+[searchlight:children]
+control
+
+[octavia:children]
+control
+
+[designate:children]
+control
+
+[placement:children]
+control
+
+[bifrost:children]
+deployment
+
+[zun:children]
+control
+
+[skydive:children]
+monitoring
+
+[redis:children]
+control
+
+# Additional control implemented here. These groups allow you to control which
+# services run on which hosts at a per-service level.
+#
+# Word of caution: Some services are required to run on the same host to
+# function appropriately. For example, neutron-metadata-agent must run on the
+# same host as the l3-agent and (depending on configuration) the dhcp-agent.
+
+# Glance
+[glance-api:children]
+glance
+
+[glance-registry:children]
+glance
+
+# Nova
+[nova-api:children]
+nova
+
+[nova-conductor:children]
+nova
+
+[nova-consoleauth:children]
+nova
+
+[nova-novncproxy:children]
+nova
+
+[nova-scheduler:children]
+nova
+
+[nova-spicehtml5proxy:children]
+nova
+
+[nova-compute-ironic:children]
+nova
+
+[nova-serialproxy:children]
+nova
+
+# Neutron
+[neutron-server:children]
+control
+
+[neutron-dhcp-agent:children]
+neutron
+
+[neutron-l3-agent:children]
+neutron
+
+[neutron-lbaas-agent:children]
+neutron
+
+[neutron-metadata-agent:children]
+neutron
+
+[neutron-vpnaas-agent:children]
+neutron
+
+[neutron-bgp-dragent:children]
+neutron
+
+# Ceph
+[ceph-mon:children]
+ceph
+
+[ceph-rgw:children]
+ceph
+
+[ceph-osd:children]
+storage
+
+# Cinder
+[cinder-api:children]
+cinder
+
+[cinder-backup:children]
+storage
+
+[cinder-scheduler:children]
+cinder
+
+[cinder-volume:children]
+storage
+
+# Cloudkitty
+[cloudkitty-api:children]
+cloudkitty
+
+[cloudkitty-processor:children]
+cloudkitty
+
+# Freezer
+[freezer-api:children]
+freezer
+
+# iSCSI
+[iscsid:children]
+compute
+storage
+ironic-conductor
+
+[tgtd:children]
+storage
+
+# Karbor
+[karbor-api:children]
+karbor
+
+[karbor-protection:children]
+karbor
+
+[karbor-operationengine:children]
+karbor
+
+# Manila
+[manila-api:children]
+manila
+
+[manila-scheduler:children]
+manila
+
+[manila-share:children]
+network
+
+[manila-data:children]
+manila
+
+# Swift
+[swift-proxy-server:children]
+swift
+
+[swift-account-server:children]
+storage
+
+[swift-container-server:children]
+storage
+
+[swift-object-server:children]
+storage
+
+# Barbican
+[barbican-api:children]
+barbican
+
+[barbican-keystone-listener:children]
+barbican
+
+[barbican-worker:children]
+barbican
+
+# Trove
+[trove-api:children]
+trove
+
+[trove-conductor:children]
+trove
+
+[trove-taskmanager:children]
+trove
+
+# Heat
+[heat-api:children]
+heat
+
+[heat-api-cfn:children]
+heat
+
+[heat-engine:children]
+heat
+
+# Murano
+[murano-api:children]
+murano
+
+[murano-engine:children]
+murano
+
+# Ironic
+[ironic-api:children]
+ironic
+
+[ironic-conductor:children]
+ironic
+
+[ironic-inspector:children]
+ironic
+
+[ironic-pxe:children]
+ironic
+
+# Magnum
+[magnum-api:children]
+magnum
+
+[magnum-conductor:children]
+magnum
+
+# Solum
+[solum-api:children]
+solum
+
+[solum-worker:children]
+solum
+
+[solum-deployer:children]
+solum
+
+[solum-conductor:children]
+solum
+
+# Mistral
+[mistral-api:children]
+mistral
+
+[mistral-executor:children]
+mistral
+
+[mistral-engine:children]
+mistral
+
+# Aodh
+[aodh-api:children]
+aodh
+
+[aodh-evaluator:children]
+aodh
+
+[aodh-listener:children]
+aodh
+
+[aodh-notifier:children]
+aodh
+
+# Panko
+[panko-api:children]
+panko
+
+# Gnocchi
+[gnocchi-api:children]
+gnocchi
+
+[gnocchi-statsd:children]
+gnocchi
+
+[gnocchi-metricd:children]
+gnocchi
+
+# Sahara
+[sahara-api:children]
+sahara
+
+[sahara-engine:children]
+sahara
+
+# Ceilometer
+[ceilometer-api:children]
+ceilometer
+
+[ceilometer-central:children]
+ceilometer
+
+[ceilometer-notification:children]
+ceilometer
+
+[ceilometer-collector:children]
+ceilometer
+
+[ceilometer-compute:children]
+compute
+
+# Congress
+[congress-api:children]
+congress
+
+[congress-datasource:children]
+congress
+
+[congress-policy-engine:children]
+congress
+
+# Multipathd
+[multipathd:children]
+compute
+
+# Watcher
+[watcher-api:children]
+watcher
+
+[watcher-engine:children]
+watcher
+
+[watcher-applier:children]
+watcher
+
+# Senlin
+[senlin-api:children]
+senlin
+
+[senlin-engine:children]
+senlin
+
+# Searchlight
+[searchlight-api:children]
+searchlight
+
+[searchlight-listener:children]
+searchlight
+
+# Octavia
+[octavia-api:children]
+octavia
+
+[octavia-health-manager:children]
+octavia
+
+[octavia-housekeeping:children]
+octavia
+
+[octavia-worker:children]
+octavia
+
+# Designate
+[designate-api:children]
+designate
+
+[designate-central:children]
+designate
+
+[designate-mdns:children]
+network
+
+[designate-worker:children]
+designate
+
+[designate-sink:children]
+designate
+
+[designate-backend-bind9:children]
+designate
+
+# Placement
+[placement-api:children]
+placement
+
+# Zun
+[zun-api:children]
+zun
+
+[zun-compute:children]
+compute
+
+# Skydive
+[skydive-analyzer:children]
+skydive
+
+[skydive-agent:children]
+compute
+network
+
+# Tacker
+[tacker-server:children]
+tacker
+
+[tacker-conductor:children]
+tacker
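
Since every group above resolves to localhost, the inventory can be sanity-checked without running anything; a hedged example using plain ansible (lib/openstack points kolla_inventory at this file):

ansible -i /var/onap/files/all-in-one control --list-hosts
ansible -i /var/onap/files/all-in-one neutron-metadata-agent --list-hosts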
diff --git a/bootstrap/vagrant-onap/lib/files/globals.yml b/bootstrap/vagrant-onap/lib/files/globals.yml
new file mode 100644
index 000000000..d10cc3d83
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/globals.yml
@@ -0,0 +1,2 @@
+---
+openstack_release: "master"
diff --git a/bootstrap/vagrant-onap/lib/files/kolla-build.conf b/bootstrap/vagrant-onap/lib/files/kolla-build.conf
new file mode 100644
index 000000000..8dd14e6c6
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/kolla-build.conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+base = ubuntu
+profile = main
+
+[profiles]
diff --git a/bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py b/bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py
new file mode 100644
index 000000000..6b5a6e9f6
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py
@@ -0,0 +1,40 @@
+import requests
+import os
+import base64
+
+RANCHER_URL = str(os.environ['RANCHER_URL'])
+RANCHER_ENVIRONMENT_ID = str(os.environ['RANCHER_ENVIRONMENT'])
+data = requests.post(RANCHER_URL + '/v1/projects/' + RANCHER_ENVIRONMENT_ID + '/apikeys',
+ {"accountId": RANCHER_ENVIRONMENT_ID,
+ "description": "ONAP on Kubernetes",
+ "name": "ONAP on Kubernetes",
+ "publicValue": "string",
+ "secretValue": "password"})
+json_dct = data.json()
+access_key = json_dct['publicValue']
+secret_key = json_dct['secretValue']
+auth_header = 'Basic ' + base64.b64encode(access_key + ':' + secret_key)
+token = "\"" + str(base64.b64encode(auth_header)) + "\""
+dct = \
+"""
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "{}/r/projects/{}/kubernetes:6443"
+ name: "onap_on_kubernetes"
+contexts:
+- context:
+ cluster: "onap_on_kubernetes"
+ user: "onap_on_kubernetes"
+ name: "onap_on_kubernetes"
+current-context: "onap_on_kubernetes"
+users:
+- name: "onap_on_kubernetes"
+ user:
+ token: {}
+""".format(RANCHER_URL, RANCHER_ENVIRONMENT_ID, token)
+with open("config", "w") as file:
+ file.write(dct)
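
The generator reads its Rancher endpoint and environment id from the environment and writes a file named config in the current directory; a minimal invocation sketch matching how lib/oom uses it (the values shown are placeholders):

export RANCHER_URL=http://localhost:8880      # placeholder Rancher endpoint
export RANCHER_ENVIRONMENT=1a7                # placeholder environment id
mkdir -p ~/.kube && cd ~/.kube
python /var/onap/files/kubectl_config_generator.py   # produces ~/.kube/config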
diff --git a/bootstrap/vagrant-onap/lib/files/passwords.yml b/bootstrap/vagrant-onap/lib/files/passwords.yml
new file mode 100644
index 000000000..f376e31f0
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/passwords.yml
@@ -0,0 +1,216 @@
+---
+###################
+# Ceph options
+####################
+# These options must be UUID4 values in string format
+# XXXXXXXX-XXXX-4XXX-XXXX-XXXXXXXXXXXX
+ceph_cluster_fsid:
+ceph_rgw_keystone_password:
+# for backward compatible consideration, rbd_secret_uuid is only used for nova,
+# cinder_rbd_secret_uuid is used for cinder
+rbd_secret_uuid:
+cinder_rbd_secret_uuid:
+
+###################
+# Database options
+####################
+database_password:
+
+####################
+# Docker options
+####################
+# This should only be set if you require a password for your Docker registry
+docker_registry_password:
+
+######################
+# OpenDaylight options
+######################
+opendaylight_password:
+
+####################
+# OpenStack options
+####################
+aodh_database_password:
+aodh_keystone_password:
+
+barbican_database_password:
+barbican_keystone_password:
+barbican_p11_password:
+barbican_crypto_key:
+
+keystone_admin_password:
+keystone_database_password:
+
+grafana_database_password:
+grafana_admin_password:
+
+glance_database_password:
+glance_keystone_password:
+
+gnocchi_database_password:
+gnocchi_keystone_password:
+
+karbor_database_password:
+karbor_keystone_password:
+karbor_openstack_infra_id:
+
+kuryr_keystone_password:
+
+nova_database_password:
+nova_api_database_password:
+nova_keystone_password:
+
+placement_keystone_password:
+
+neutron_database_password:
+neutron_keystone_password:
+metadata_secret:
+
+cinder_database_password:
+cinder_keystone_password:
+
+cloudkitty_database_password:
+cloudkitty_keystone_password:
+
+panko_database_password:
+panko_keystone_password:
+
+freezer_database_password:
+freezer_keystone_password:
+
+sahara_database_password:
+sahara_keystone_password:
+
+designate_database_password:
+designate_pool_manager_database_password:
+designate_keystone_password:
+# This option must be UUID4 value in string format
+designate_pool_id:
+# This option must be HMAC-MD5 value in string format
+designate_rndc_key:
+
+swift_keystone_password:
+swift_hash_path_suffix:
+swift_hash_path_prefix:
+
+heat_database_password:
+heat_keystone_password:
+heat_domain_admin_password:
+
+murano_database_password:
+murano_keystone_password:
+murano_agent_rabbitmq_password:
+
+ironic_database_password:
+ironic_keystone_password:
+
+ironic_inspector_database_password:
+ironic_inspector_keystone_password:
+
+magnum_database_password:
+magnum_keystone_password:
+
+mistral_database_password:
+mistral_keystone_password:
+
+trove_database_password:
+trove_keystone_password:
+
+ceilometer_database_password:
+ceilometer_keystone_password:
+
+watcher_database_password:
+watcher_keystone_password:
+
+congress_database_password:
+congress_keystone_password:
+
+rally_database_password:
+
+senlin_database_password:
+senlin_keystone_password:
+
+solum_database_password:
+solum_keystone_password:
+
+horizon_secret_key:
+horizon_database_password:
+
+telemetry_secret_key:
+
+manila_database_password:
+manila_keystone_password:
+
+octavia_database_password:
+octavia_keystone_password:
+octavia_ca_password:
+
+searchlight_keystone_password:
+
+tacker_database_password:
+tacker_keystone_password:
+
+zun_database_password:
+zun_keystone_password:
+
+memcache_secret_key:
+
+#HMAC secret key
+osprofiler_secret:
+
+nova_ssh_key:
+ private_key:
+ public_key:
+
+kolla_ssh_key:
+ private_key:
+ public_key:
+
+keystone_ssh_key:
+ private_key:
+ public_key:
+
+bifrost_ssh_key:
+ private_key:
+ public_key:
+
+####################
+# Gnocchi options
+####################
+gnocchi_project_id:
+gnocchi_resource_id:
+gnocchi_user_id:
+
+####################
+# Qdrouterd options
+####################
+qdrouterd_password:
+
+####################
+# RabbitMQ options
+####################
+rabbitmq_password:
+rabbitmq_cluster_cookie:
+outward_rabbitmq_password:
+outward_rabbitmq_cluster_cookie:
+
+####################
+# HAProxy options
+####################
+haproxy_password:
+keepalived_password:
+
+####################
+# Kibana options
+####################
+kibana_password:
+
+####################
+# etcd options
+####################
+etcd_cluster_token:
+
+####################
+# redis options
+####################
+redis_master_password:
diff --git a/bootstrap/vagrant-onap/lib/functions b/bootstrap/vagrant-onap/lib/functions
index 02111fa2c..c2c6d76a5 100755
--- a/bootstrap/vagrant-onap/lib/functions
+++ b/bootstrap/vagrant-onap/lib/functions
@@ -1,16 +1,15 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/commons
+source /var/onap/config/env-vars
source /var/onap/_composed_functions
source /var/onap/_onap_functions
export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' |sort -n | head -1)
-export IP_ADDRESS=$(ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
+export NIC=$(ip route get 8.8.8.8 | awk '{ print $5; exit }')
+export IP_ADDRESS=$(ifconfig $NIC | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
mvn_conf_file=/root/.m2/settings.xml
-git_src_folder=/opt
# configure_dns() - DNS/GW IP address configuration
function configure_dns {
@@ -18,6 +17,14 @@ function configure_dns {
resolvconf -u
}
+# get_next_ip() - Function that provides the next ip
+function get_next_ip {
+ local ip=${1:-$IP_ADDRESS}
+ ip_hex=$(printf '%.2X%.2X%.2X%.2X\n' `echo $ip | sed -e 's/\./ /g'`)
+ next_ip_hex=$(printf %.8X `echo $(( 0x$ip_hex + 1 ))`)
+ echo $(printf '%d.%d.%d.%d\n' `echo $next_ip_hex | sed -r 's/(..)/0x\1 /g'`)
+}
+
# _git_timed() - git can sometimes get itself infinitely stuck with transient network
# errors or other issues with the remote end. This wraps git in a
# timeout/retry loop and is intended to watch over non-local git
@@ -44,11 +51,15 @@ function _git_timed {
# clone_repo() - Clone Git repository into specific folder
function clone_repo {
- local repo_url=https://git.onap.org/
+ local repo_url=${3:-"https://git.onap.org/"}
local repo=$1
local dest_folder=${2:-$git_src_folder/$repo}
if [ ! -d $dest_folder ]; then
- _git_timed clone ${repo_url}${repo} $dest_folder
+ if [[ "$debug" == "False" ]]; then
+ _git_timed clone --quiet ${repo_url}${repo} $dest_folder
+ else
+ _git_timed clone ${repo_url}${repo} $dest_folder
+ fi
else
pushd $dest_folder
_git_timed pull
@@ -56,9 +67,14 @@ function clone_repo {
fi
}
-# install_dev_tools() - Install basic dependencies
-function install_dev_tools {
- install_packages apt-transport-https ca-certificates curl
+# clone_repos() - Function that clones source repositories for a given project
+function clone_repos {
+ local project=$1
+ local repo_name=${2:-$project}
+
+ for repo in ${repos[$project]}; do
+ clone_repo $repo ${src_folders[$project]}${repo#*$repo_name}
+ done
}
# _install_bind() - Install bind utils
@@ -71,8 +87,18 @@ function install_java {
if is_package_installed openjdk-8-jdk; then
return
fi
- install_package software-properties-common
- add-apt-repository -y ppa:openjdk-r/ppa
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_package software-properties-common
+ add-apt-repository -y ppa:openjdk-r/ppa
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
# Remove Java 7
uninstall_packages default-jre openjdk-7-jdk openjdk-7-jre openjdk-7-jre-headless
@@ -88,8 +114,18 @@ function install_maven {
return
fi
install_java
- install_package software-properties-common
- add-apt-repository -y ppa:andrei-pozolotin/maven3
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_package software-properties-common
+ add-apt-repository -y ppa:andrei-pozolotin/maven3
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
install_package maven3
# Remove Java 7
@@ -103,8 +139,9 @@ function _configure_docker_settings {
local docker_conf_backup=/tmp/docker.backup
local docker_conf=/etc/default/docker
local chameleonsocks_filename=chameleonsocks.sh
+ local max_concurrent_downloads=${1:-3}
- cp $docker_conf $docker_conf_backup
+ cp ${docker_conf} ${docker_conf_backup}
if [ $http_proxy ]; then
echo "export http_proxy=$http_proxy" >> $docker_conf
fi
@@ -119,13 +156,25 @@ function _configure_docker_settings {
port=$(echo $socks_proxy | sed -e "s/^.*://")
PROXY=$socks PORT=$port ./$chameleonsocks_filename --install
rm $chameleonsocks_filename
- cp $docker_conf_backup $docker_conf
+ cp ${docker_conf_backup} ${docker_conf}
fi
fi
- rm $docker_conf_backup
-
- echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock\"" >> $docker_conf
- usermod -a -G docker vagrant
+ rm ${docker_conf_backup}
+
+ echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" >> $docker_conf
+ usermod -aG docker $USER
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ service docker restart
+ sleep 10
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# install_nodejs() - Download and install NodeJS
@@ -148,34 +197,52 @@ function install_python {
# _install_pip() - Install Python Package Manager
function _install_pip {
install_python
- if [ ! -f /usr/local/bin/pip ]; then
+ if ! which pip; then
curl -sL https://bootstrap.pypa.io/get-pip.py | python
fi
}
-# install_python_package() - Install a python module
+# install_python_package() - Install python modules
function install_python_package {
- local python_package=$1
+ local python_packages=$@
+
+ _install_pip
+ pip install $python_packages
+}
+
+# install_python_requirements() - Install a list of python modules defined in requirement.txt file
+function install_python_requirements {
+ local python_project_path=$1
_install_pip
- pip install $python_package
+ pushd $python_project_path
+ pip install -r requirements.txt
+ popd
}
# install_docker() - Download and install docker-engine
function install_docker {
- if is_package_installed docker-ce; then
+ if $(docker version &>/dev/null); then
return
fi
- install_package software-properties-common
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
- $(lsb_release -cs) \
- stable"
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_packages software-properties-common linux-image-extra-$(uname -r) linux-image-extra-virtual apt-transport-https ca-certificates curl
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) stable"
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
+
install_package docker-ce
_configure_docker_settings
- service docker restart
- sleep 10
}
# pull_docker_image() - Pull Docker container image from the Public Docker Registry Hub
@@ -189,6 +256,12 @@ function pull_docker_image {
fi
}
+# run_docker_image() - Starts a Docker instance
+function run_docker_image {
+ install_docker
+ docker run $@
+}
+
# install_docker_compose() - Download and install docker-engine
function install_docker_compose {
local docker_compose_version=${1:-1.12.0}
@@ -204,8 +277,8 @@ function _install_ODL {
if [ ! -d /opt/opendaylight/current ]; then
mkdir -p /opt/opendaylight/
wget "https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/"$odl_version"/distribution-karaf-"$odl_version".tar.gz" -P /opt/
- tar xvf "/opt/distribution-karaf-"$odl_version".tar.gz" -C /opt/
- mv "/opt/distribution-karaf-"$odl_version /opt/opendaylight/current
+ tar xvf "/opt/distribution-karaf-"$odl_version".tar.gz" -C /tmp/
+ mv "/tmp/distribution-karaf-"$odl_version /opt/opendaylight/current
rm -rf "/opt/distribution-karaf-"$odl_version".tar.gz"
fi
}
@@ -225,43 +298,87 @@ function start_ODL {
function compile_src {
local src_folder=$1
pushd $src_folder
+ local mvn_build='mvn clean install -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none'
+ if [[ "$debug" == "False" ]]; then
+ mvn_build+=" -q"
+ fi
if [ -f pom.xml ]; then
install_maven
- mvn clean install -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none
+ echo "Compiling $src_folder folder..."
+ eval $mvn_build
fi
popd
}
+# compile_repos() - Function that compiles source repositories for a given project
+function compile_repos {
+ local project=$1
+
+ for repo in ${repos[$project]}; do
+ compile_src ${src_folders[$project]}${repo#*$project}
+ done
+}
+
# build_docker_image() - Build Docker container image from source code
function build_docker_image {
local src_folder=$1
local profile=$2
- install_maven
install_docker
pushd $src_folder
- # Cleanup external repo
- sed -i 's|${docker.push.registry}/||g' pom.xml
- local mvn_docker="mvn clean package docker:build"
- if [ $profile ]; then
- mvn_docker+=" -P $profile"
- fi
- if [ $http_proxy ]; then
- if ! grep -ql "docker.buildArg.http_proxy" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.http_proxy=$http_proxy"
+ if [ -f pom.xml ]; then
+ install_maven
+ # Cleanup external repo
+ sed -i 's|${docker.push.registry}/||g' pom.xml
+ local docker_build="mvn clean package docker:build"
+ if [ $profile ]; then
+ docker_build+=" -P $profile"
fi
+ if [ $http_proxy ]; then
+ if ! grep -ql "docker.buildArg.http_proxy" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.http_proxy=$http_proxy"
+ fi
if ! grep -ql "docker.buildArg.HTTP_PROXY" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.HTTP_PROXY=$http_proxy"
+ docker_build+=" -Ddocker.buildArg.HTTP_PROXY=$http_proxy"
fi
- fi
- if [ $https_proxy ]; then
- if ! grep -ql "docker.buildArg.https_proxy" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.https_proxy=$https_proxy"
fi
- if ! grep -ql "docker.buildArg.HTTPS_PROXY" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.HTTPS_PROXY=$https_proxy"
+ if [ $https_proxy ]; then
+ if ! grep -ql "docker.buildArg.https_proxy" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.https_proxy=$https_proxy"
+ fi
+ if ! grep -ql "docker.buildArg.HTTPS_PROXY" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.HTTPS_PROXY=$https_proxy"
+ fi
+ fi
+ elif [ -f Dockerfile ]; then
+ # NOTE: Workaround for dmmapbc images
+ sed -i '/LocalKey/d' Dockerfile
+ sed -i "s/nexus3.onap.org\:10003\///g" Dockerfile
+ local docker_build="docker build -t $profile -f ./Dockerfile ."
+ if [ $http_proxy ]; then
+ docker_build+=" --build-arg http_proxy=$http_proxy"
+ docker_build+=" --build-arg HTTP_PROXY=$http_proxy"
+ fi
+ if [ $https_proxy ]; then
+ docker_build+=" --build-arg https_proxy=$https_proxy"
+ docker_build+=" --build-arg HTTPS_PROXY=$https_proxy"
fi
fi
- eval $mvn_docker
+ echo $docker_build
+ eval $docker_build
popd
}
+
+# mount_external_partition() - Create partition and mount the external volume
+function mount_external_partition {
+ local dev_name="/dev/$1"
+ local mount_dir=$2
+
+ sfdisk $dev_name << EOF
+;
+EOF
+ mkfs -t ext4 ${dev_name}1
+ mkdir -p $mount_dir
+ mount ${dev_name}1 $mount_dir
+ echo "${dev_name}1 $mount_dir ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
+}
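
The hex arithmetic inside get_next_ip above is terse; an equivalent standalone sketch with the steps spelled out:

#!/bin/bash
ip=192.168.1.10
# Pack the four octets into a single 32-bit hex value (C0A8010A here)...
ip_hex=$(printf '%.2X%.2X%.2X%.2X\n' $(echo $ip | sed -e 's/\./ /g'))
# ...add one...
next_ip_hex=$(printf %.8X $(( 0x$ip_hex + 1 )))
# ...and unpack back into dotted-quad notation.
printf '%d.%d.%d.%d\n' $(echo $next_ip_hex | sed -r 's/(..)/0x\1 /g')   # 192.168.1.11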
diff --git a/bootstrap/vagrant-onap/lib/mr b/bootstrap/vagrant-onap/lib/mr
index f221817fa..499f53f6a 100755
--- a/bootstrap/vagrant-onap/lib/mr
+++ b/bootstrap/vagrant-onap/lib/mr
@@ -1,16 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-mr_src_folder=$git_src_folder/dcae/message-router
-
-# clone_mr_repos() - Function that clones the Message Router repositories
-function clone_mr_repos {
- clone_repo dcae/demo/startup/message-router $mr_src_folder
-}
-
# get_mr_images() - Function that retrieves the Message Router Docker images
function get_mr_images {
pull_docker_image attos/dmaap
@@ -29,7 +20,7 @@ function install_message_router {
# init_mr() - Function that initialize Message Router services
function init_mr {
if [[ "$clone_repo" == "True" ]]; then
- clone_mr_repos
+ clone_repo dcae/demo/startup/message-router $mr_src_folder
fi
if [[ "$skip_get_images" == "False" ]]; then
get_mr_images
diff --git a/bootstrap/vagrant-onap/lib/msb b/bootstrap/vagrant-onap/lib/msb
new file mode 100755
index 000000000..a14e8ea6c
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/msb
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_msb_images() - Function that creates Microservices Docker images from source code
+function _build_msb_images {
+ if [[ "$compile_repo" != "True" ]]; then
+ compile_repos "msb"
+ fi
+
+ build_docker_image $msb_src_folder/apigateway/distributions/msb-apigateway/src/main/basedocker onap/msb/msb_base
+ build_docker_image $msb_src_folder/apigateway/distributions/msb-apigateway/src/main/docker onap/msb/msb_apigateway
+ build_docker_image $msb_src_folder/discovery/distributions/msb-discovery/src/main/docker onap/msb/msb_discovery
+}
+
+# get_msb_images() - Function that retrieves the Microservices Bus images
+function get_msb_images {
+ pull_docker_image "consul:0.9.3"
+ if [[ "$build_image" == "True" ]]; then
+ _build_msb_images
+ else
+ unset docker_version
+ pull_onap_image msb/msb_base
+ pull_onap_image msb/msb_apigateway
+ pull_onap_image msb/msb_discovery
+ fi
+}
+
+# install_msb() - Downloads and configure Microservices Bus source code
+function install_msb {
+ run_docker_image -d --net=host --name msb_consul consul:0.9.3
+ run_docker_image -d --net=host --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery
+ run_docker_image -d --net=host -e "ROUTE_LABELS=visualRange:1" --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway
+}
+
+# init_msb() - Function that initialize Message Router services
+function init_msb {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "msb"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "msb"
+ fi
+ fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_msb_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_msb
+ fi
+ fi
+}
diff --git a/bootstrap/vagrant-onap/lib/mso b/bootstrap/vagrant-onap/lib/mso
index 5439d991b..44d4a7b04 100755
--- a/bootstrap/vagrant-onap/lib/mso
+++ b/bootstrap/vagrant-onap/lib/mso
@@ -1,33 +1,13 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-src_folder=$git_src_folder/mso
-mso_repos=("mso" "mso/chef-repo" "mso/docker-config" "mso/libs"
-"mso/mso-config")
-
-# clone_all_mso_repos() - Function that clones MSO source repo.
-function clone_all_mso_repos {
- for repo in ${mso_repos[@]}; do
- clone_repo $repo $src_folder${repo#*mso}
- done
-}
-
-# compile_all_mso_repos() - Function that compiles MSO source repo.
-function compile_all_mso_repos {
- for repo in ${mso_repos[@]}; do
- compile_src $src_folder${repo#*mso}
- done
-}
-
# get_mso_images() - Function that retrieves or create MSO Docker images
function get_mso_images {
if [[ "$build_image" == "True" ]]; then
export GIT_NO_PROJECT=/opt/
- compile_src $src_folder
- build_docker_image $src_folder/packages/docker docker
+ compile_src $mso_src_folder
+ build_docker_image $mso_src_folder/packages/docker docker
fi
}
@@ -82,10 +62,10 @@ EOF
export MSO_CONFIG_UPDATES
export MSO_DOCKER_IMAGE_VERSION=$docker_version
- is_package_installed docker-ce || install_docker
+ install_docker
install_docker_compose
# Deploy the environment
- pushd $src_folder/docker-config
+ pushd $mso_src_folder/docker-config
chmod +x deploy.sh
if [[ "$build_image" == "True" ]]; then
bash deploy.sh
@@ -99,9 +79,9 @@ EOF
# init_mso() - Function that initialize MSO services
function init_mso {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_mso_repos
+ clone_repos "mso"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_mso_repos
+ compile_repos "mso"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/multicloud b/bootstrap/vagrant-onap/lib/multicloud
index b4a185aaf..d2b09f2aa 100755
--- a/bootstrap/vagrant-onap/lib/multicloud
+++ b/bootstrap/vagrant-onap/lib/multicloud
@@ -1,44 +1,47 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-multicloud_src_folder=$git_src_folder/multicloud
-multicloud_repos=("multicloud" "multicloud/framework" "multicloud/openstack" \
-"multicloud/openstack/vmware" "multicloud/openstack/windriver" \
-"multicloud/azure")
-
-# clone_multicloud_repos() - Function that clones the Multi Cloud repositories
-function clone_multicloud_repos {
- for repo in ${multicloud_repos[@]}; do
- clone_repo $repo $multicloud_src_folder${repo#*multicloud}
- done
-}
+openstack_release="newton"
-# compile_multicloud_repos() -
-function compile_multicloud_repos {
- for repo in ${multicloud_repos[@]}; do
- compile_src $multicloud_src_folder${repo#*multicloud}
- done
+# _build_multicloud_images() - Function that builds docker images from source code
+function _build_multicloud_images {
+ install_docker
+ pushd ${src_folders[multicloud]}/openstack/$openstack_release
+ install_python_requirements .
+ python setup.py develop
+ #bash build_image.sh
+ popd
}
# get_multicloud_images() -
function get_multicloud_images {
- echo "pass"
+ if [[ "$build_image" == "True" ]]; then
+ _build_multicloud_images
+ else
+ pull_onap_image multicloud/openstack-$openstack_release
+ fi
}
# install_multicloud() -
function install_multicloud {
- echo "pass"
+ #pushd ${src_folders[multicloud]}/openstack/$openstack_release
+ #/opt/docker/docker-compose up -d
+ #popd
+ if [[ "$build_image" == "True" ]]; then
+ multicloud-api --port 9003 --host 0.0.0.0 &
+ else
+ docker_id=`docker images | grep onap/multicloud/openstack-$openstack_release | grep latest | awk '{print $3; exit}'`
+ docker run -d -p 0.0.0.0:9003:9003 $docker_id
+ fi
}
# init_multicloud() - Function that initialize Multi Cloud services
function init_multicloud {
if [[ "$clone_repo" == "True" ]]; then
- clone_multicloud_repos
+ clone_repos "multicloud"
if [[ "$compile_repo" == "True" ]]; then
- compile_multicloud_repos
+ compile_repos "multicloud"
fi
fi
if [[ "$skip_get_images" == "False" ]]; then
diff --git a/bootstrap/vagrant-onap/lib/oom b/bootstrap/vagrant-onap/lib/oom
new file mode 100755
index 000000000..ef7e5ede5
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/oom
@@ -0,0 +1,220 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+RANCHER_PORT=8880
+oom_delay=30
+export RANCHER_URL=http://localhost:$RANCHER_PORT
+export RANCHER_ACCESS_KEY='access_key'
+export RANCHER_SECRET_KEY='secret_key'
+
+# _install_docker() - Function that installs Docker version for Rancher
+function _install_docker {
+ if ! $(docker version &>/dev/null); then
+ curl https://releases.rancher.com/install-docker/1.12.sh | sh
+ _configure_docker_settings 15
+ fi
+}
+
+# _pull_rancher_images() - Function that retrieves Rancher images required for k8s
+function _pull_rancher_images {
+ for image in "net:v0.13.5" "k8s:v1.8.5-rancher3" \
+"lb-service-rancher:v0.7.17" "network-manager:v0.7.18" "metadata:v0.9.5" \
+"kubectld:v0.8.5" "kubernetes-agent:v0.6.6" "dns:v0.15.3" \
+"kubernetes-auth:v0.0.8" "healthcheck:v0.3.3" "etcd:v2.3.7-13" \
+"etc-host-updater:v0.0.3" "net:holder"; do
+ pull_docker_image rancher/$image &
+ done
+}
+
+# _pull_k8s_images() - Function that retrieves Google k8s images
+function _pull_k8s_images {
+ for image in "kubernetes-dashboard-amd64:v1.7.1" \
+"k8s-dns-sidecar-amd64:1.14.5" "k8s-dns-kube-dns-amd64:1.14.5" \
+"k8s-dns-dnsmasq-nanny-amd64:1.14.5" "heapster-influxdb-amd64:v1.3.3" \
+"heapster-grafana-amd64:v4.4.3" "heapster-amd64:v1.4.0" "pause-amd64:3.0"; do
+ pull_docker_image gcr.io/google_containers/$image &
+ done
+}
+
+# _install_rancher() - Function that installs Rancher CLI and container
+function _install_rancher {
+ local rancher_version=v0.6.5
+ local rancher_server_version=v1.6.10
+ local rancher_server=rancher/server:$rancher_server_version
+
+ if [ ! -d /opt/rancher/current ]; then
+ mkdir -p /opt/rancher/current
+ wget https://github.com/rancher/cli/releases/download/$rancher_version/rancher-linux-amd64-$rancher_version.tar.gz
+ tar -xzf rancher-linux-amd64-$rancher_version.tar.gz -C /tmp
+ mv /tmp/rancher-$rancher_version/rancher /opt/rancher/current/
+ fi
+
+ _install_docker
+ pull_docker_image $rancher_server
+ run_docker_image -d --restart=unless-stopped -p $RANCHER_PORT:8080 $rancher_server
+ while true; do
+ if curl --fail -X GET $RANCHER_URL; then
+ break
+ fi
+ echo "waiting for racher"
+ sleep $oom_delay
+ done
+}
+
+# _install_kubernetes() - Function that deploys kubernetes via RancherOS host registration
+function _install_kubernetes {
+ local rancher_agent_version=v1.2.7
+ local rancher_agent=rancher/agent:$rancher_agent_version
+
+ _install_rancher
+
+ _pull_rancher_images
+ _pull_k8s_images
+ pull_docker_image $rancher_agent
+ _wait_docker_pull
+
+ pushd /opt/rancher/current/
+ export RANCHER_ENVIRONMENT=`./rancher env create -t kubernetes onap_on_kubernetes`
+ popd
+
+ install_python_package rancher-agent-registration
+ export no_proxy=$no_proxy,$IP_ADDRESS
+ rancher-agent-registration --host-ip $IP_ADDRESS --url http://$IP_ADDRESS:$RANCHER_PORT --environment $RANCHER_ENVIRONMENT --key $RANCHER_ACCESS_KEY --secret $RANCHER_SECRET_KEY
+}
+
+# _install_kubectl() - Function that installs kubectl as client for kubernetes
+function _install_kubectl {
+ if ! $(kubectl version &>/dev/null); then
+ rm -rf ~/.kube
+ curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
+ chmod +x ./kubectl
+ mv ./kubectl /usr/local/bin/kubectl
+ mkdir ~/.kube
+ pushd ~/.kube
+ python /var/onap/files/kubectl_config_generator.py
+ popd
+ fi
+}
+
+# _install_helm() - Function that install Kubernetes Package Manager
+function _install_helm {
+ local helm_version=v2.3.0
+
+ if ! $(helm version &>/dev/null); then
+ wget http://storage.googleapis.com/kubernetes-helm/helm-${helm_version}-linux-amd64.tar.gz
+ tar -zxvf helm-${helm_version}-linux-amd64.tar.gz -C /tmp
+ mv /tmp/linux-amd64/helm /usr/local/bin/helm
+ helm init
+ fi
+}
+
+# _pull_images_from_yaml() - Function that parses a yaml file and pull their images
+function _pull_images_from_yaml_file {
+ local values_file=$1
+ local prefix=$2
+ local s='[[:space:]]*'
+ local w='[a-zA-Z0-9_]*'
+ fs=`echo @|tr @ '\034'`
+
+ for line in $(sed -ne "s|^\($s\):|\1|" \
+-e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
+-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $values_file |
+awk -F$fs '{
+indent = length($1)/2;
+vname[indent] = $2;
+for (i in vname) {
+ if (i > indent) {
+ delete vname[i]}
+ }
+ if (length($3) > 0) {
+ vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])(".")}
+ printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
+ }
+}' | grep image); do
+ echo $line
+ if echo $line | grep -q Version ; then
+ pull_docker_image "$image_name:$(echo $line | awk -F "=" '{print $2}')" &
+ else
+ image_name=`echo ${line#*=}`
+ if [[ ${image_name#*${nexus_docker_repo:-nexus3.onap.org:10001}} == *:* ]]; then
+ pull_docker_image $image_name &
+ else
+ pull_docker_image $image_name:latest
+ fi
+ fi
+ done
+}
+
+# _wait_docker_pull() - Function that waits for all docker pull processes
+function _wait_docker_pull {
+ local counter=150
+
+ while [ $(ps -ef | grep "docker pull" | wc -l) -gt 1 ]; do
+ sleep $oom_delay
+ counter=$((counter - 1))
+ if [ "$counter" -eq 0 ]; then
+ break
+ fi
+ done
+}
+
+# get_oom_images() - Function that retrieves ONAP images from official hub
+function get_oom_images {
+ if [[ "$build_image" == "True" ]]; then
+ # TODO(electrocucaracha): Create a function for calling the build docker function of every ONAP project
+ echo "Not Implemented"
+ else
+ if [[ "$clone_repo" != "True" ]]; then
+ clone_repos "oom"
+ fi
+
+ docker_openecomp_login
+ for values_file in `find ${src_folders[oom]}/kubernetes -name values.yaml -type f`; do
+ _pull_images_from_yaml_file $values_file
+ done
+ docker logout
+ _wait_docker_pull
+ fi
+}
+
+# install_oom() - Function that clones OOM (if needed) and deploys ONAP
+function install_oom {
+ if [[ "$clone_repo" != "True" ]]; then
+ clone_repos "oom"
+ fi
+ pushd ${src_folders[oom]}/kubernetes/oneclick
+ source setenv.bash
+
+ pushd ${src_folders[oom]}/kubernetes/config
+ cp onap-parameters-sample.yaml onap-parameters.yaml
+ ./createConfig.sh -n onap
+ popd
+
+ for app in consul msb mso message-router sdnc vid robot portal policy appc aai sdc dcaegen2 log cli multicloud clamp vnfsdk uui aaf vfc kube2msb; do
+ ./createAll.bash -n onap -a $app
+ done
+ popd
+}
+
+# init_oom() - Function that deploys ONAP using OOM
+function init_oom {
+ mount_external_partition sda /var/lib/docker/
+ _install_kubernetes
+ _install_kubectl
+ _install_helm
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "oom"
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_oom_images
+ if [[ "$skip_install" == "False" ]]; then
+ until kubectl cluster-info; do
+ echo "waiting for kubernetes host"
+ sleep $oom_delay
+ done
+ install_oom
+ fi
+ fi
+}
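A minimal usage sketch of the new oom library (illustrative only, not part of the patch). It assumes the lib file is exposed inside the guest as /var/onap/oom, as the other libraries are, and that the clone_repo, skip_get_images and skip_install flags are exported by the provisioning environment; the values shown are examples.

    source /var/onap/oom
    export clone_repo=True        # clone the oom repo before deploying
    export skip_get_images=False  # pull the images referenced by the values.yaml files
    export skip_install=False     # run createConfig.sh/createAll.bash once kubectl responds
    init_oom                      # installs Rancher, Kubernetes, kubectl and Helm, then deploys ONAP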
diff --git a/bootstrap/vagrant-onap/lib/openstack b/bootstrap/vagrant-onap/lib/openstack
new file mode 100755
index 000000000..205d7ae80
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/openstack
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+kolla_config=/etc/kolla
+kolla_build=$kolla_config/kolla-build.conf
+kolla_passwords=$kolla_config/passwords.yml
+kolla_globals=$kolla_config/globals.yml
+kolla_inventory=/var/onap/files/all-in-one
+
+# install_dependencies() - Function that installs Kolla-Ansible requirements
+function install_dependencies {
+ install_docker
+
+ mkdir -p /etc/systemd/system/docker.service.d
+ tee /etc/systemd/system/docker.service.d/kolla.conf <<-'EOF'
+[Service]
+MountFlags=shared
+EOF
+ systemctl daemon-reload
+ systemctl restart docker
+
+ install_python_package ansible docker kolla-ansible python-openstackclient
+}
+
+# configure_deploy() - Function that modifies configuration files
+function configure_deploy {
+ local network_id=$1
+ local enable_opendaylight=${2-False}
+ local openstack_services="main = ceilometer,cinder,glance,heat,horizon,iscsi,keystone,neutron,nova-,swift"
+ nic=$(ip route get $network_id | awk '{ print $4; exit }')
+ ip_address=$(ip route get $network_id | awk '{ print $6; exit }')
+ internal_vip_address=$(get_next_ip $ip_address)
+
+ mkdir -p $kolla_config
+ cp /var/onap/files/globals.yml $kolla_globals
+ cp /var/onap/files/passwords.yml $kolla_passwords
+ cp /var/onap/files/kolla-build.conf $kolla_build
+ kolla-genpwd
+ echo "network_interface: \"$nic\"" >> $kolla_globals
+ echo "kolla_internal_vip_address: \"$internal_vip_address\"" >> $kolla_globals
+ echo "api_interface: \"{{ network_interface }}\"" >> $kolla_globals
+ if [[ $enable_opendaylight == True ]]; then
+ echo "enable_opendaylight: \"yes\"" >> $kolla_globals
+ openstack_services+=",opendaylight"
+ fi
+ echo $openstack_services >> $kolla_build
+
+ echo "$ip_address $(hostname)" >> /etc/hosts
+}
+
+# get_openstack_images() - Function that retrieves or builds docker images
+function get_openstack_images {
+ if [[ "$build_image" == "True" ]]; then
+ install_python_package kolla
+ kolla-build --config-file $kolla_build
+ else
+ kolla-ansible pull -i $kolla_inventory
+ fi
+}
+
+# deploy_openstack() - Function that provisions an OpenStack deployment
+function deploy_openstack {
+ install_dependencies
+ configure_deploy ${1:-"192.168.53.0"} "True"
+
+ get_openstack_images
+ kolla-ansible deploy -i $kolla_inventory
+ kolla-ansible post-deploy
+ echo "source /etc/kolla/admin-openrc.sh" >> ${HOME}/.bashrc
+}
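For reference, a hedged verification sketch for the new openstack library (not part of the patch). deploy_openstack drives the kolla-ansible all-in-one inventory, and post-deploy writes /etc/kolla/admin-openrc.sh, so a basic sanity check with python-openstackclient (installed by install_dependencies) could look like the following; the subnet argument and the resulting service list depend on the local environment.

    source /var/onap/openstack
    deploy_openstack "192.168.53.0"     # subnet used to derive the NIC and internal VIP
    source /etc/kolla/admin-openrc.sh   # credentials generated by kolla-ansible post-deploy
    openstack service list              # confirm keystone answers and services are registered
    openstack compute service list      # confirm nova services report as up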
diff --git a/bootstrap/vagrant-onap/lib/policy b/bootstrap/vagrant-onap/lib/policy
index be4cb2c18..ad982ad95 100755
--- a/bootstrap/vagrant-onap/lib/policy
+++ b/bootstrap/vagrant-onap/lib/policy
@@ -1,28 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-policy_src_folder=$git_src_folder/policy
-policy_repos=("policy/api" "policy/common" "policy/docker" \
-"policy/drools-applications" "policy/drools-pdp" "policy/engine" \
-"policy/gui" "policy/pap" "policy/pdp")
-
-# clone_all_policy_repos() - Function that clones Policy source repo.
-function clone_all_policy_repos {
- for repo in ${policy_repos[@]}; do
- clone_repo $repo $policy_src_folder${repo#*policy}
- done
-}
-
-# compile_all_policy_repos() - Function that compiles Policy source repo.
-function compile_all_policy_repos {
- for repo in ${policy_repos[@]}; do
- compile_src $policy_src_folder${repo#*policy}
- done
-}
-
# _build_policy_images() - Function that build Policy docker images from source code
function _build_policy_images {
compile_src $policy_src_folder/docker
@@ -61,9 +40,9 @@ function install_policy {
# init_policy() - Function that initialize Policy services
function init_policy {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_policy_repos
+ clone_repos "policy"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_policy_repos
+ compile_repos "policy"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/portal b/bootstrap/vagrant-onap/lib/portal
index b5e768b2c..fe5469822 100755
--- a/bootstrap/vagrant-onap/lib/portal
+++ b/bootstrap/vagrant-onap/lib/portal
@@ -1,33 +1,28 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-portal_src_folder=$git_src_folder/portal
-portal_repos=("portal" "portal/sdk" "ecompsdkos" "ui/dmaapbc")
-
# clone_all_portal_repos() - Function that clones Portal source repo.
function clone_all_portal_repos {
- for repo in ${portal_repos[@]}; do
+ for repo in ${repos[portal]}; do
if [[ "$repo" == "ui/dmaapbc" ]];then
prefix="ui"
else
prefix="portal"
fi
- clone_repo $repo $portal_src_folder/${repo#*$prefix}
+ clone_repo $repo ${src_folders[portal]}/${repo#*$prefix}
done
}
# compile_all_portal_repos() - Function that compiles Portal source repo.
function compile_all_portal_repos {
- for repo in ${portal_repos[@]}; do
+ for repo in ${repos[portal]}; do
if [[ "$repo" == "ui/dmaapbc" ]];then
prefix="ui"
else
prefix="portal"
fi
- compile_src $portal_src_folder/${repo#*$prefix}
+ compile_src ${src_folders[portal]}/${repo#*$prefix}
done
}
@@ -35,7 +30,7 @@ function compile_all_portal_repos {
function _build_portal_images {
install_maven
- pushd $portal_src_folder/deliveries
+ pushd ${src_folders[portal]}/deliveries
chmod +x *.sh
export MVN=$(which mvn)
export GLOBAL_SETTINGS_FILE=/usr/share/maven3/conf/settings.xml
@@ -66,7 +61,7 @@ function install_portal {
docker rm -f ecompdb_portal
docker rm -f 1610-1
- pushd $portal_src_folder/deliveries
+ pushd ${src_folders[portal]}/deliveries
mkdir -p /PROJECT/OpenSource/UbuntuEP/logs
install_package unzip
unzip -o etc.zip -d /PROJECT/OpenSource/UbuntuEP/
diff --git a/bootstrap/vagrant-onap/lib/robot b/bootstrap/vagrant-onap/lib/robot
index ebcca6e6b..90fbcf8c0 100755
--- a/bootstrap/vagrant-onap/lib/robot
+++ b/bootstrap/vagrant-onap/lib/robot
@@ -1,39 +1,15 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-robot_src_folder=$git_src_folder/testsuite
-robot_repos=("heatbridge" "properties" "python-testing-utils")
-
-# clone_robot_repos() - Function that clones Robot source repo.
-function clone_robot_repos {
- clone_repo testsuite $robot_src_folder
-
- for dirc in ${robot_repos[@]}; do
- clone_repo testsuite/$dirc $robot_src_folder/$dirc
- done
-}
-
-# compile_robot_repos() - Function that compile Robot source repo.
-function compile_robot_repos {
- install_python_package tox
- compile_src $robot_src_folder
-
- for dirc in ${robot_repos[@]}; do
- compile_src $robot_src_folder/$dirc
- done
-}
-
# _setup_ete_folder() - Create and copy ete folder structure
function _setup_ete_folder {
mkdir -p /opt/eteshare/config
- cp $src_folder/integration_* /opt/eteshare/config
- cp $src_folder/vm_config2robot.sh /opt/eteshare/config
- cp $src_folder/ete.sh /opt
- cp $src_folderdemo.sh /opt
+ cp $robot_src_folder/integration_* /opt/eteshare/config
+ cp $robot_src_folder/vm_config2robot.sh /opt/eteshare/config
+ cp $robot_src_folder/ete.sh /opt
+ cp $robot_src_folder/demo.sh /opt
chmod +x /opt/ete.sh
chmod +x /opt/demo.sh
@@ -47,16 +23,16 @@ function get_robot_images {
# install_robot() - Run Robot services
function install_robot {
docker rm -f openecompete_container
- docker run -d --name openecompete_container -v /opt/eteshare:/share -p 88:88 $nexus_docker_repo/openecomp/testsuite:$docker_version
+ run_docker_image -d --name openecompete_container -v /opt/eteshare:/share -p 88:88 $nexus_docker_repo/openecomp/testsuite:$docker_version
}
# init_robot() - Function that initialize Robot services
function init_robot {
if [[ "$clone_repo" == "True" ]]; then
- clone_robot_repos
+ clone_repos "robot" "testsuite"
_setup_ete_folder
if [[ "$compile_repo" == "True" ]]; then
- compile_robot_repos
+ compile_repos "robot"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/sdc b/bootstrap/vagrant-onap/lib/sdc
index a21e2a418..fee583d79 100755
--- a/bootstrap/vagrant-onap/lib/sdc
+++ b/bootstrap/vagrant-onap/lib/sdc
@@ -1,32 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-sdc_src_folder=$git_src_folder/sdc
-sdc_repos=("sdc" "sdc/jtosca" "sdc/sdc-distribution-client"
-"sdc/sdc-docker-base" "sdc/sdc-titan-cassandra" "sdc/sdc-tosca"
-"sdc/sdc-vnfdesign" "sdc/sdc-workflow-designer" "sdc/sdc_common")
-
-# _mount_external_partition() - Create partition and mount the external volume
-function _mount_external_partition {
- cat <<EOL > /tmp/sdc_ext_volume_partitions.txt
-# partition table of /dev/sdb
-unit: sectors
-
-/dev/sdb1 : start= 2048, size=209713152, Id=83
-/dev/sdb2 : start= 0, size= 0, Id= 0
-/dev/sdb3 : start= 0, size= 0, Id= 0
-/dev/sdb4 : start= 0, size= 0, Id= 0
-EOL
- sfdisk --force /dev/sdb < /tmp/sdc_ext_volume_partitions.txt
- mkfs -t ext4 /dev/sdb1
- mkdir -p /data
- mount /dev/sdb1 /data
- echo "/dev/sdb1 /data ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
-}
-
# _init_data_folders() - Function that initialize the data folders
function _init_data_folders {
mkdir -p /data/environments
@@ -47,20 +22,6 @@ alias health='/data/scripts/docker_health.sh'
EOL
}
-# clone_all_sdc_repos() - Function that clones SDC source repo.
-function clone_all_sdc_repos {
- for repo in ${sdc_repos[@]}; do
- clone_repo $repo $sdc_src_folder${repo#*sdc}
- done
-}
-
-# compile_all_sdc_repos() - Function that compiles SDC source repo.
-function compile_all_sdc_repos {
- for repo in ${sdc_repos[@]}; do
- compile_src $sdc_src_folder${repo#*sdc}
- done
-}
-
# get_sdc_images() - Function that retrieves the SDC docker images
function get_sdc_images {
if [[ "$build_image" == "True" ]]; then
@@ -103,11 +64,11 @@ function install_sdc {
# init_sdc() - Function that initialize SDC services
function init_sdc {
- _mount_external_partition
+ mount_external_partition sdb /data/
if [[ "$clone_repo" == "True" ]]; then
- clone_all_sdc_repos
+ clone_repos "sdc"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_sdc_repos
+ compile_repos "sdc"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/sdnc b/bootstrap/vagrant-onap/lib/sdnc
index 5faca942e..a8cf756b4 100755
--- a/bootstrap/vagrant-onap/lib/sdnc
+++ b/bootstrap/vagrant-onap/lib/sdnc
@@ -1,24 +1,11 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
source /var/onap/ccsdk
-sdnc_src_folder=$git_src_folder/openecomp/sdnc
-sdnc_repos=("sdnc/adaptors" "sdnc/architecture" "sdnc/core" "sdnc/features" \
-"sdnc/northbound" "sdnc/oam" "sdnc/parent" "sdnc/plugins")
-
-# clone_all_sdnc_repos() - Function that clones SDNC source repo.
-function clone_all_sdnc_repos {
- for repo in ${sdnc_repos[@]}; do
- clone_repo $repo $sdnc_src_folder${repo#*sdnc}
- done
-}
-
# compile_all_sdnc_repos() - Function that compiles SDNC source repo.
function compile_all_sdnc_repos {
- for repo in ${sdnc_repos[@]}; do
+ for repo in ${repos[sdnc]}; do
if [[ "$repo" == "sdnc/core" ]]; then
compile_src $sdnc_src_folder/core/rootpom
fi
@@ -32,7 +19,6 @@ function _build_sdnc_images {
get_ccsdk_images
install_package unzip
- clone_repo sdnc/oam $folder
# The OAM code depends on all the SDNC repos which should be downloaded and compiled first
if [[ "$compile_repo" != "True" ]]; then
compile_src $folder
@@ -56,7 +42,6 @@ function get_sdnc_images {
# install_sdnc() - Download and install SDNC services from source code
function install_sdnc {
- clone_repo sdnc/oam $sdnc_src_folder/oam
pushd $sdnc_src_folder/oam/installation/src/main/yaml
install_docker_compose
/opt/docker/docker-compose up -d
@@ -66,7 +51,7 @@ function install_sdnc {
# init_sdnc() - Function that initialize SDNC services
function init_sdnc {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_sdnc_repos
+ clone_repos "sdnc"
if [[ "$compile_repo" == "True" ]]; then
compile_all_sdnc_repos
fi
diff --git a/bootstrap/vagrant-onap/lib/vfc b/bootstrap/vagrant-onap/lib/vfc
index fc26af282..2b5407ca3 100755
--- a/bootstrap/vagrant-onap/lib/vfc
+++ b/bootstrap/vagrant-onap/lib/vfc
@@ -1,23 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vfc_src_folder=$git_src_folder/vfc
-vfc_repos=("vfc/gvnfm" "vfc/gvnfm/vnflcm" "vfc/gvnfm/vnfmgr" \
-"vfc/gvnfm/vnfres" "vfc/nfvo" "vfc/nfvo/catalog" "vfc/nfvo/driver" \
-"vfc/nfvo/driver/ems" "vfc/nfvo/driver/sfc" "vfc/nfvo/driver/vnfm" \
-"vfc/nfvo/driver/vnfm/gvnfm" "vfc/nfvo/driver/vnfm/svnfm" "vfc/nfvo/lcm" \
-"vfc/nfvo/resmanagement" "vfc/nfvo/wfengine")
-
-# clone_all_vfc_repos() - Function that clones VF-C source repo.
-function clone_all_vfc_repos {
- for repo in ${vfc_repos[@]}; do
- clone_repo $repo $vfc_src_folder${repo#*vfc}
- done
-}
-
# compile_all_vfc_repos() - Function that compiles VF-C source repo.
function compile_all_vfc_repos {
install_python_package tox
@@ -85,11 +69,11 @@ function install_vfc {
vnfres_image=`docker images | grep vnfres | grep latest| awk '{print $1 ":" $2}'`
gvnfmdriver_image=`docker images | grep gvnfmdriver | grep latest| awk '{print $1 ":" $2}'`
- docker run -d --name vfc-nslcm -p 8403:8403 -e MSB_ADDR=127.0.0.1 $nslcm_image
- docker run -d --name vfc-vnflcm -p 8801:8801 -e MSB_ADDR=127.0.0.1 $vnflcm_image
- docker run -d --name vfc-vnfmgr -p 8803:8803 -e MSB_ADDR=127.0.0.1 $vnfmgr_image
- docker run -d --name vfc-vnfres -p 8802:8802 -e MSB_ADDR=127.0.0.1 $vnfres_image
- docker run -d --name vfc-gvnfmdriver -p 8484:8484 -e MSB_ADDR=127.0.0.1 $gvnfmdriver_image
+ run_docker_image -d --name vfc-nslcm -p 8403:8403 -e MSB_ADDR=127.0.0.1 $nslcm_image
+ run_docker_image -d --name vfc-vnflcm -p 8801:8801 -e MSB_ADDR=127.0.0.1 $vnflcm_image
+ run_docker_image -d --name vfc-vnfmgr -p 8803:8803 -e MSB_ADDR=127.0.0.1 $vnfmgr_image
+ run_docker_image -d --name vfc-vnfres -p 8802:8802 -e MSB_ADDR=127.0.0.1 $vnfres_image
+ run_docker_image -d --name vfc-gvnfmdriver -p 8484:8484 -e MSB_ADDR=127.0.0.1 $gvnfmdriver_image
# TODO(sshank): Run other VFC component docker images.
}
@@ -99,7 +83,7 @@ function init_vfc {
install_package libmysqlclient-dev
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vfc_repos
+ clone_repos "vfc"
if [[ "$compile_repo" == "True" ]]; then
compile_all_vfc_repos
fi
diff --git a/bootstrap/vagrant-onap/lib/vid b/bootstrap/vagrant-onap/lib/vid
index f99fd6042..f5ca9d293 100755
--- a/bootstrap/vagrant-onap/lib/vid
+++ b/bootstrap/vagrant-onap/lib/vid
@@ -1,26 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vid_src_folder=$git_src_folder/vid
-vid_repos=("vid" "vid/asdcclient")
-
-# clone_all_vid_repos() - Function that clones VID source code.
-function clone_all_vid_repos {
- for repo in ${vid_repos[@]}; do
- clone_repo $repo $vid_src_folder${repo#*vid}
- done
-}
-
-# compile_all_vid_repos() - Function that compiles VID source repo.
-function compile_all_vid_repos {
- for repo in ${vid_repos[@]}; do
- compile_src $vid_src_folder${repo#*vid}
- done
-}
-
# _build_vid_images() - Function that builds VID docker images
function _build_vid_images {
if [[ "$compile_repo" != "True" ]]; then
@@ -46,16 +27,16 @@ function install_vid {
docker rm -f vid-mariadb
docker rm -f vid-server
- docker run --name vid-mariadb -e MYSQL_DATABASE=vid_openecomp -e MYSQL_USER=vidadmin -e MYSQL_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U -e MYSQL_ROOT_PASSWORD=LF+tp_1WqgSY -v /opt/vid/lf_config/vid-my.cnf:/etc/mysql/my.cnf -v /opt/vid/lf_config/vid-pre-init.sql:/docker-entrypoint-initdb.d/vid-pre-init.sql -v /var/lib/mysql -d mariadb:10
- docker run -e VID_MYSQL_DBNAME=vid_openecomp -e VID_MYSQL_PASS=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U --name vid-server -p 8080:8080 --link vid-mariadb:vid-mariadb-docker-instance -d $vid_image
+ run_docker_image --name vid-mariadb -e MYSQL_DATABASE=vid_openecomp -e MYSQL_USER=vidadmin -e MYSQL_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U -e MYSQL_ROOT_PASSWORD=LF+tp_1WqgSY -v /opt/vid/lf_config/vid-my.cnf:/etc/mysql/my.cnf -v /opt/vid/lf_config/vid-pre-init.sql:/docker-entrypoint-initdb.d/vid-pre-init.sql -v /var/lib/mysql -d mariadb:10
+ run_docker_image -e VID_MYSQL_DBNAME=vid_openecomp -e VID_MYSQL_PASS=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U --name vid-server -p 8080:8080 --link vid-mariadb:vid-mariadb-docker-instance -d $vid_image
}
# init_vid() - Function that initialize Vid services
function init_vid {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vid_repos
+ clone_repos "vid"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_vid_repos
+ compile_repos "vid"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/vnfsdk b/bootstrap/vagrant-onap/lib/vnfsdk
index c23ed8581..58e3a9aa2 100644..100755
--- a/bootstrap/vagrant-onap/lib/vnfsdk
+++ b/bootstrap/vagrant-onap/lib/vnfsdk
@@ -1,26 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vnfsdk_src_folder=$git_src_folder/vnfsdk
-vnfsdk_repos=("vnfsdk/compliance" "vnfsdk/functest" "vnfsdk/lctest" "vnfsdk/model" "vnfsdk/pkgtools" "vnfsdk/refrepo" "vnfsdk/validation")
-
-# clone_all_vnfsdk_repos() - Function that clones vnfsdk source repo.
-function clone_all_vnfsdk_repos {
- for repo in ${vnfsdk_repos[@]}; do
- clone_repo $repo $vnfsdk_src_folder${repo#*vnfsdk}
- done
-}
-
-# compile_all_vnfsdk_repos - Function that builds vnfsdk source repo
-function compile_all_vnfsdk_repos {
- for repo in ${vnfsdk_repos[@]}; do
- compile_src $vnfsdk_src_folder${repo#*vnfsdk}
- done
-}
-
# _build_vnfsdk_images() - Builds VNFSDK images from source code
function _build_vnfsdk_images {
install_package unzip
@@ -51,9 +32,9 @@ function install_vnfsdk {
# init_vnfsdk() - Init VNFSDK services
function init_vnfsdk {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vnfsdk_repos
+ clone_repos "vnfsdk"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_vnfsdk_repos
+ compile_repos "vnfsdk"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/vvp b/bootstrap/vagrant-onap/lib/vvp
index 588f32b87..f24431ee6 100644..100755
--- a/bootstrap/vagrant-onap/lib/vvp
+++ b/bootstrap/vagrant-onap/lib/vvp
@@ -1,26 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vvp_src_folder=$git_src_folder/vvp
-vvp_repos=("vvp/ansible-ice-bootstrap" "vvp/cms" "vvp/devkit" "vvp/documentation" "vvp/engagementmgr" "vvp/gitlab" "vvp/image-scanner" "vvp/jenkins" "vvp/portal" "vvp/postgresql" "vvp/test-engine" "vvp/validation-scripts")
-
-# clone_all_vvp_repos() - Function that clones vvp source repo.
-function clone_all_vvp_repos {
- for repo in ${vvp_repos[@]}; do
- clone_repo $repo $vvp_src_folder${repo#*vvp}
- done
-}
-
-# compile_all_vvp_repos - Function that builds vvp source repo
-function compile_all_vvp_repos {
- for repo in ${vvp_repos[@]}; do
- compile_src $vvp_src_folder${repo#*vvp}
- done
-}
-
# _build_vvp_images() - Builds VNFSDK images from source code
function _build_vvp_images {
echo "pass"
@@ -44,9 +25,9 @@ function install_vvp {
# init_vvp() - Init VNFSDK services
function init_vvp {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vvp_repos
+ clone_repos "vvp"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_vvp_repos
+ compile_repos "vvp"
fi
fi