aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--deployments/.env6
-rwxr-xr-xdeployments/_functions.sh36
-rw-r--r--deployments/docker-compose.yml31
-rwxr-xr-xdeployments/start-dev.sh31
-rwxr-xr-xdeployments/start.sh35
-rw-r--r--docs/bare_metal_provisioning.rst49
-rw-r--r--docs/index.rst6
-rw-r--r--docs/kud_architecture.rst (renamed from docs/krd_architecture.rst)20
-rwxr-xr-xkud/hosting_providers/baremetal/aio.sh (renamed from kud/hosting_providers/vagrant/aio.sh)24
-rw-r--r--kud/hosting_providers/vagrant/Vagrantfile2
-rwxr-xr-xkud/hosting_providers/vagrant/installer.sh48
-rwxr-xr-xkud/tests/_functions.sh23
12 files changed, 159 insertions, 152 deletions
diff --git a/deployments/.env b/deployments/.env
index c2630b85..b4740f9b 100644
--- a/deployments/.env
+++ b/deployments/.env
@@ -1,7 +1 @@
IMAGE_NAME=nexus3.onap.org:10003/onap/multicloud/k8s
-CSAR_DIR=/opt/csar
-KUBE_CONFIG_DIR=/opt/kubeconfig
-DATABASE_TYPE=mongo
-DATABASE_IP=172.19.0.2
-PLUGIN_IP=172.19.0.3
-PLUGINS_DIR=/opt/multicloud/k8s
diff --git a/deployments/_functions.sh b/deployments/_functions.sh
new file mode 100755
index 00000000..7942aed0
--- /dev/null
+++ b/deployments/_functions.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+function stop_all {
+ docker-compose kill
+ docker-compose down
+}
+
+function start_mongo {
+ docker-compose up -d mongo
+ export DATABASE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(docker ps -aqf "name=mongo"))
+ export no_proxy=${no_proxy:-},${DATABASE_IP}
+ export NO_PROXY=${NO_PROXY:-},${DATABASE_IP}
+}
+
+function generate_k8sconfig {
+cat << EOF > k8sconfig.json
+{
+ "database-address": "${DATABASE_IP}",
+ "database-type": "mongo",
+ "plugin-dir": "plugins",
+ "service-port": "9015"
+}
+EOF
+}
+
+function start_all {
+ docker-compose up -d
+}
diff --git a/deployments/docker-compose.yml b/deployments/docker-compose.yml
index c4faa537..3fa82fe5 100644
--- a/deployments/docker-compose.yml
+++ b/deployments/docker-compose.yml
@@ -9,53 +9,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-version: '2'
+version: '3'
services:
multicloud-k8s:
image: ${IMAGE_NAME}
- networks:
- multicloud_net:
- ipv4_address: ${PLUGIN_IP}
build:
context: ./
args:
- HTTP_PROXY=${HTTP_PROXY}
- HTTPS_PROXY=${HTTPS_PROXY}
- NO_PROXY=${NO_PROXY}
- ports:
- - "8081:8081"
environment:
- - CSAR_DIR=${CSAR_DIR}
- - KUBE_CONFIG_DIR=${KUBE_CONFIG_DIR}
- - DATABASE_TYPE=${DATABASE_TYPE}
- - DATABASE_IP=${DATABASE_IP}
- - PLUGINS_DIR=${PLUGINS_DIR}
- HTTP_PROXY=${HTTP_PROXY}
- HTTPS_PROXY=${HTTPS_PROXY}
- - NO_PROXY=${NO_PROXY},${DATABASE_IP}
+ - NO_PROXY=${NO_PROXY},mongo
depends_on:
- mongo
- links:
- - mongo
+ network_mode: host
volumes:
- /opt/csar:/opt/csar
- - /opt/kubeconfig:/opt/kubeconfig
+ - ${PWD}/k8sconfig.json:/opt/multicloud/k8splugin/k8sconfig.json:ro
mongo:
image: mongo
- networks:
- multicloud_net:
- ipv4_address: ${DATABASE_IP}
environment:
- HTTP_PROXY=${HTTP_PROXY}
- HTTPS_PROXY=${HTTPS_PROXY}
- NO_PROXY=${NO_PROXY}
-
-networks:
- multicloud_net:
- driver: bridge
- ipam:
- driver: default
- config:
- -
- subnet: 172.19.0.0/27
diff --git a/deployments/start-dev.sh b/deployments/start-dev.sh
new file mode 100755
index 00000000..c4b0cbc4
--- /dev/null
+++ b/deployments/start-dev.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _functions.sh
+
+#
+# Start k8splugin from compiled binaries in the foreground. This is useful for development.
+#
+source /etc/environment
+k8s_path="$(git rev-parse --show-toplevel)"
+
+stop_all
+start_mongo
+
+echo "Compiling source code"
+pushd $k8s_path/src/k8splugin/
+generate_k8sconfig
+make all
+./k8plugin
+popd
diff --git a/deployments/start.sh b/deployments/start.sh
index a57a6377..e7ff1334 100755
--- a/deployments/start.sh
+++ b/deployments/start.sh
@@ -8,33 +8,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+set -o errexit
set -o nounset
set -o pipefail
-source /etc/environment
+source _functions.sh
-k8s_path="$(git rev-parse --show-toplevel)"
-export GOPATH=$k8s_path
-export GO111MODULE=on
-
-echo "Starting mongo services"
-docker-compose kill
-docker-compose up -d mongo
-export DATABASE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(docker ps -aqf "name=mongo"))
-export no_proxy=${no_proxy:-},$DATABASE_IP
-export NO_PROXY=${NO_PROXY:-},$DATABASE_IP
-
-echo "Compiling source code"
-pushd $k8s_path/src/k8splugin/
-cat << EOF > k8sconfig.json
-{
- "database-address": "$DATABASE_IP",
- "database-type": "mongo",
- "plugin-dir": "$(pwd)/plugins",
- "service-port": "9015",
- "kube-config-dir": "$(pwd)/kubeconfigs"
-}
-EOF
-make all
-./k8plugin
-popd
+#
+# Start k8splugin from containers. build.sh should be run prior to running this script.
+#
+stop_all
+start_mongo
+generate_k8sconfig
+start_all
diff --git a/docs/bare_metal_provisioning.rst b/docs/bare_metal_provisioning.rst
index 2cb74afe..885ffea3 100644
--- a/docs/bare_metal_provisioning.rst
+++ b/docs/bare_metal_provisioning.rst
@@ -13,11 +13,11 @@
Bare-Metal Provisioning
***********************
-The Kubernetes Reference Deployment, aka KRD, has been designed to be consumed
-by Virtual Machines as well as Bare-Metal servers. The *vagrant/aio.sh*
+The Kubernetes Deployment, aka KUD, has been designed to be consumed
+by Virtual Machines as well as Bare-Metal servers. The *baremetal/aio.sh*
script contains the bash instructions for provisioning an All-in-One Kubernetes
deployment in a Bare-Metal server. This document lists the Hardware & Software
-requirements and walkthrough the instructions that *vagrant/aio.sh* contains.
+requirements and walks through the instructions that *baremetal/aio.sh* contains.
Hardware Requirements
#####################
@@ -37,36 +37,28 @@ Software Requirements
- Ubuntu Server 16.04 LTS
-vagrant/aio.sh
-##############
+baremetal/aio.sh
+################
This bash script provides an automated process for deploying an All-in-One
-Kubernetes cluster. Given that the ansible inventory file created by this
-script doesn't specify any information about user and password, it's necessary
-to execute this script as root user.
+Kubernetes cluster.
The following two instructions start the provisioning process.
.. code-block:: bash
$ sudo su
- # wget -O - https://git.onap.org/multicloud/k8s/plain/vagrant/aio.sh | bash
+ # git clone https://git.onap.org/multicloud/k8s/
+ # cd k8s/kud/hosting_providers/baremetal/
+ # ./aio.sh
In overall, this script can be summarized in three general phases:
-1. Cloning and configuring the KRD project.
+1. Generating Inventory.
2. Enabiling Nested-Virtualization.
-3. Deploying KRD services.
+3. Deploying KUD services.
-**Cloning and configuring the KRD project**
-
-KRD requires multiple files(bash scripts and ansible playbooks) to operate.
-Therefore, it's necessary to clone the *ONAP multicloud/k8s* project to get
-access to the *vagrant* folder.
-
-.. code-block:: bash
-
- git clone https://git.onap.org/multicloud/k8s/
+**Inventory**
Ansible works agains multiple systems, the way for selecting them is through the
usage of the inventory. The inventory file is a static source for determining the
@@ -102,17 +94,10 @@ an inventory file for addressing those tasks to localhost.
kube-master
EOL
-KRD consumes kubespray_ for provisioning a Kubernetes base deployment. As part
-of the deployment process, this tool downloads and configures *kubectl* binary.
-This action conflicts with *andrewrothstein.kubectl* ansible role. Therefore is
-necessary to remove those instructions from all the ansible playbooks.
+KUD consumes kubespray_ for provisioning a Kubernetes base deployment.
.. _kubespray: https://github.com/kubernetes-incubator/kubespray
-.. code-block:: bash
-
- # sed -i '/andrewrothstein.kubectl/d' playbooks/configure-*.yml
-
Ansible uses SSH protocol for executing remote instructions. The following
instructions create and register ssh keys which avoid the usage of passwords.
@@ -124,7 +109,7 @@ instructions create and register ssh keys which avoid the usage of passwords.
**Enabling Nested-Virtualization**
-KRD installs Virtlet_ Kubernetes CRI for running Virtual Machine workloads.
+KUD installs Virtlet_ Kubernetes CRI for running Virtual Machine workloads.
Nested-virtualization gives the ability of running a Virtual Machine within
another. The *node.sh* bash script contains the instructions for enabling
Nested-Virtualization.
@@ -135,14 +120,14 @@ Nested-Virtualization.
# ./node.sh
-**Deploying KRD services**
+**Deploying KUD services**
Finally, the KRD provisioning process can be started through the use of
*installer.sh* bash script. The output of this script is collected in the
-*krd_installer.log* file for future reference.
+*kud_installer.log* file for future reference.
.. code-block:: bash
- # ./installer.sh | tee krd_installer.log
+ # ./installer.sh | tee kud_installer.log
.. image:: ./img/installer_workflow.png
diff --git a/docs/index.rst b/docs/index.rst
index 173076b4..7a49b6d1 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -8,7 +8,7 @@ over cloud regions that support Kubernetes as the orchestrator.
Kubernetes Reference Deployment
===============================
-The Kubernetes Reference Deployment(KRD) provides an
+The Kubernetes Deployment (KUD) provides an
automated mechanism to install and configure the Kubernetes
services required for the MultiCloud Kubernetes plugin.
@@ -18,6 +18,6 @@ Table of contents
.. toctree::
:maxdepth: 3
- KRD Project Architecture <krd_architecture>
- Bare Metal All-in-One KRD deployment<bare_metal_provisioning>
+ KUD Project Architecture <kud_architecture>
+ Bare Metal All-in-One KUD deployment<bare_metal_provisioning>
Kubernetes MultiCloud API sample ommands <sampleCommands>
diff --git a/docs/krd_architecture.rst b/docs/kud_architecture.rst
index f188135e..f56b72a5 100644
--- a/docs/krd_architecture.rst
+++ b/docs/kud_architecture.rst
@@ -10,7 +10,7 @@
limitations under the License.
****************
-KRD Architecture
+KUD Architecture
****************
This document explains the different components of the Kubernetes
@@ -78,13 +78,7 @@ installer.sh
############
Main bash script that installs dependencies and executes ansible
-playbooks for provisioning KRD components on external nodes. This
-script uses some arguments for the additional installation of
-components. For more information about its usage:
-
-.. code-block:: bash
-
- ./installer.sh -h
+playbooks for provisioning KUD components on external nodes.
inventory/
##########
@@ -116,17 +110,17 @@ This folder contains a set of Ansible playbooks which perform the
tasks required for configuring services like Multus, Virtlet and/or
OVN.
-playbooks/configure-krd.yml
+playbooks/configure-kud.yml
***************************
This ansible playbook collects the common actions among all the
-Kubernetes AddOns offered by the KRD.
+Kubernetes AddOns offered by the KUD.
-playbooks/krd-vars.yml
+playbooks/kud-vars.yml
************************
This file centralizes the version numbers and source URLs used for
-different components offered by the KRD. Bumping a version requires
+different components offered by the KUD. Bumping a version requires
extensive testing to ensure compatibility.
setup.sh
@@ -158,5 +152,5 @@ tests/
This folder contains the health check scripts that guarantee the
proper installation/configuration of Kubernetes AddOns. Its
execution is disabled by default. In order to enable it, it's
-necessary to pass the *-t* argument to the **installer.sh** bash
+necessary to export the KUD_ENABLE_TESTS=true environment variable before calling the **installer.sh** bash
script, usually through changing the arguments in the *Vagrantfile*.
diff --git a/kud/hosting_providers/vagrant/aio.sh b/kud/hosting_providers/baremetal/aio.sh
index 0d06cc65..c9903cd3 100755
--- a/kud/hosting_providers/vagrant/aio.sh
+++ b/kud/hosting_providers/baremetal/aio.sh
@@ -12,18 +12,16 @@ set -o errexit
set -o nounset
set -o pipefail
-if [[ $(whoami) != 'root' ]];then
- echo "This bash script must be executed as root user"
- exit 1
-fi
-
-echo "Cloning and configuring KUD project..."
-rm -rf k8s
-git clone https://git.onap.org/multicloud/k8s/
-cd k8s/kud/hosting_providers/baremetal/
+aio_dir=$(cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd)
+cd ${aio_dir}/../vagrant
+
+# For aio inventory by default get ovn central ip from local host default interface.
+# This variable is used only in this file, but it is defined as an env variable so the user can override it prior to calling aio.sh.
+OVN_CENTRAL_IP_ADDRESS=${OVN_CENTRAL_IP_ADDRESS:-$(hostname -I | cut -d ' ' -f 1)}
+
cat <<EOL > inventory/hosts.ini
[all]
-localhost
+localhost ansible_ssh_host=${OVN_CENTRAL_IP_ADDRESS} ansible_ssh_port=22
[kube-master]
localhost
@@ -47,14 +45,14 @@ localhost
kube-node
kube-master
EOL
-sed -i '/andrewrothstein.kubectl/d' ../../deployment_infra/playbooks/configure-*.yml
+
rm -f ~/.ssh/id_rsa
echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod og-wx ~/.ssh/authorized_keys
echo "Enabling nested-virtualization"
-./node.sh
+sudo ./node.sh
-echo "Deploying KRD project"
+echo "Deploying KUD project"
./installer.sh | tee kud_installer.log
diff --git a/kud/hosting_providers/vagrant/Vagrantfile b/kud/hosting_providers/vagrant/Vagrantfile
index d068b84a..2d1b5ab4 100644
--- a/kud/hosting_providers/vagrant/Vagrantfile
+++ b/kud/hosting_providers/vagrant/Vagrantfile
@@ -120,7 +120,7 @@ Vagrant.configure("2") do |config|
installer.vm.network :private_network, :ip => "10.10.10.2", :type => :static
installer.vm.synced_folder '../../../', '/home/vagrant/multicloud-k8s/', type: sync_type
installer.vm.provision 'shell', privileged: false do |sh|
- sh.env = {'KUD_PLUGIN_ENABLED': 'false'}
+ sh.env = {'KUD_PLUGIN_ENABLED': 'false', 'OVN_CENTRAL_INTERFACE': 'eth1'}
sh.inline = <<-SHELL
cp /vagrant/insecure_keys/key.pub /home/vagrant/.ssh/id_rsa.pub
cp /vagrant/insecure_keys/key /home/vagrant/.ssh/id_rsa
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 1d84646c..ca14bad7 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -9,8 +9,13 @@
##############################################################################
set -o errexit
+set -o nounset
set -o pipefail
+INSTALLER_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd)
+
+source ${INSTALLER_DIR}/../../tests/_functions.sh
+
# _install_go() - Install GoLang package
function _install_go {
version=$(grep "go_version" ${kud_playbooks}/kud-vars.yml | awk -F "'" '{print $2}')
@@ -63,15 +68,15 @@ function _install_docker {
sudo apt-get install -y docker-ce
sudo mkdir -p /etc/systemd/system/docker.service.d
- if [ $http_proxy ]; then
+ if [ ${http_proxy:-} ]; then
echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf
echo "Environment=\"HTTP_PROXY=$http_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/http-proxy.conf
fi
- if [ $https_proxy ]; then
+ if [ ${https_proxy:-} ]; then
echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/https-proxy.conf
echo "Environment=\"HTTPS_PROXY=$https_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/https-proxy.conf
fi
- if [ $no_proxy ]; then
+ if [ ${no_proxy:-} ]; then
echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/no-proxy.conf
echo "Environment=\"NO_PROXY=$no_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/no-proxy.conf
fi
@@ -86,13 +91,12 @@ function _install_docker {
}
function _set_environment_file {
- ansible_ifconfig=$(ansible ovn-central[0] -i $kud_inventory -m shell -a "ifconfig eth1 |grep \"inet addr\" |awk '{print \$2}' |awk -F: '{print \$2}'")
- if [[ $ansible_ifconfig != *CHANGED* ]]; then
- echo "Fail to get the OVN central IP address from eth1 nic"
- exit
- fi
- echo "export OVN_CENTRAL_ADDRESS=$(echo ${ansible_ifconfig#*>>} | tr '\n' ':')6641" | sudo tee --append /etc/environment
+ # By default the OVN central interface is the first active network interface on localhost. To use another one, export this variable in aio.sh or the Vagrantfile.
+ OVN_CENTRAL_INTERFACE=${OVN_CENTRAL_INTERFACE:-$(ip addr show | awk '/inet.*brd/{print $NF; exit}')}
+ echo "export OVN_CENTRAL_INTERFACE=${OVN_CENTRAL_INTERFACE}" | sudo tee --append /etc/environment
+ echo "export OVN_CENTRAL_ADDRESS=$(get_ovn_central_address)" | sudo tee --append /etc/environment
echo "export KUBE_CONFIG_DIR=/opt/kubeconfig" | sudo tee --append /etc/environment
+ echo "export CSAR_DIR=/opt/csar" | sudo tee --append /etc/environment
}
# install_k8s() - Install Kubernetes using kubespray tool
@@ -102,7 +106,7 @@ function install_k8s {
version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | awk -F ': ' '{print $2}')
local_release_dir=$(grep "local_release_dir" $kud_inventory_folder/group_vars/k8s-cluster.yml | awk -F "\"" '{print $2}')
local tarball=v$version.tar.gz
- sudo apt-get install -y sshpass
+ sudo apt-get install -y sshpass make unzip # install make to run mitogen target and unzip is mitogen playbook dependency
_install_docker
_install_ansible
wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
@@ -112,18 +116,21 @@ function install_k8s {
sudo mkdir -p ${local_release_dir}/containers
rm $tarball
- sudo -E pip install -r $dest_folder/kubespray-$version/requirements.txt
+ pushd $dest_folder/kubespray-$version/
+ sudo -E pip install -r ./requirements.txt
+ make mitogen
+ popd
rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null
- if [[ -n "${verbose}" ]]; then
+ if [[ -n "${verbose:-}" ]]; then
echo "kube_log_level: 5" | tee $kud_inventory_folder/group_vars/all.yml
else
echo "kube_log_level: 2" | tee $kud_inventory_folder/group_vars/all.yml
fi
echo "kubeadm_enabled: true" | tee --append $kud_inventory_folder/group_vars/all.yml
- if [[ -n "${http_proxy}" ]]; then
+ if [[ -n "${http_proxy:-}" ]]; then
echo "http_proxy: \"$http_proxy\"" | tee --append $kud_inventory_folder/group_vars/all.yml
fi
- if [[ -n "${https_proxy}" ]]; then
+ if [[ -n "${https_proxy:-}" ]]; then
echo "https_proxy: \"$https_proxy\"" | tee --append $kud_inventory_folder/group_vars/all.yml
fi
ansible-playbook $verbose -i $kud_inventory $dest_folder/kubespray-$version/cluster.yml --become --become-user=root | sudo tee $log_folder/setup-kubernetes.log
@@ -162,13 +169,12 @@ function install_plugin {
sudo mkdir -p /opt/{kubeconfig,consul/config}
sudo cp $HOME/.kube/config /opt/kubeconfig/kud
- _set_environment_file
source /etc/environment
pushd $kud_folder/../../../deployments
sudo ./build.sh
if [[ "${testing_enabled}" == "true" ]]; then
- docker-compose up -d
+ sudo ./start.sh
pushd $kud_tests
for functional_test in plugin plugin_edgex; do
bash ${functional_test}.sh
@@ -196,6 +202,7 @@ function _print_kubernetes_info {
echo "Admin password: secret" >> $k8s_info_file
}
+sudo -k # forget any cached sudo credentials so the passwordless-sudo check below is reliable
if ! sudo -n "true"; then
echo ""
echo "passwordless sudo is needed for '$(id -nu)' user."
@@ -206,26 +213,26 @@ if ! sudo -n "true"; then
exit 1
fi
-if [[ -n "${KUD_DEBUG}" ]]; then
+verbose=""
+if [[ -n "${KUD_DEBUG:-}" ]]; then
set -o xtrace
verbose="-vvv"
fi
# Configuration values
log_folder=/var/log/kud
-kud_folder=$(pwd)
+kud_folder=${INSTALLER_DIR}
kud_infra_folder=$kud_folder/../../deployment_infra
export kud_inventory_folder=$kud_folder/inventory
kud_inventory=$kud_inventory_folder/hosts.ini
kud_playbooks=$kud_infra_folder/playbooks
-kud_tests=$kud_folder/tests
+kud_tests=$kud_folder/../../tests
k8s_info_file=$kud_folder/k8s_info.log
testing_enabled=${KUD_ENABLE_TESTS:-false}
sudo mkdir -p $log_folder
sudo mkdir -p /opt/csar
sudo chown -R $USER /opt/csar
-echo "export CSAR_DIR=/opt/csar" | sudo tee --append /etc/environment
# Install dependencies
# Setup proxy variables
@@ -236,6 +243,7 @@ fi
sudo apt-get update
install_k8s
install_addons
+_set_environment_file
if ${KUD_PLUGIN_ENABLED:-false}; then
install_plugin
fi
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 542443d6..5e6314ce 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -12,6 +12,8 @@ set -o errexit
set -o nounset
set -o pipefail
+FUNCTIONS_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd)
+
source /etc/environment
function print_msg {
@@ -22,10 +24,10 @@ function print_msg {
echo -e "${RED} $msg ---------------------------------------${NC}"
}
-function _get_ovn_central_address {
- ansible_ifconfig=$(ansible ovn-central[0] -i $test_folder/../hosting_providers/vagrant/inventory/hosts.ini -m shell -a "ifconfig eth1 |grep \"inet addr\" |awk '{print \$2}' |awk -F: '{print \$2}'")
+function get_ovn_central_address {
+ ansible_ifconfig=$(ansible ovn-central[0] -i ${FUNCTIONS_DIR}/../hosting_providers/vagrant/inventory/hosts.ini -m shell -a "ifconfig ${OVN_CENTRAL_INTERFACE} |grep \"inet addr\" |awk '{print \$2}' |awk -F: '{print \$2}'")
if [[ $ansible_ifconfig != *CHANGED* ]]; then
- echo "Fail to get the OVN central IP address from eth1 nic"
+ echo "Fail to get the OVN central IP address from ${OVN_CENTRAL_INTERFACE} nic"
exit
fi
echo "$(echo ${ansible_ifconfig#*>>} | tr '\n' ':')6641"
@@ -39,7 +41,7 @@ function init_network {
name=$(cat $fname | yq '.spec.name' | xargs)
subnet=$(cat $fname | yq '.spec.subnet' | xargs)
gateway=$(cat $fname | yq '.spec.gateway' | xargs)
- ovn_central_address=$(_get_ovn_central_address)
+ ovn_central_address=$(get_ovn_central_address)
router_mac=$(printf '00:00:00:%02X:%02X:%02X' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))
ovn-nbctl --may-exist --db tcp:$ovn_central_address ls-add $name -- set logical_switch $name other-config:subnet=$subnet external-ids:gateway_ip=$gateway
@@ -52,7 +54,7 @@ function cleanup_network {
local fname=$1
name=$(cat $fname | yq '.spec.name' | xargs)
- ovn_central_address=$(_get_ovn_central_address)
+ ovn_central_address=$(get_ovn_central_address)
for cmd in "ls-del $name" "lrp-del rtos-$name" "lsp-del stor-$name"; do
ovn-nbctl --if-exist --db tcp:$ovn_central_address $cmd
@@ -111,6 +113,10 @@ function wait_deployment {
# setup() - Base testing setup shared among functional tests
function setup {
+ if ! $(kubectl version &>/dev/null); then
+ echo "This funtional test requires kubectl client"
+ exit 1
+ fi
for deployment_name in $@; do
recreate_deployment $deployment_name
done
@@ -126,9 +132,4 @@ function teardown {
destroy_deployment $deployment_name
done
}
-
-if ! $(kubectl version &>/dev/null); then
- echo "This funtional test requires kubectl client"
- exit 1
-fi
-test_folder=$(pwd)
+test_folder=${FUNCTIONS_DIR}