aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    Todd Malsbary <todd.malsbary@intel.com>    2020-11-11 12:13:03 -0800
committer Todd Malsbary <todd.malsbary@intel.com>    2021-10-12 10:57:04 -0700
commit    988ebcf24c7d854a4053437b69cfb120acb8039c (patch)
tree      6504bc0acaab449f44dc3d71a182984c2469b43b
parent    5aa8c4de9fd620ef42ac5bf73b62f76d80e713a0 (diff)
Allow KuD installer to specify Kubespray 2.14.1 or 2.16.0
The default is 2.14.1 (the existing value). Setting KUBESPRAY_VERSION=2.16.0 in the installer environment uses the newer Kubespray version. The newer Kubespray version installs Kubernetes 1.20.7. Kubernetes 1.20.7 comes with the following caveats:
- The Virtlet addon is disabled; it does not work with 1.20.7. This requires removing the plugin_fw test as well.
- Kubernetes 1.20.7 removed support for basic auth.
Issue-ID: MULTICLOUD-1251
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: Ic8b9fb1f3effc31da58de5bb3768ed9e509d50de
-rw-r--r--  kud/deployment_infra/galaxy-requirements.yml                              |  4
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml                               |  3
-rw-r--r--  kud/hosting_providers/containerized/README.md                             |  4
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh                          | 24
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml  |  8
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh                                | 49
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml        |  2
-rwxr-xr-x  kud/tests/_functions.sh                                                   |  7
-rwxr-xr-x  kud/tests/emco.sh                                                         |  3
-rwxr-xr-x  kud/tests/kubevirt.sh                                                     |  3
-rwxr-xr-x  kud/tests/negative_tests/test_all_intents.sh                              |  4
-rwxr-xr-x  kud/tests/negative_tests/test_composite_app.sh                            |  4
-rwxr-xr-x  kud/tests/negative_tests/test_controllers.sh                              |  4
-rwxr-xr-x  kud/tests/negative_tests/test_deployment_intent_group.sh                  |  4
-rwxr-xr-x  kud/tests/negative_tests/test_generic_placement_intent.sh                 |  4
-rwxr-xr-x  kud/tests/negative_tests/test_generic_placement_intent_app.sh             |  4
-rwxr-xr-x  kud/tests/negative_tests/test_multipart.sh                                |  4
-rwxr-xr-x  kud/tests/negative_tests/test_profile.sh                                  |  4
-rwxr-xr-x  kud/tests/negative_tests/test_profile_apps.sh                             |  4
-rwxr-xr-x  kud/tests/onap4k8s.sh                                                     |  4
-rwxr-xr-x  kud/tests/plugin_collection_v2.sh                                         |  4
-rwxr-xr-x  kud/tests/plugin_eaa.sh                                                   |  4
-rwxr-xr-x  kud/tests/plugin_edgex.sh                                                 |  4
-rwxr-xr-x  kud/tests/plugin_fw.sh                                                    |  4
-rwxr-xr-x  kud/tests/plugin_fw_v2.sh                                                 |  3
25 files changed, 83 insertions, 83 deletions
diff --git a/kud/deployment_infra/galaxy-requirements.yml b/kud/deployment_infra/galaxy-requirements.yml
index 9747dc99..0dc5d896 100644
--- a/kud/deployment_infra/galaxy-requirements.yml
+++ b/kud/deployment_infra/galaxy-requirements.yml
@@ -7,9 +7,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- src: andrewrothstein.go
- version: v2.1.15
- src: andrewrothstein.kubernetes-helm
- version: v1.3.16
+ version: v2.0.7
- src: geerlingguy.docker
version: 2.5.2
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index d627ab22..08f7e19b 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -53,9 +53,6 @@ cmk_pkgs: make,jq
cmk_untaint_required: true
go_version: '1.14.15'
-kubespray_version: 2.14.1
-# This matches the helm_version from kubespray defaults
-helm_client_version: 3.2.4
# kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
ansible_version: 2.9.7
diff --git a/kud/hosting_providers/containerized/README.md b/kud/hosting_providers/containerized/README.md
index bd5b08a8..08664ab5 100644
--- a/kud/hosting_providers/containerized/README.md
+++ b/kud/hosting_providers/containerized/README.md
@@ -149,13 +149,13 @@ Multi - cluster information from the host machine;
```
$ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-101/artifacts/admin.conf cluster-info
-Kubernetes master is running at https://192.168.121.2:6443
+Kubernetes control plane is running at https://192.168.121.2:6443
coredns is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
kubernetes-dashboard is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
$ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-102/artifacts/admin.conf cluster-info
-Kubernetes master is running at https://192.168.121.6:6443
+Kubernetes control plane is running at https://192.168.121.6:6443
coredns is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
kubernetes-dashboard is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index 18ebfff3..e8c0b86a 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -46,8 +46,7 @@ function _install_ansible {
function install_kubespray {
echo "Deploying kubernetes"
- version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
- awk -F ': ' '{print $2}')
+ version=$kubespray_version
local_release_dir=$(grep "local_release_dir" \
$kud_inventory_folder/group_vars/k8s-cluster.yml | \
awk -F "\"" '{print $2}')
@@ -139,7 +138,7 @@ function install_addons {
$kud_infra_folder/galaxy-requirements.yml --ignore-errors
ansible-playbook $verbose -i \
- $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml \
+ $kud_inventory -e "base_dest=$HOME" -e "helm_client_version=$helm_client_version" $kud_playbooks/configure-kud.yml \
| tee $cluster_log/setup-kud.log
kud_addons="${KUD_ADDONS:-} ${plugins_name}"
@@ -175,17 +174,10 @@ function install_addons {
case $addon in
"onap4k8s" )
echo "Test the onap4k8s plugin installation"
- for functional_test in plugin_edgex plugin_fw plugin_eaa; do
+ for functional_test in plugin_edgex plugin_eaa; do
bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
done
;;
- "emco" )
- echo "Test the emco plugin installation"
- # TODO plugin_fw_v2 requires virtlet and a patched multus to succeed
- # for functional_test in plugin_fw_v2; do
- # bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
- # done
- ;;
esac
popd
done
@@ -285,6 +277,8 @@ if [[ -n "${KUD_DEBUG:-}" ]]; then
fi
# Configuration values
+kubespray_version="2.16.0"
+helm_client_version="3.5.4"
dest_folder=/opt
kud_folder=${INSTALLER_DIR}
kud_infra_folder=$kud_folder/../../deployment_infra
@@ -299,8 +293,11 @@ kata_webhook_deployed=false
# For containerd the etcd_deployment_type: docker is the default and doesn't work.
# You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
# See https://github.com/kubernetes-sigs/kubespray/issues/5713
+#
+# The JSON notation below is used to prevent false from being interpreted as a
+# string by ansible.
kud_kata_override_variables="container_manager=containerd \
- -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs"
+ -e etcd_deployment_type=host"
mkdir -p /opt/csar
export CSAR_DIR=/opt/csar
@@ -311,8 +308,7 @@ function install_pkg {
}
function install_cluster {
- version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
- awk -F ': ' '{print $2}')
+ version=$kubespray_version
export ANSIBLE_CONFIG=$dest_folder/kubespray-$version/ansible.cfg
install_k8s $1
if [ ${2:+1} ]; then
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index bfbd57b3..be506d10 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -31,7 +31,7 @@ kube_users:
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
-kube_basic_auth: true
+#kube_basic_auth: true
kube_token_auth: true
# Choose network plugin (calico, contiv, weave or flannel)
@@ -47,6 +47,9 @@ kubectl_localhost: true
local_volumes_enabled: true
local_volume_provisioner_enabled: true
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.20.7
+
# Helm deployment
helm_enabled: true
helm_stable_repo_url: "https://charts.helm.sh/stable"
@@ -127,3 +130,6 @@ podsecuritypolicy_restricted_spec:
kubelet_node_config_extra_args:
cpuManagerPolicy: "static" # Options: none (disabled), static (default)
topologyManagerPolicy: "best-effort" # Options: none (disabled), best-effort (default), restricted, single-numa-node
+
+# Deploy the Kubernetes dashboard
+dashboard_enabled: true
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index cf6c0362..099874a5 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -68,7 +68,7 @@ function _set_environment_file {
function install_k8s {
echo "Deploying kubernetes"
local dest_folder=/opt
- version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | awk -F ': ' '{print $2}')
+ version=$kubespray_version
local_release_dir=$(grep "local_release_dir" $kud_inventory_folder/group_vars/k8s-cluster.yml | awk -F "\"" '{print $2}')
local tarball=v$version.tar.gz
sudo apt-get install -y sshpass make unzip # install make to run mitogen target and unzip is mitogen playbook dependency
@@ -85,10 +85,19 @@ function install_k8s {
make mitogen
popd
rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null
+ if [[ -n "${kube_version:-}" ]]; then
+ echo "kube_version: $kube_version" | tee --append $kud_inventory_folder/group_vars/all.yml
+ fi
+ if [[ -n "${kube_basic_auth:-}" ]]; then
+ echo "kube_basic_auth: $kube_basic_auth" | tee --append $kud_inventory_folder/group_vars/all.yml
+ fi
+ if [[ -n "${dashboard_enabled:-}" ]]; then
+ echo "dashboard_enabled: $dashboard_enabled" | tee --append $kud_inventory_folder/group_vars/all.yml
+ fi
if [[ -n "${verbose:-}" ]]; then
- echo "kube_log_level: 5" | tee $kud_inventory_folder/group_vars/all.yml
+ echo "kube_log_level: 5" | tee --append $kud_inventory_folder/group_vars/all.yml
else
- echo "kube_log_level: 2" | tee $kud_inventory_folder/group_vars/all.yml
+ echo "kube_log_level: 2" | tee --append $kud_inventory_folder/group_vars/all.yml
fi
echo "kubeadm_enabled: true" | tee --append $kud_inventory_folder/group_vars/all.yml
if [[ -n "${http_proxy:-}" ]]; then
@@ -109,7 +118,7 @@ function install_k8s {
--become-user=root | sudo tee $log_folder/setup-kubernetes.log
elif [ "$container_runtime" == "containerd" ]; then
/bin/echo -e "\n\e[1;42mContainerd will be used as the container runtime interface\e[0m"
- # Because the kud_kata_override_variable has its own quotations in it
+ # Because the kud_kata_override_variables has its own quotations in it
# a eval command is needed to properly execute the ansible script
ansible_kubespray_cmd="ansible-playbook $verbose -i $kud_inventory \
$dest_folder/kubespray-$version/cluster.yml \
@@ -137,16 +146,19 @@ function install_addons {
echo "Installing Kubernetes AddOns"
_install_ansible
sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
- ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" -e "helm_client_version=$helm_client_version" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
# The order of KUD_ADDONS is important: some plugins (sriov, qat)
# require nfd to be enabled. Some addons are not currently supported with containerd
if [ "${container_runtime}" == "docker" ]; then
- kud_addons=${KUD_ADDONS:-virtlet ovn4nfv nfd sriov \
- qat optane cmk}
+ default_addons="virtlet ovn4nfv nfd sriov qat optane cmk"
+ if [[ $kubespray_version == "2.16.0" ]]; then
+ default_addons=${default_addons//virtlet/};
+ fi
elif [ "${container_runtime}" == "containerd" ]; then
- kud_addons=${KUD_ADDONS:-ovn4nfv nfd}
+ default_addons="ovn4nfv nfd"
fi
+ kud_addons=${KUD_ADDONS:-$default_addons}
for addon in ${kud_addons}; do
echo "Deploying $addon using configure-$addon.yml playbook.."
@@ -216,7 +228,11 @@ function install_plugin {
if [[ "${testing_enabled}" == "true" ]]; then
sudo ./start.sh
pushd $kud_tests
- for functional_test in plugin plugin_edgex plugin_fw plugin_eaa; do
+ plugin_tests="plugin plugin_edgex plugin_fw plugin_eaa"
+ if [[ $kubespray_version == "2.16.0" ]]; then
+ plugin_tests=${plugin_tests//plugin_fw/};
+ fi
+ for functional_test in ${plugin_tests}; do
bash ${functional_test}.sh
done
popd
@@ -260,6 +276,15 @@ if [[ -n "${KUD_DEBUG:-}" ]]; then
fi
# Configuration values
+kubespray_version=${KUBESPRAY_VERSION:-2.14.1}
+if [[ $kubespray_version == "2.16.0" ]]; then
+ helm_client_version="3.5.4"
+ kube_version="v1.20.7"
+ dashboard_enabled="true"
+else
+ helm_client_version="3.2.4"
+ kube_basic_auth="true"
+fi
log_folder=/var/log/kud
kud_folder=${INSTALLER_DIR}
kud_infra_folder=$kud_folder/../../deployment_infra
@@ -276,9 +301,15 @@ kata_webhook_deployed=false
# For containerd the etcd_deployment_type: docker is the default and doesn't work.
# You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
# See https://github.com/kubernetes-sigs/kubespray/issues/5713
+#
+# The JSON notation below is used to prevent false from being interpreted as a
+# string by ansible.
kud_kata_override_variables="container_manager=containerd \
-e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs \
-e \"{'download_localhost': false}\" -e \"{'download_run_once': false}\""
+if [[ $kubespray_version == "2.16.0" ]]; then
+ kud_kata_override_variables=${kud_kata_override_variables//-e kubelet_cgroup_driver=cgroupfs/}
+fi
sudo mkdir -p $log_folder
sudo mkdir -p /opt/csar
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index 53b5a141..29430374 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -31,7 +31,7 @@ kube_users:
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
-kube_basic_auth: true
+#kube_basic_auth: true
kube_token_auth: true
# Choose network plugin (calico, contiv, weave or flannel)
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index ec415409..367888e5 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -29,9 +29,12 @@ function print_msg {
echo -e "${RED} $msg ---------------------------------------${NC}"
}
+function control_plane_ip {
+ kubectl get endpoints kubernetes -o jsonpath='{.subsets[].addresses[].ip}'
+}
+
function ssh_cluster {
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
- ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${master_ip} -- "$@"
+ ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $(control_plane_ip) -- "$@"
}
function get_ovn_central_address {
diff --git a/kud/tests/emco.sh b/kud/tests/emco.sh
index 1e003968..109f5638 100755
--- a/kud/tests/emco.sh
+++ b/kud/tests/emco.sh
@@ -17,8 +17,7 @@ function delete_resource_nox {
! call_api -X GET "$1" >/dev/null
}
-master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
+master_ip=$(control_plane_ip)
rsync_service_port=30441
rsync_service_host="$master_ip"
base_url_orchestrator=${base_url_orchestrator:-"http://$master_ip:30415/v2"}
diff --git a/kud/tests/kubevirt.sh b/kud/tests/kubevirt.sh
index fbcf1bf6..529a67a7 100755
--- a/kud/tests/kubevirt.sh
+++ b/kud/tests/kubevirt.sh
@@ -39,7 +39,6 @@ if (($try > $tries)); then
fi
# Test
-master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
deployment_pod=$(kubectl get pods | grep $kubevirt_vmi_name | awk '{print $1}')
echo "Pod name: $deployment_pod"
echo "ssh testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}")"
@@ -50,7 +49,7 @@ interval=60
for ((try=1;try<=$tries;try++)); do
echo "try $try/$tries: Wait for $interval seconds to check for ssh access"
sleep $interval
- if sshpass -p testuser ssh -o ProxyCommand="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p $master_ip" -o StrictHostKeyChecking=no testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}") -- uptime; then
+ if sshpass -p testuser ssh -o ProxyCommand="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p $(control_plane_ip)" -o StrictHostKeyChecking=no testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}") -- uptime; then
echo "ssh access check is success"
break
fi
diff --git a/kud/tests/negative_tests/test_all_intents.sh b/kud/tests/negative_tests/test_all_intents.sh
index 1f2f721f..00304906 100755
--- a/kud/tests/negative_tests/test_all_intents.sh
+++ b/kud/tests/negative_tests/test_all_intents.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_composite_app.sh b/kud/tests/negative_tests/test_composite_app.sh
index 8656a1d8..d020b860 100755
--- a/kud/tests/negative_tests/test_composite_app.sh
+++ b/kud/tests/negative_tests/test_composite_app.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_controllers.sh b/kud/tests/negative_tests/test_controllers.sh
index 20e6bda0..80e2f34f 100755
--- a/kud/tests/negative_tests/test_controllers.sh
+++ b/kud/tests/negative_tests/test_controllers.sh
@@ -25,10 +25,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_deployment_intent_group.sh b/kud/tests/negative_tests/test_deployment_intent_group.sh
index 402a0b1d..18857e42 100755
--- a/kud/tests/negative_tests/test_deployment_intent_group.sh
+++ b/kud/tests/negative_tests/test_deployment_intent_group.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_generic_placement_intent.sh b/kud/tests/negative_tests/test_generic_placement_intent.sh
index 708180f8..4d8834da 100755
--- a/kud/tests/negative_tests/test_generic_placement_intent.sh
+++ b/kud/tests/negative_tests/test_generic_placement_intent.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_generic_placement_intent_app.sh b/kud/tests/negative_tests/test_generic_placement_intent_app.sh
index 23e7dd00..f0b7fbb8 100755
--- a/kud/tests/negative_tests/test_generic_placement_intent_app.sh
+++ b/kud/tests/negative_tests/test_generic_placement_intent_app.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_multipart.sh b/kud/tests/negative_tests/test_multipart.sh
index 7f4a84be..ee15bf27 100755
--- a/kud/tests/negative_tests/test_multipart.sh
+++ b/kud/tests/negative_tests/test_multipart.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_profile.sh b/kud/tests/negative_tests/test_profile.sh
index c5002d50..44cfee23 100755
--- a/kud/tests/negative_tests/test_profile.sh
+++ b/kud/tests/negative_tests/test_profile.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/negative_tests/test_profile_apps.sh b/kud/tests/negative_tests/test_profile_apps.sh
index ca8c6454..6e6adbf8 100755
--- a/kud/tests/negative_tests/test_profile_apps.sh
+++ b/kud/tests/negative_tests/test_profile_apps.sh
@@ -26,10 +26,8 @@ source _test_functions.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/onap4k8s.sh b/kud/tests/onap4k8s.sh
index 702bed46..67aae58f 100755
--- a/kud/tests/onap4k8s.sh
+++ b/kud/tests/onap4k8s.sh
@@ -13,13 +13,11 @@ set -o pipefail
source _functions.sh
set +e
-master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
declare -i timeout=18
declare -i interval=10
-base_url="http://$master_ip:$onap_svc_node_port/v1"
+base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
function check_onap_svc {
while ((timeout > 0)); do
diff --git a/kud/tests/plugin_collection_v2.sh b/kud/tests/plugin_collection_v2.sh
index 84f5ca27..5695dfc7 100755
--- a/kud/tests/plugin_collection_v2.sh
+++ b/kud/tests/plugin_collection_v2.sh
@@ -25,10 +25,8 @@ source _common.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/plugin_eaa.sh b/kud/tests/plugin_eaa.sh
index 5cf44e02..1fedbbf3 100755
--- a/kud/tests/plugin_eaa.sh
+++ b/kud/tests/plugin_eaa.sh
@@ -19,10 +19,8 @@ source _common.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/plugin_edgex.sh b/kud/tests/plugin_edgex.sh
index ae390add..ff27ab27 100755
--- a/kud/tests/plugin_edgex.sh
+++ b/kud/tests/plugin_edgex.sh
@@ -19,10 +19,8 @@ source _common.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/plugin_fw.sh b/kud/tests/plugin_fw.sh
index 4d59006b..0a0f62dc 100755
--- a/kud/tests/plugin_fw.sh
+++ b/kud/tests/plugin_fw.sh
@@ -23,10 +23,8 @@ source _common.sh
if [ ${1:+1} ]; then
if [ "$1" == "--external" ]; then
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
onap_svc_node_port=30498
- base_url="http://$master_ip:$onap_svc_node_port/v1"
+ base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
fi
fi
diff --git a/kud/tests/plugin_fw_v2.sh b/kud/tests/plugin_fw_v2.sh
index abab9004..86ef97f9 100755
--- a/kud/tests/plugin_fw_v2.sh
+++ b/kud/tests/plugin_fw_v2.sh
@@ -56,8 +56,7 @@ while [[ $# -gt 0 ]]; do
case $arg in
"--external" )
- service_host=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
+ service_host=$(control_plane_ip)
shift
;;
* )