-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh      2
-rwxr-xr-x  kud/tests/_common.sh                            14
-rwxr-xr-x  kud/tests/_functions.sh                         20
-rwxr-xr-x  kud/tests/plugin_fw.sh                         116
-rw-r--r--  src/k8splugin/plugins/network/plugin.go         16
-rw-r--r--  src/k8splugin/plugins/network/plugin_test.go    13
6 files changed, 171 insertions(+), 10 deletions(-)
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index c9027150..51ca22e8 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -176,7 +176,7 @@ function install_plugin {
if [[ "${testing_enabled}" == "true" ]]; then
sudo ./start.sh
pushd $kud_tests
- for functional_test in plugin plugin_edgex; do
+ for functional_test in plugin plugin_edgex plugin_fw; do
bash ${functional_test}.sh
done
popd
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
index bfb6ec4b..f9f47f87 100755
--- a/kud/tests/_common.sh
+++ b/kud/tests/_common.sh
@@ -1144,3 +1144,17 @@ function populate_CSAR_edgex_rbdefinition {
tar -czf rb_definition.tar.gz -C $test_folder/vnfs/edgex/helm edgex
popd
}
+
+# populate_CSAR_fw_rbdefinition() - Function that populates the CSAR folder
+# for testing the resource bundle definition of the firewall scenario
+function populate_CSAR_fw_rbdefinition {
+ _checks_args "$1"
+ pushd "${CSAR_DIR}/$1"
+ print_msg "Create Helm Chart Archives for vFirewall"
+ rm -f *.tar.gz
+ # Reuse profile from the edgeX case as it is an empty profile
+ tar -czf rb_profile.tar.gz -C $test_folder/vnfs/edgex/profile .
+ tar -czf rb_definition.tar.gz -C $test_folder/../demo firewall
+ popd
+}
+
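For context, the new helper follows the same pattern as populate_CSAR_edgex_rbdefinition and is consumed by the firewall test introduced below. A minimal usage sketch, assuming CSAR_DIR points at the CSAR working directory set up by the sourced test helpers:

    csar_id=cc009bfe-bbee-11e8-9766-525400435678
    populate_CSAR_fw_rbdefinition "$csar_id"
    ls "${CSAR_DIR}/${csar_id}"
    # expected contents: rb_definition.tar.gz  rb_profile.tar.gz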
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 86636ccd..d585086b 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -173,6 +173,26 @@ function wait_deployment {
done
}
+# wait_for_pod() - Wait until the first pod matched by the kubectl filters is in Running state
+function wait_for_pod {
+ # Example usage:
+ # wait_for_pod example_pod
+ # wait_for_pod --namespace test different_pod
+ # wait_for_pod -n test -l app=plugin_test
+
+ status_phase=""
+ while [[ "$status_phase" != "Running" ]]; do
+ new_phase="$(kubectl get pods -o 'go-template={{ index .items 0 "status" "phase" }}' "$@" )"
+ if [[ "$new_phase" != "$status_phase" ]]; then
+ echo "$(date +%H:%M:%S) - Filter=[$*] : $new_phase"
+ status_phase="$new_phase"
+ fi
+ if [[ "$new_phase" == "Err"* ]]; then
+ exit 1
+ fi
+ done
+}
+
# setup() - Base testing setup shared among functional tests
function setup {
if ! $(kubectl version &>/dev/null); then
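Each iteration of wait_for_pod reduces to a single kubectl query. For one of the filters used by the firewall test below (namespace and label taken from plugin_fw.sh), the underlying call is roughly:

    kubectl get pods -n plugin-tests-namespace -l app=firewall \
        -o 'go-template={{ index .items 0 "status" "phase" }}'
    # prints the phase of the first matching pod, e.g. Pending or Running

The function simply re-runs this query until the phase becomes Running, logging each transition, and exits with an error if the phase ever starts with "Err".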
diff --git a/kud/tests/plugin_fw.sh b/kud/tests/plugin_fw.sh
new file mode 100755
index 00000000..d7bed4fd
--- /dev/null
+++ b/kud/tests/plugin_fw.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+#set -o xtrace
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+base_url="http://localhost:9015/v1"
+kubeconfig_path="$HOME/.kube/config"
+csar_id=cc009bfe-bbee-11e8-9766-525400435678
+rb_name="vfw"
+rb_version="plugin_test"
+chart_name="firewall"
+profile_name="test_profile"
+release_name="test-release"
+namespace="plugin-tests-namespace"
+cloud_region_id="kud"
+cloud_region_owner="localhost"
+
+# Setup
+install_deps
+populate_CSAR_fw_rbdefinition "$csar_id"
+
+print_msg "Registering resource bundle"
+payload="$(cat <<EOF
+{
+ "rb-name": "${rb_name}",
+ "rb-version": "${rb_version}",
+ "chart-name": "${chart_name}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/rb/definition"
+
+print_msg "Uploading resource bundle content"
+call_api --data-binary "@${CSAR_DIR}/${csar_id}/rb_definition.tar.gz" \
+ "${base_url}/rb/definition/${rb_name}/${rb_version}/content"
+
+print_msg "Registering rb's profile"
+payload="$(cat <<EOF
+{
+ "rb-name": "${rb_name}",
+ "rb-version": "${rb_version}",
+ "profile-name": "${profile_name}",
+ "release-name": "${release_name}",
+ "namespace": "${namespace}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/rb/definition/${rb_name}/${rb_version}/profile"
+
+print_msg "Uploading profile data"
+call_api --data-binary "@${CSAR_DIR}/${csar_id}/rb_profile.tar.gz" \
+ "${base_url}/rb/definition/${rb_name}/${rb_version}/profile/${profile_name}/content"
+
+print_msg "Setup cloud data"
+payload="$(cat <<EOF
+{
+ "cloud-region": "$cloud_region_id",
+ "cloud-owner": "$cloud_region_owner"
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/connectivity-info" >/dev/null #massive output
+
+print_msg "Creating vFW VNF Instance"
+payload="$(cat <<EOF
+{
+ "rb-name": "${rb_name}",
+ "rb-version": "${rb_version}",
+ "profile-name": "${profile_name}",
+ "cloud-region": "${cloud_region_id}"
+}
+EOF
+)"
+response="$(call_api -d "${payload}" "${base_url}/instance")"
+echo "$response"
+vnf_id="$(jq -r '.id' <<< "${response}")"
+
+print_msg "Validating VNF instance"
+# Check if all pods are up
+wait_for_pod -n "${namespace}" -l app=sink
+wait_for_pod -n "${namespace}" -l app=firewall
+wait_for_pod -n "${namespace}" -l app=packetgen
+# TODO: Provide a health check that verifies the vFW actually works
+
+print_msg "Retrieving VNF details"
+call_api "${base_url}/instance/${vnf_id}"
+
+
+# Teardown
+print_msg "Deleting VNF Instance"
+delete_resource "${base_url}/instance/${vnf_id}"
+
+print_msg "Deleting Profile"
+delete_resource "${base_url}/rb/definition/${rb_name}/${rb_version}/profile/${profile_name}"
+
+print_msg "Deleting Resource Bundle"
+delete_resource "${base_url}/rb/definition/${rb_name}/${rb_version}"
+
+print_msg "Deleting ${cloud_region_id} cloud region connection"
+delete_resource "${base_url}/connectivity-info/${cloud_region_id}"
diff --git a/src/k8splugin/plugins/network/plugin.go b/src/k8splugin/plugins/network/plugin.go
index ca5aa959..84a5fe51 100644
--- a/src/k8splugin/plugins/network/plugin.go
+++ b/src/k8splugin/plugins/network/plugin.go
@@ -31,14 +31,16 @@ var ExportedVariable networkPlugin
type networkPlugin struct {
}
-func extractData(data string) (cniType, networkName string) {
+func extractData(data string) (cniType, networkName string, err error) {
re := regexp.MustCompile("_")
split := re.Split(data, -1)
- if len(split) != 3 {
+ if len(split) != 2 {
+ err = pkgerrors.New("Couldn't split resource '" + data +
+ "' into CNI type and Network name")
return
}
- cniType = split[1]
- networkName = split[2]
+ cniType = split[0]
+ networkName = split[1]
return
}
@@ -82,7 +84,11 @@ func (p networkPlugin) List(gvk schema.GroupVersionKind, namespace string,
// Delete an existing Network
func (p networkPlugin) Delete(resource helm.KubernetesResource, namespace string, client plugin.KubernetesConnector) error {
- cniType, networkName := extractData(resource.Name)
+ cniType, networkName, err := extractData(resource.Name)
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error extracting CNI type from resource")
+ }
+
typePlugin, ok := utils.LoadedPlugins[cniType+"-network"]
if !ok {
return pkgerrors.New("No plugin for resource " + cniType + " found")
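With this change, resource names passed to Delete are expected to be of the form <cni-type>_<network-name> (two underscore-separated fields) rather than three. A rough shell illustration of the naming convention, using the name exercised by the updated tests below (illustration only; the Go code additionally rejects names that do not split into exactly two fields):

    resource_name="ovn4nfvk8s_test"
    cni_type="${resource_name%%_*}"     # -> ovn4nfvk8s
    network_name="${resource_name#*_}"  # -> test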
diff --git a/src/k8splugin/plugins/network/plugin_test.go b/src/k8splugin/plugins/network/plugin_test.go
index 586bccb8..33cae1c7 100644
--- a/src/k8splugin/plugins/network/plugin_test.go
+++ b/src/k8splugin/plugins/network/plugin_test.go
@@ -130,18 +130,23 @@ func TestDeleteNetwork(t *testing.T) {
}{
{
label: "Fail to load non-existing plugin",
- input: "test",
- expectedError: "No plugin for resource",
+ input: "non-existing-cni_test",
+ expectedError: "No plugin for resource non-existing-cni",
},
{
- label: "Fail to delete a network",
+ label: "Fail to extract cni from network name",
input: "1_ovn4nfvk8s_test",
+ expectedError: "Error extracting CNI type from resource: Couldn't split resource '1_ovn4nfvk8s_test' into CNI type and Network name",
+ },
+ {
+ label: "Fail to delete a network",
+ input: "ovn4nfvk8s_test",
mockError: "Internal error",
expectedError: "Error during the deletion for ovn4nfvk8s plugin: Internal error",
},
{
label: "Successfully delete a ovn4nfv network",
- input: "1_ovn4nfvk8s_test",
+ input: "ovn4nfvk8s_test",
},
}
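A sketch of running the updated unit tests locally, assuming a standard Go toolchain and that the plugin package tests run with plain go test (the project's build tooling may wrap this differently):

    cd src/k8splugin
    go test ./plugins/network/...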