-rw-r--r--  src/k8splugin/csar/parser.go                       | 162
-rw-r--r--  src/k8splugin/mock_files/mock_yamls/metadata.yaml  |   4
-rw-r--r--  tox.ini                                            |   5
-rw-r--r--  vagrant/Vagrantfile                                |   2
-rwxr-xr-x  vagrant/installer.sh                               |   7
-rw-r--r--  vagrant/playbooks/krd-vars.yml                     |   2
-rwxr-xr-x  vagrant/tests/_common.sh                           | 576
-rwxr-xr-x  vagrant/tests/_functions.sh                        |  85
-rwxr-xr-x  vagrant/tests/integration_cFW.sh                   | 189
-rwxr-xr-x  vagrant/tests/integration_vFW.sh                   | 303
-rwxr-xr-x  vagrant/tests/multus.sh                            | 123
-rwxr-xr-x  vagrant/tests/plugin.sh                            |  36
-rwxr-xr-x  vagrant/tests/virtlet.sh                           | 143
13 files changed, 845 insertions(+), 792 deletions(-)
diff --git a/src/k8splugin/csar/parser.go b/src/k8splugin/csar/parser.go
index abd6ad92..af4546c6 100644
--- a/src/k8splugin/csar/parser.go
+++ b/src/k8splugin/csar/parser.go
@@ -28,121 +28,104 @@ import (
"k8splugin/krd"
)
-func generateExternalVNFID(charLen int) string {
- b := make([]byte, charLen/2)
+func generateExternalVNFID() string {
+ b := make([]byte, 2)
rand.Read(b)
return hex.EncodeToString(b)
}
-// CreateVNF reads the CSAR files from the files system and creates them one by one
-var CreateVNF = func(csarID string, cloudRegionID string, namespace string, kubeclient *kubernetes.Clientset) (string, map[string][]string, error) {
+func ensuresNamespace(namespace string, kubeclient *kubernetes.Clientset) error {
namespacePlugin, ok := krd.LoadedPlugins["namespace"]
if !ok {
- return "", nil, pkgerrors.New("No plugin for namespace resource found")
+ return pkgerrors.New("No plugin for namespace resource found")
}
symGetNamespaceFunc, err := namespacePlugin.Lookup("GetResource")
if err != nil {
- return "", nil, pkgerrors.Wrap(err, "Error fetching namespace plugin")
+ return pkgerrors.Wrap(err, "Error fetching get namespace function")
}
- present, err := symGetNamespaceFunc.(func(string, *kubernetes.Clientset) (bool, error))(
+ exists, err := symGetNamespaceFunc.(func(string, *kubernetes.Clientset) (bool, error))(
namespace, kubeclient)
if err != nil {
- return "", nil, pkgerrors.Wrap(err, "Error in plugin namespace plugin")
+ return pkgerrors.Wrap(err, "An error ocurred during the get namespace execution")
}
- if present == false {
+ if !exists {
+ log.Println("Creating " + namespace + " namespace")
symGetNamespaceFunc, err := namespacePlugin.Lookup("CreateResource")
if err != nil {
- return "", nil, pkgerrors.Wrap(err, "Error fetching namespace plugin")
+ return pkgerrors.Wrap(err, "Error fetching create namespace plugin")
}
err = symGetNamespaceFunc.(func(string, *kubernetes.Clientset) error)(
namespace, kubeclient)
if err != nil {
- return "", nil, pkgerrors.Wrap(err, "Error creating "+namespace+" namespace")
+ return pkgerrors.Wrap(err, "Error creating "+namespace+" namespace")
}
}
+ return nil
+}
- var path string
-
- // uuid
- externalVNFID := generateExternalVNFID(8)
-
- // cloud1-default-uuid
+// CreateVNF reads the CSAR files from the file system and creates them one by one
+var CreateVNF = func(csarID string, cloudRegionID string, namespace string, kubeclient *kubernetes.Clientset) (string, map[string][]string, error) {
+ if err := ensuresNamespace(namespace, kubeclient); err != nil {
+ return "", nil, pkgerrors.Wrap(err, "Error while ensuring namespace: "+namespace)
+ }
+ externalVNFID := generateExternalVNFID()
internalVNFID := cloudRegionID + "-" + namespace + "-" + externalVNFID
csarDirPath := os.Getenv("CSAR_DIR") + "/" + csarID
metadataYAMLPath := csarDirPath + "/metadata.yaml"
- seqFile, err := ReadMetadataFile(metadataYAMLPath)
+ log.Println("Reading " + metadataYAMLPath + " file")
+ metadataFile, err := ReadMetadataFile(metadataYAMLPath)
if err != nil {
return "", nil, pkgerrors.Wrap(err, "Error while reading Metadata File: "+metadataYAMLPath)
}
+ var path string
resourceYAMLNameMap := make(map[string][]string)
+ // Iterate over the resource types defined in the metadata file to create the Kubernetes resources
+ log.Printf("%d resource type(s) to be processed", len(metadataFile.ResourceTypePathMap))
+ for resource, fileNames := range metadataFile.ResourceTypePathMap {
+ log.Println("Processing items of " + resource + " resource")
+ var resourcesCreated []string
+ for _, filename := range fileNames {
+ path = csarDirPath + "/" + filename
+
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return "", nil, pkgerrors.New("File " + path + "does not exists")
+ }
+ log.Println("Processing file: " + path)
+
+ genericKubeData := &krd.GenericKubeResourceData{
+ YamlFilePath: path,
+ Namespace: namespace,
+ InternalVNFID: internalVNFID,
+ }
+
+ typePlugin, ok := krd.LoadedPlugins[resource]
+ if !ok {
+ return "", nil, pkgerrors.New("No plugin for resource " + resource + " found")
+ }
+
+ symCreateResourceFunc, err := typePlugin.Lookup("CreateResource")
+ if err != nil {
+ return "", nil, pkgerrors.Wrap(err, "Error fetching "+resource+" plugin")
+ }
- for _, resource := range seqFile.ResourceTypePathMap {
- for resourceName, resourceFileNames := range resource {
- // Load/Use Deployment data/client
-
- var resourceNameList []string
-
- for _, filename := range resourceFileNames {
- path = csarDirPath + "/" + filename
-
- _, err = os.Stat(path)
- if os.IsNotExist(err) {
- return "", nil, pkgerrors.New("File " + path + "does not exists")
- }
-
- log.Println("Processing file: " + path)
-
- genericKubeData := &krd.GenericKubeResourceData{
- YamlFilePath: path,
- Namespace: namespace,
- InternalVNFID: internalVNFID,
- }
-
- typePlugin, ok := krd.LoadedPlugins[resourceName]
- if !ok {
- return "", nil, pkgerrors.New("No plugin for resource " + resourceName + " found")
- }
-
- symCreateResourceFunc, err := typePlugin.Lookup("CreateResource")
- if err != nil {
- return "", nil, pkgerrors.Wrap(err, "Error fetching "+resourceName+" plugin")
- }
-
- // cloud1-default-uuid-sisedeploy
- internalResourceName, err := symCreateResourceFunc.(func(*krd.GenericKubeResourceData, *kubernetes.Clientset) (string, error))(
- genericKubeData, kubeclient)
- if err != nil {
- return "", nil, pkgerrors.Wrap(err, "Error in plugin "+resourceName+" plugin")
- }
-
- // ["cloud1-default-uuid-sisedeploy1", "cloud1-default-uuid-sisedeploy2", ... ]
- resourceNameList = append(resourceNameList, internalResourceName)
-
- /*
- {
- "deployment": ["cloud1-default-uuid-sisedeploy1", "cloud1-default-uuid-sisedeploy2", ... ]
- }
- */
- resourceYAMLNameMap[resourceName] = resourceNameList
+ internalResourceName, err := symCreateResourceFunc.(func(*krd.GenericKubeResourceData, *kubernetes.Clientset) (string, error))(
+ genericKubeData, kubeclient)
+ if err != nil {
+ return "", nil, pkgerrors.Wrap(err, "Error in plugin "+resource+" plugin")
}
+ log.Print(internalResourceName + " resource created successfully")
+ resourcesCreated = append(resourcesCreated, internalResourceName)
}
+ resourceYAMLNameMap[resource] = resourcesCreated
}
- /*
- uuid,
- {
- "deployment": ["cloud1-default-uuid-sisedeploy1", "cloud1-default-uuid-sisedeploy2", ... ]
- "service": ["cloud1-default-uuid-sisesvc1", "cloud1-default-uuid-sisesvc2", ... ]
- },
- nil
- */
return externalVNFID, resourceYAMLNameMap, nil
}
@@ -183,25 +166,28 @@ var DestroyVNF = func(data map[string][]string, namespace string, kubeclient *ku
// MetadataFile stores the metadata of execution
type MetadataFile struct {
- ResourceTypePathMap []map[string][]string `yaml:"resources"`
+ ResourceTypePathMap map[string][]string `yaml:"resources"`
}
// ReadMetadataFile reads the metadata YAML file and returns its parsed content
-var ReadMetadataFile = func(yamlFilePath string) (MetadataFile, error) {
- var seqFile MetadataFile
+var ReadMetadataFile = func(path string) (MetadataFile, error) {
+ var metadataFile MetadataFile
- if _, err := os.Stat(yamlFilePath); err == nil {
- log.Println("Reading metadata YAML: " + yamlFilePath)
- rawBytes, err := ioutil.ReadFile(yamlFilePath)
- if err != nil {
- return seqFile, pkgerrors.Wrap(err, "Metadata YAML file read error")
- }
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return metadataFile, pkgerrors.Wrap(err, "Metadata YAML file does not exist")
+ }
- err = yaml.Unmarshal(rawBytes, &seqFile)
- if err != nil {
- return seqFile, pkgerrors.Wrap(err, "Metadata YAML file read error")
- }
+ log.Println("Reading metadata YAML: " + path)
+ yamlFile, err := ioutil.ReadFile(path)
+ if err != nil {
+ return metadataFile, pkgerrors.Wrap(err, "Metadata YAML file read error")
+ }
+
+ err = yaml.Unmarshal(yamlFile, &metadataFile)
+ if err != nil {
+ return metadataFile, pkgerrors.Wrap(err, "Metadata YAML file unmarshal error")
}
+ log.Printf("metadata:\n%v", metadataFile)
- return seqFile, nil
+ return metadataFile, nil
}
diff --git a/src/k8splugin/mock_files/mock_yamls/metadata.yaml b/src/k8splugin/mock_files/mock_yamls/metadata.yaml
index dcc1c32e..0289214a 100644
--- a/src/k8splugin/mock_files/mock_yamls/metadata.yaml
+++ b/src/k8splugin/mock_files/mock_yamls/metadata.yaml
@@ -10,7 +10,7 @@
# limitations under the License.
resources:
- - deployment:
+ deployment:
- deployment.yaml
- - service:
+ service:
- service.yaml
diff --git a/tox.ini b/tox.ini
index 2e593451..049deaec 100644
--- a/tox.ini
+++ b/tox.ini
@@ -14,10 +14,13 @@ deps =
rstcheck
whitelist_externals = bash
commands = bash -c "find {toxinidir} -not -path {toxinidir}/.tox/\* \
+ -not -path {toxinidir}/pkg/dep/\* \
+ -not -path {toxinidir}/src/k8splugin/vendor/\* \
+ -not -path {toxinidir}/src/github.com/\* \
-name \*.sh -type f \
# E006 check for lines longer than 79 columns
-print0 | xargs -0 bashate -v -iE006"
- bash -c "find {toxinidir} -not -path {toxinidir}/.tox/\* \
+ bash -c "find {toxinidir}/docs \
-name \*.rst -type f -print0 | xargs -0 rstcheck --report warning"
[testenv:docs]
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
index 61031bba..7ff18f06 100644
--- a/vagrant/Vagrantfile
+++ b/vagrant/Vagrantfile
@@ -109,7 +109,7 @@ Vagrant.configure("2") do |config|
installer.vm.synced_folder '../', '/root/go/src/k8-plugin-multicloud/', type: sync_type
installer.vm.provision 'shell' do |sh|
sh.path = "installer.sh"
- sh.args = ['-p', '-v', '-w', '/vagrant']
+ sh.args = ['-p', '-v', '-w', '/root/go/src/k8-plugin-multicloud/vagrant']
end
end
end
diff --git a/vagrant/installer.sh b/vagrant/installer.sh
index 5df87e4c..f6f2f76a 100755
--- a/vagrant/installer.sh
+++ b/vagrant/installer.sh
@@ -169,11 +169,9 @@ function install_plugin {
_install_docker
pip install docker-compose
- mkdir -p /opt/{csar,kubeconfig,consul/config}
+ mkdir -p /opt/{kubeconfig,consul/config}
cp $HOME/.kube/config /opt/kubeconfig/krd
- export CSAR_DIR=/opt/csar
export KUBE_CONFIG_DIR=/opt/kubeconfig
- echo "export CSAR_DIR=${CSAR_DIR}" >> /etc/environment
echo "export KUBE_CONFIG_DIR=${KUBE_CONFIG_DIR}" >> /etc/environment
GOPATH=$(go env GOPATH)
@@ -257,6 +255,9 @@ krd_tests=$krd_folder/tests
k8s_info_file=$krd_folder/k8s_info.log
mkdir -p $log_folder
+mkdir -p /opt/csar
+export CSAR_DIR=/opt/csar
+echo "export CSAR_DIR=${CSAR_DIR}" >> /etc/environment
# Install dependencies
# Setup proxy variables
diff --git a/vagrant/playbooks/krd-vars.yml b/vagrant/playbooks/krd-vars.yml
index 3b1b2a06..89aa21db 100644
--- a/vagrant/playbooks/krd-vars.yml
+++ b/vagrant/playbooks/krd-vars.yml
@@ -12,7 +12,7 @@ base_dest: /tmp
multus_dest: "{{ base_dest }}/multus-cni"
multus_source_type: "tarball"
-multus_version: 3.1
+multus_version: 2.0
multus_url: "https://github.com/intel/multus-cni/releases/download/v{{ multus_version }}/multus-cni_v{{ multus_version }}_linux_amd64.tar.gz"
#multus_source_type: "source"
#multus_version: def72938cd2fb272eb3a6f64a8162b1049404357
diff --git a/vagrant/tests/_common.sh b/vagrant/tests/_common.sh
new file mode 100755
index 00000000..d1b6315d
--- /dev/null
+++ b/vagrant/tests/_common.sh
@@ -0,0 +1,576 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+image_name=virtlet.cloud/ubuntu/16.04
+multus_deployment_name=multus-deployment
+virtlet_image=virtlet.cloud/fedora
+virtlet_deployment_name=virtlet-deployment
+
+# popule_CSAR_containers_vFW() - This function creates the content of the CSAR
+# file required for the vFirewall using only containers
+function popule_CSAR_containers_vFW {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - unprotected-private-net-cidr-network.yaml
+ - protected-private-net-cidr-network.yaml
+ - onap-private-net-cidr-network.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+META
+
+ cat << NET > unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: unprotected-private-net-cidr
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.10.0/24"
+ }
+}'
+NET
+
+ cat << NET > protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: protected-private-net-cidr
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.20.0/24"
+ }
+}'
+NET
+
+ cat << NET > onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: onap-private-net-cidr
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $packetgen_deployment_name
+ image: electrocucaracha/packetgen
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+ ]'
+ spec:
+ containers:
+ - name: $firewall_deployment_name
+ image: electrocucaracha/firewall
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $sink_deployment_name
+ image: electrocucaracha/sink
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+ popd
+}
+
+# popule_CSAR_vms_vFW() - This function creates the content of the CSAR file
+# required for the vFirewall using only virtual machines
+function popule_CSAR_vms_vFW {
+ local csar_id=$1
+ ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - unprotected-private-net-cidr-network.yaml
+ - protected-private-net-cidr-network.yaml
+ - onap-private-net-cidr-network.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+META
+
+ cat << NET > unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: unprotected-private-net-cidr
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.10.0/24"
+ }
+}'
+NET
+
+ cat << NET > protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: protected-private-net-cidr
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.20.0/24"
+ }
+}'
+NET
+
+ cat << NET > onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: onap-private-net-cidr
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+ proxy="#!/bin/bash"
+ if [[ -n "${http_proxy+x}" ]]; then
+ proxy+="
+ export http_proxy=$http_proxy
+ echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy"
+ fi
+ if [[ -n "${https_proxy+x}" ]]; then
+ proxy+="
+ export https_proxy=$https_proxy
+ echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy"
+ fi
+ if [[ -n "${no_proxy+x}" ]]; then
+ proxy+="
+ export no_proxy=$no_proxy"
+ fi
+
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $packetgen_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $firewall_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $sink_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+ popd
+}
+
+# popule_CSAR_multus() - This function creates the content of the CSAR file
+# required for testing the Multus feature
+function popule_CSAR_multus {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - bridge-network.yaml
+ deployment:
+ - $multus_deployment_name.yaml
+META
+
+ cat << NET > bridge-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: bridge-conf
+spec:
+ config: '{
+ "name": "mynet",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+ cat << DEPLOYMENT > $multus_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $multus_deployment_name
+ labels:
+ app: multus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: multus
+ template:
+ metadata:
+ labels:
+ app: multus
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "bridge-conf", "interfaceRequest": "eth1" },
+ { "name": "bridge-conf", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $multus_deployment_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+ popd
+}
+
+# popule_CSAR_virtlet() - This function creates the content of the CSAR file
+# required for testing the Virtlet feature
+function popule_CSAR_virtlet {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ deployment:
+ - $virtlet_deployment_name.yaml
+META
+
+ cat << DEPLOYMENT > $virtlet_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $virtlet_deployment_name
+ labels:
+ app: virtlet
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: virtlet
+ template:
+ metadata:
+ labels:
+ app: virtlet
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ VirtletCloudInitUserDataScript: |
+ #!/bin/sh
+ echo hello world
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $virtlet_deployment_name
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: $virtlet_image
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for "kubectl attach -t" to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
+DEPLOYMENT
+ popd
+}
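For context, a minimal usage sketch of these generator functions (an illustration, not part of this change). _checks_args is defined in _functions.sh below, so both helpers have to be sourced, and CSAR_DIR must point at an existing directory, which the installer now exports globally as /opt/csar; the csar_id value is the one multus.sh uses:

    source _common.sh
    source _functions.sh   # note: exits unless the kubectl client is available

    export CSAR_DIR=/opt/csar
    csar_id=49408ca6-b75b-11e8-8076-525400feed26

    popule_CSAR_multus $csar_id
    ls ${CSAR_DIR}/${csar_id}   # bridge-network.yaml  metadata.yaml  multus-deployment.yaml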
diff --git a/vagrant/tests/_functions.sh b/vagrant/tests/_functions.sh
new file mode 100755
index 00000000..50473476
--- /dev/null
+++ b/vagrant/tests/_functions.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function _checks_args {
+ if [[ -z $1 ]]; then
+ echo "Missing CSAR ID argument"
+ exit 1
+ fi
+ if [[ -z $CSAR_DIR ]]; then
+ echo "CSAR_DIR global environment value is empty"
+ exit 1
+ fi
+ mkdir -p ${CSAR_DIR}/${1}
+}
+
+# destroy_deployment() - This function ensures that a specific deployment is
+# destroyed in Kubernetes
+function destroy_deployment {
+ local deployment_name=$1
+
+ kubectl delete deployment $deployment_name --ignore-not-found=true --now
+ while kubectl get deployment $deployment_name &>/dev/null; do
+ echo "$(date +%H:%M:%S) - $deployment_name : Destroying deployment"
+ sleep 5
+ done
+}
+
+# recreate_deployment() - This function destroys an existing deployment and
+# creates a new one based on its YAML file
+function recreate_deployment {
+ local deployment_name=$1
+
+ destroy_deployment $deployment_name
+ kubectl create -f $deployment_name.yaml
+}
+
+# wait_deployment() - Waits until the Deployment's pods reach the Running status
+function wait_deployment {
+ local deployment_name=$1
+
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+}
+
+# setup() - Base testing setup shared among functional tests
+function setup {
+ for deployment_name in $@; do
+ recreate_deployment $deployment_name
+ done
+
+ for deployment_name in $@; do
+ wait_deployment $deployment_name
+ done
+}
+
+# teardown() - Base testing teardown function
+function teardown {
+ for deployment_name in $@; do
+ destroy_deployment $deployment_name
+ done
+}
+
+if ! kubectl version &>/dev/null; then
+ echo "This functional test requires the kubectl client"
+ exit 1
+fi
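Taken together, these helpers give the refactored tests below a common lifecycle. A short sketch of the intended flow (illustrative only, using the deployment name exported by _common.sh):

    source _common.sh
    source _functions.sh

    # setup() recreates each deployment from <name>.yaml in the current
    # directory and blocks until its pods report Running
    setup $multus_deployment_name
    # ... assertions against the running pods go here ...
    teardown $multus_deployment_name   # destroys the deployments again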
diff --git a/vagrant/tests/integration_cFW.sh b/vagrant/tests/integration_cFW.sh
index e4b305f4..4a452599 100755
--- a/vagrant/tests/integration_cFW.sh
+++ b/vagrant/tests/integration_cFW.sh
@@ -12,183 +12,22 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
-packetgen_deployment_name=packetgen
-sink_deployment_name=sink
-firewall_deployment_name=firewall
+source _common.sh
+source _functions.sh
-cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: unprotected-private-net-cidr
-spec:
- config: '{
- "name": "unprotected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.10.0/24"
- }
-}'
-NET
+csar_id=4f726e2a-b74a-11e8-ad7c-525400feed2
-cat << NET > $HOME/protected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: protected-private-net-cidr
-spec:
- config: '{
- "name": "protected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.20.0/24"
- }
-}'
-NET
+# Setup
+popule_CSAR_containers_vFW $csar_id
-cat << NET > $HOME/onap-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: onap-private-net-cidr
-spec:
- config: '{
- "name": "onap",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "10.10.0.0/16"
- }
-}'
-NET
+pushd ${CSAR_DIR}/${csar_id}
+for network in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network; do
+ kubectl apply -f $network.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
-cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $packetgen_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- spec:
- containers:
- - name: $packetgen_deployment_name
- image: electrocucaracha/packetgen
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 256Mi
-DEPLOYMENT
+# Test
+popd
-cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $firewall_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
- ]'
- spec:
- containers:
- - name: $firewall_deployment_name
- image: electrocucaracha/firewall
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $sink_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- spec:
- containers:
- - name: $sink_deployment_name
- image: electrocucaracha/sink
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-if $(kubectl version &>/dev/null); then
- kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- done
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
-fi
+# Teardown
+teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
diff --git a/vagrant/tests/integration_vFW.sh b/vagrant/tests/integration_vFW.sh
index fa48d7c5..ee0205cb 100755
--- a/vagrant/tests/integration_vFW.sh
+++ b/vagrant/tests/integration_vFW.sh
@@ -12,284 +12,33 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
-packetgen_deployment_name=packetgen
-sink_deployment_name=sink
-firewall_deployment_name=firewall
-image_name=virtlet.cloud/ubuntu/16.04
+source _common.sh
+source _functions.sh
+csar_id=66fea6f0-b74d-11e8-95a0-525400feed26
+
+# Setup
if [[ ! -f $HOME/.ssh/id_rsa.pub ]]; then
echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
fi
-ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
-
-cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: unprotected-private-net-cidr
-spec:
- config: '{
- "name": "unprotected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.10.0/24"
- }
-}'
-NET
-
-cat << NET > $HOME/protected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: protected-private-net-cidr
-spec:
- config: '{
- "name": "protected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.20.0/24"
- }
-}'
-NET
-
-cat << NET > $HOME/onap-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: onap-private-net-cidr
-spec:
- config: '{
- "name": "onap",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "10.10.0.0/16"
- }
-}'
-NET
-
-proxy="#!/bin/bash"
-if [[ -n "${http_proxy+x}" ]]; then
- proxy+="
- export http_proxy=$http_proxy
- echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
-"
-fi
-if [[ -n "${https_proxy+x}" ]]; then
- proxy+="
- export https_proxy=$https_proxy
- echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
-"
-fi
-if [[ -n "${no_proxy+x}" ]]; then
- proxy+="
- export no_proxy=$no_proxy"
-fi
-
-cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $packetgen_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- VirtletCloudInitUserData: |
- users:
- - default
- - name: admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
- ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- kubernetes.io/target-runtime: virtlet.cloud
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $packetgen_deployment_name
- image: $image_name
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 256Mi
-DEPLOYMENT
-
-cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $firewall_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- VirtletCloudInitUserData: |
- users:
- - default
- - name: admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
- ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
- ]'
- kubernetes.io/target-runtime: virtlet.cloud
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $firewall_deployment_name
- image: $image_name
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $sink_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- VirtletCloudInitUserData: |
- users:
- - default
- - name: admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
- ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- kubernetes.io/target-runtime: virtlet.cloud
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $sink_deployment_name
- image: $image_name
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-if $(kubectl version &>/dev/null); then
- kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- done
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
- vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
- echo "Pod name: $pod_name Virsh domain: $vm"
- echo "ssh -i ~/.ssh/id_rsa.pub admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
- echo "=== Virtlet details ===="
- echo "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
- done
-fi
+popule_CSAR_vms_vFW $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+for network in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network; do
+ kubectl apply -f $network.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+
+# Test
+for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
+ echo "Pod name: $pod_name Virsh domain: $vm"
+ echo "ssh -i ~/.ssh/id_rsa.pub admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+ echo "=== Virtlet details ===="
+ echo "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
+done
+popd
+
+# Teardown
+teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
diff --git a/vagrant/tests/multus.sh b/vagrant/tests/multus.sh
index c5f7fc71..96fa37d8 100755
--- a/vagrant/tests/multus.sh
+++ b/vagrant/tests/multus.sh
@@ -12,112 +12,29 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
+source _common.sh
+source _functions.sh
-pod_name=multus-pod
-deployment_name=multus-deployment
+csar_id=49408ca6-b75b-11e8-8076-525400feed26
-cat << NET > $HOME/bridge-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: bridge-conf
-spec:
- config: '{
- "name": "mynet",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "10.10.0.0/16"
- }
-}'
-NET
+# Setup
+popule_CSAR_multus $csar_id
-cat << POD > $HOME/$pod_name.yaml
-apiVersion: v1
-kind: Pod
-metadata:
- name: $pod_name
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "bridge-conf", "interfaceRequest": "eth1" },
- { "name": "bridge-conf", "interfaceRequest": "eth2" }
- ]'
-spec: # specification of the pod's contents
- containers:
- - name: $pod_name
- image: "busybox"
- command: ["top"]
- stdin: true
- tty: true
-POD
+pushd ${CSAR_DIR}/${csar_id}
+kubectl apply -f bridge-network.yaml
-cat << DEPLOYMENT > $HOME/$deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $deployment_name
- labels:
- app: multus
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: multus
- template:
- metadata:
- labels:
- app: multus
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "bridge-conf", "interfaceRequest": "eth1" },
- { "name": "bridge-conf", "interfaceRequest": "eth2" }
- ]'
- spec:
- containers:
- - name: $deployment_name
- image: "busybox"
- command: ["top"]
- stdin: true
- tty: true
-DEPLOYMENT
+setup $multus_deployment_name
-if $(kubectl version &>/dev/null); then
- kubectl apply -f $HOME/bridge-network.yaml
-
- kubectl delete pod $pod_name --ignore-not-found=true --now
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get pod $pod_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$pod_name.yaml
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- sleep 5
-
- deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
- for pod in $pod_name $deployment_pod; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $pod : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
-
- for pod in $pod_name $deployment_pod; do
- echo "===== $pod details ====="
- kubectl exec -it $pod -- ip a
- multus_nic=$(kubectl exec -it $pod -- ifconfig | grep "eth1")
- if [ -z "$multus_nic" ]; then
- exit 1
- fi
- done
+# Test
+deployment_pod=$(kubectl get pods | grep $multus_deployment_name | awk '{print $1}')
+echo "===== $deployment_pod details ====="
+kubectl exec -it $deployment_pod -- ip a
+multus_nic=$(kubectl exec -it $deployment_pod -- ifconfig | grep "eth1")
+if [ -z "$multus_nic" ]; then
+ echo "The $deployment_pod pod doesn't contain the eth1 nic"
+ exit 1
fi
+popd
+
+# Teardown
+teardown $multus_deployment_name
diff --git a/vagrant/tests/plugin.sh b/vagrant/tests/plugin.sh
index 744b2207..ac373cd3 100755
--- a/vagrant/tests/plugin.sh
+++ b/vagrant/tests/plugin.sh
@@ -11,6 +11,7 @@
set -o errexit
set -o nounset
set -o pipefail
+#set -o xtrace
# _build_generic_sim() - Creates a generic simulator image in case that doesn't exist
function _build_generic_sim {
@@ -44,12 +45,14 @@ function start_aai_service {
# populate_csar_dir()- Creates content used for Functional tests
function populate_csar_dir {
mkdir -p ${CSAR_DIR}/${csar_id}
- cat << SEQ > ${CSAR_DIR}/${csar_id}/metadata.yaml
-deployment:
- - deployment.yaml
-service:
- - service.yaml
-SEQ
+ cat << META > ${CSAR_DIR}/${csar_id}/metadata.yaml
+resources:
+ deployment:
+ - deployment.yaml
+ service:
+ - service.yaml
+META
+
cat << DEPLOYMENT > ${CSAR_DIR}/${csar_id}/deployment.yaml
apiVersion: apps/v1
kind: Deployment
@@ -98,8 +101,8 @@ base_url="http://localhost:8081/v1/vnf_instances/"
cloud_region_id="krd"
namespace="default"
csar_id="94e414f6-9ca4-11e8-bb6a-52540067263b"
-deployment_name="plugin_functional_test_deployment"
-service_name="plugin_functional_test_service"
+deployment_name="test-deployment"
+service_name="test-service"
#start_aai_service
populate_csar_dir
@@ -114,16 +117,18 @@ payload_raw="
"
payload=$(echo $payload_raw | tr '\n' ' ')
echo "Creating VNF Instance"
-curl -d "$payload" "${base_url}"
+vnf_id=$(curl -s -d "$payload" "${base_url}" | jq -r '.vnf_id')
+echo "=== Validating Kubernetes ==="
+kubectl get --no-headers=true --namespace=${namespace} deployment ${cloud_region_id}-${namespace}-${vnf_id}-${deployment_name}
+kubectl get --no-headers=true --namespace=${namespace} service ${cloud_region_id}-${namespace}-${vnf_id}-$service_name
+echo "VNF Instance created succesfully with id: $vnf_id"
-vnf_id=$(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list[0]')
-if [[ -z "$vnf_id" ]]; then
- echo "VNF Instance not created"
+vnf_id_list=$(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list')
+if [[ "$vnf_id_list" != *"${vnf_id}"* ]]; then
+ echo $vnf_id_list
+ echo "VNF Instance not stored"
exit 1
fi
-echo "VNF Instance created succesfully with id: $vnf_id"
-#kubectl get deployment $deployment_name
-#kubectl get service $service_name
vnf_details=$(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}/${vnf_id}")
if [[ -z "$vnf_details" ]]; then
@@ -138,4 +143,3 @@ if [[ -n $(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}/${vnf_id}"
echo "VNF Instance not deleted"
exit 1
fi
-docker logs deployments_multicloud-k8s_1
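The kubectl checks added above lean on the naming convention from parser.go, where internal resource names are prefixed with ${cloud_region_id}-${namespace}-${vnf_id}. For reference, a sketch of the REST calls this test exercises (vnf_id stands for the value returned at creation time):

    base_url="http://localhost:8081/v1/vnf_instances/"
    # list the instances stored for a cloud region/namespace pair
    curl -s -X GET "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list'
    # fetch the details of a single instance
    curl -s -X GET "${base_url}${cloud_region_id}/${namespace}/${vnf_id}"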
diff --git a/vagrant/tests/virtlet.sh b/vagrant/tests/virtlet.sh
index a8af071f..4a43ff34 100755
--- a/vagrant/tests/virtlet.sh
+++ b/vagrant/tests/virtlet.sh
@@ -12,134 +12,27 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
+source _common.sh
+source _functions.sh
-virtlet_image=virtlet.cloud/fedora
-pod_name=virtlet-pod
-deployment_name=virtlet-deployment
+csar_id=6b54a728-b76a-11e8-a1ba-52540053ccc8
-cat << POD > $HOME/$pod_name.yaml
-apiVersion: v1
-kind: Pod
-metadata:
- name: $pod_name
- annotations:
- # This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet.cloud
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo hello world
-spec:
- # This nodeAffinity specification tells Kubernetes to run this
- # pod only on the nodes that have extraRuntime=virtlet label.
- # This label is used by Virtlet DaemonSet to select nodes
- # that must have Virtlet runtime
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $pod_name
- # This specifies the image to use.
- # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
- # of the image name is prepended with https:// and used to download the image
- image: $virtlet_image
- imagePullPolicy: IfNotPresent
- # tty and stdin required for "kubectl attach -t" to work
- tty: true
- stdin: true
- resources:
- limits:
- # This memory limit is applied to the libvirt domain definition
- memory: 160Mi
-POD
+# Setup
+popule_CSAR_virtlet $csar_id
-cat << DEPLOYMENT > $HOME/$deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $deployment_name
- labels:
- app: virtlet
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: virtlet
- template:
- metadata:
- labels:
- app: virtlet
- annotations:
- # This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet.cloud
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo hello world
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $deployment_name
- # This specifies the image to use.
- # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
- # of the image name is prepended with https:// and used to download the image
- image: $virtlet_image
- imagePullPolicy: IfNotPresent
- # tty and stdin required for "kubectl attach -t" to work
- tty: true
- stdin: true
- resources:
- limits:
- # This memory limit is applied to the libvirt domain definition
- memory: 160Mi
-DEPLOYMENT
+pushd ${CSAR_DIR}/${csar_id}
-if $(kubectl version &>/dev/null); then
- kubectl delete pod $pod_name --ignore-not-found=true --now
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get pod $pod_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$pod_name.yaml
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- sleep 5
+setup $virtlet_deployment_name
- deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
- for pod in $pod_name $deployment_pod; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $pod : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
-
- kubectl plugin virt virsh list
- for pod in $pod_name $deployment_name; do
- virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$pod")
- if [[ -z "$virsh_image" ]]; then
- exit 1
- fi
- done
+# Test
+kubectl plugin virt virsh list
+deployment_pod=$(kubectl get pods | grep $virtlet_deployment_name | awk '{print $1}')
+virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$deployment_pod")
+if [[ -z "$virsh_image" ]]; then
+ echo "There is no Virtual Machine running by $deployment_pod pod"
+ exit 1
fi
+popd
+
+# Teardown
+teardown $virtlet_deployment_name