author    Victor Morales <victor.morales@intel.com>  2018-09-13 09:19:13 -0700
committer Victor Morales <victor.morales@intel.com>  2018-09-13 10:05:21 -0700
commit    7ee9ee4e4361fa31d4e565c1b7539006598c0223 (patch)
tree      e28308f8895d031faa619d464a3827aa3d6bd374 /vagrant/tests
parent    cb64a5fed1b10daa301235a2ef956b4805da99d3 (diff)
Refactor Functional tests
The current implementation of the functional tests didn't allow reusing
some functionality. This change exposes common functions and methods to
populate CSAR folders.

Change-Id: I8f4daf9a0a12633f952677b3e15de42bea5226e6
Signed-off-by: Victor Morales <victor.morales@intel.com>
Issue-ID: MULTICLOUD-301
Diffstat (limited to 'vagrant/tests')
-rwxr-xr-x  vagrant/tests/_common.sh          576
-rwxr-xr-x  vagrant/tests/_functions.sh        86
-rwxr-xr-x  vagrant/tests/integration_cFW.sh  189
-rwxr-xr-x  vagrant/tests/integration_vFW.sh  303
-rwxr-xr-x  vagrant/tests/multus.sh           123
-rwxr-xr-x  vagrant/tests/virtlet.sh          143
6 files changed, 740 insertions, 680 deletions
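
With the common helpers factored out, every functional test reduces to the same skeleton. A minimal sketch of that flow, mirroring the refactored integration_cFW.sh below (the CSAR ID is illustrative, and CSAR_DIR must be exported beforehand):

    source _common.sh      # deployment names and popule_CSAR_* generators
    source _functions.sh   # _checks_args, setup, teardown, ...

    csar_id=11111111-2222-3333-4444-555555555555   # hypothetical CSAR ID
    popule_CSAR_containers_vFW $csar_id            # writes manifests under $CSAR_DIR/$csar_id

    pushd ${CSAR_DIR}/${csar_id}
    for network in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network; do
        kubectl apply -f $network.yaml
    done
    setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
    popd
    teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name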
diff --git a/vagrant/tests/_common.sh b/vagrant/tests/_common.sh
new file mode 100755
index 00000000..d1b6315d
--- /dev/null
+++ b/vagrant/tests/_common.sh
@@ -0,0 +1,576 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+image_name=virtlet.cloud/ubuntu/16.04
+multus_deployment_name=multus-deployment
+virtlet_image=virtlet.cloud/fedora
+virtlet_deployment_name=virtlet-deployment
+
+# popule_CSAR_containers_vFW() - This function creates the content of the CSAR
+# folder required for the vFirewall use case using only containers
+function popule_CSAR_containers_vFW {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - unprotected-private-net-cidr-network.yaml
+ - protected-private-net-cidr-network.yaml
+ - onap-private-net-cidr-network.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+META
+
+ cat << NET > unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: unprotected-private-net-cidr
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.10.0/24"
+ }
+}'
+NET
+
+ cat << NET > protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: protected-private-net-cidr
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.20.0/24"
+ }
+}'
+NET
+
+ cat << NET > onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: onap-private-net-cidr
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $packetgen_deployment_name
+ image: electrocucaracha/packetgen
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+ ]'
+ spec:
+ containers:
+ - name: $firewall_deployment_name
+ image: electrocucaracha/firewall
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $sink_deployment_name
+ image: electrocucaracha/sink
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+ popd
+}
+
+# popule_CSAR_vms_vFW() - This function creates the content of the CSAR
+# folder required for the vFirewall use case using only virtual machines
+function popule_CSAR_vms_vFW {
+ local csar_id=$1
+ ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - unprotected-private-net-cidr-network.yaml
+ - protected-private-net-cidr-network.yaml
+ - onap-private-net-cidr-network.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+META
+
+ cat << NET > unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: unprotected-private-net-cidr
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.10.0/24"
+ }
+}'
+NET
+
+ cat << NET > protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: protected-private-net-cidr
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.20.0/24"
+ }
+}'
+NET
+
+ cat << NET > onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: onap-private-net-cidr
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+ proxy="#!/bin/bash"
+ if [[ -n "${http_proxy+x}" ]]; then
+ proxy+="
+ export http_proxy=$http_proxy
+ echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy"
+ fi
+ if [[ -n "${https_proxy+x}" ]]; then
+ proxy+="
+ export https_proxy=$https_proxy
+ echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy"
+ fi
+ if [[ -n "${no_proxy+x}" ]]; then
+ proxy+="
+ export no_proxy=$no_proxy"
+ fi
+
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $packetgen_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $firewall_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $sink_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+ popd
+}
+
+# popule_CSAR_multus() - This function creates the content of the CSAR
+# folder required for testing the Multus feature
+function popule_CSAR_multus {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - bridge-network.yaml
+ deployment:
+ - $multus_deployment_name.yaml
+META
+
+ cat << NET > bridge-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: bridge-conf
+spec:
+ config: '{
+ "name": "mynet",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+ cat << DEPLOYMENT > $multus_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $multus_deployment_name
+ labels:
+ app: multus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: multus
+ template:
+ metadata:
+ labels:
+ app: multus
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "bridge-conf", "interfaceRequest": "eth1" },
+ { "name": "bridge-conf", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $multus_deployment_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+ popd
+}
+
+# popule_CSAR_virtlet() - This function creates the content of the CSAR
+# folder required for testing the Virtlet feature
+function popule_CSAR_virtlet {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ deployment:
+ - $virtlet_deployment_name.yaml
+META
+
+ cat << DEPLOYMENT > $virtlet_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $virtlet_deployment_name
+ labels:
+ app: virtlet
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: virtlet
+ template:
+ metadata:
+ labels:
+ app: virtlet
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ VirtletCloudInitUserDataScript: |
+ #!/bin/sh
+ echo hello world
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $virtlet_deployment_name
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: $virtlet_image
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for "kubectl attach -t" to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
+DEPLOYMENT
+    popd
+}
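
The popule_CSAR_* generators can also be exercised on their own when debugging a CSAR definition. A sketch, assuming kubectl is installed (sourcing _functions.sh checks for it) and using a throwaway directory for CSAR_DIR:

    source _common.sh
    source _functions.sh                 # supplies the _checks_args helper used above
    export CSAR_DIR=$(mktemp -d)         # assumption: any writable directory works
    popule_CSAR_multus 0000              # hypothetical CSAR ID
    ls ${CSAR_DIR}/0000                  # bridge-network.yaml  metadata.yaml  multus-deployment.yaml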
diff --git a/vagrant/tests/_functions.sh b/vagrant/tests/_functions.sh
new file mode 100755
index 00000000..50473476
--- /dev/null
+++ b/vagrant/tests/_functions.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function _checks_args {
+ if [[ -z $1 ]]; then
+ echo "Missing CSAR ID argument"
+ exit 1
+ fi
+ if [[ -z $CSAR_DIR ]]; then
+ echo "CSAR_DIR global environment value is empty"
+ exit 1
+ fi
+ mkdir -p ${CSAR_DIR}/${1}
+}
+
+# destroy_deployment() - This function ensures that a specific deployment is
+# destroyed in Kubernetes
+function destroy_deployment {
+ local deployment_name=$1
+
+    kubectl delete deployment $deployment_name --ignore-not-found=true --now
+    while kubectl get deployment $deployment_name &>/dev/null; do
+        echo "$(date +%H:%M:%S) - $deployment_name : Destroying deployment"
+        sleep 5
+    done
+}
+
+# recreate_deployment() - This function destroys an existing deployment and
+# creates a new one based on its yaml file
+function recreate_deployment {
+ local deployment_name=$1
+
+ destroy_deployment $deployment_name
+ kubectl create -f $deployment_name.yaml
+}
+
+# wait_deployment() - Waits until the Deployment's pods reach the Running status
+function wait_deployment {
+ local deployment_name=$1
+
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+}
+
+# setup() - Base testing setup shared among functional tests
+function setup {
+ for deployment_name in $@; do
+ recreate_deployment $deployment_name
+ done
+
+ for deployment_name in $@; do
+ wait_deployment $deployment_name
+ done
+}
+
+# teardown() - Base testing teardown function
+function teardown {
+ for deployment_name in $@; do
+ destroy_deployment $deployment_name
+ done
+}
+
+if ! kubectl version &>/dev/null; then
+    echo "This functional test requires the kubectl client"
+    exit 1
+fi
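
Note that setup and teardown operate on <name>.yaml manifests in the current working directory, which is why every test pushd-es into its CSAR folder first. A hypothetical standalone use (my-app and its my-app.yaml manifest are illustrative):

    source _functions.sh
    setup my-app       # recreates the Deployment from ./my-app.yaml and waits for Running
    teardown my-app    # deletes the Deployment and blocks until it is gone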
diff --git a/vagrant/tests/integration_cFW.sh b/vagrant/tests/integration_cFW.sh
index e4b305f4..4a452599 100755
--- a/vagrant/tests/integration_cFW.sh
+++ b/vagrant/tests/integration_cFW.sh
@@ -12,183 +12,22 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
-packetgen_deployment_name=packetgen
-sink_deployment_name=sink
-firewall_deployment_name=firewall
+source _common.sh
+source _functions.sh
-cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: unprotected-private-net-cidr
-spec:
- config: '{
- "name": "unprotected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.10.0/24"
- }
-}'
-NET
+csar_id=4f726e2a-b74a-11e8-ad7c-525400feed2
-cat << NET > $HOME/protected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: protected-private-net-cidr
-spec:
- config: '{
- "name": "protected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.20.0/24"
- }
-}'
-NET
+# Setup
+popule_CSAR_containers_vFW $csar_id
-cat << NET > $HOME/onap-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: onap-private-net-cidr
-spec:
- config: '{
- "name": "onap",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "10.10.0.0/16"
- }
-}'
-NET
+pushd ${CSAR_DIR}/${csar_id}
+for network in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network; do
+ kubectl apply -f $network.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
-cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $packetgen_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- spec:
- containers:
- - name: $packetgen_deployment_name
- image: electrocucaracha/packetgen
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 256Mi
-DEPLOYMENT
+# Test
+popd
-cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $firewall_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
- ]'
- spec:
- containers:
- - name: $firewall_deployment_name
- image: electrocucaracha/firewall
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $sink_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- spec:
- containers:
- - name: $sink_deployment_name
- image: electrocucaracha/sink
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-if $(kubectl version &>/dev/null); then
- kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- done
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
-fi
+# Teardown
+teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
diff --git a/vagrant/tests/integration_vFW.sh b/vagrant/tests/integration_vFW.sh
index fa48d7c5..ee0205cb 100755
--- a/vagrant/tests/integration_vFW.sh
+++ b/vagrant/tests/integration_vFW.sh
@@ -12,284 +12,33 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
-packetgen_deployment_name=packetgen
-sink_deployment_name=sink
-firewall_deployment_name=firewall
-image_name=virtlet.cloud/ubuntu/16.04
+source _common.sh
+source _functions.sh
+csar_id=66fea6f0-b74d-11e8-95a0-525400feed26
+
+# Setup
if [[ ! -f $HOME/.ssh/id_rsa.pub ]]; then
echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
fi
-ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
-
-cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: unprotected-private-net-cidr
-spec:
- config: '{
- "name": "unprotected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.10.0/24"
- }
-}'
-NET
-
-cat << NET > $HOME/protected-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: protected-private-net-cidr
-spec:
- config: '{
- "name": "protected",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.20.0/24"
- }
-}'
-NET
-
-cat << NET > $HOME/onap-private-net-cidr-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: onap-private-net-cidr
-spec:
- config: '{
- "name": "onap",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "10.10.0.0/16"
- }
-}'
-NET
-
-proxy="#!/bin/bash"
-if [[ -n "${http_proxy+x}" ]]; then
- proxy+="
- export http_proxy=$http_proxy
- echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
-"
-fi
-if [[ -n "${https_proxy+x}" ]]; then
- proxy+="
- export https_proxy=$https_proxy
- echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
-"
-fi
-if [[ -n "${no_proxy+x}" ]]; then
- proxy+="
- export no_proxy=$no_proxy"
-fi
-
-cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $packetgen_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- VirtletCloudInitUserData: |
- users:
- - default
- - name: admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
- ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- kubernetes.io/target-runtime: virtlet.cloud
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $packetgen_deployment_name
- image: $image_name
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 256Mi
-DEPLOYMENT
-
-cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $firewall_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- VirtletCloudInitUserData: |
- users:
- - default
- - name: admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
- ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
- ]'
- kubernetes.io/target-runtime: virtlet.cloud
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $firewall_deployment_name
- image: $image_name
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $sink_deployment_name
- labels:
- app: vFirewall
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: vFirewall
- template:
- metadata:
- labels:
- app: vFirewall
- annotations:
- VirtletCloudInitUserData: |
- users:
- - default
- - name: admin
- sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
- ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
- { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
- ]'
- kubernetes.io/target-runtime: virtlet.cloud
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $sink_deployment_name
- image: $image_name
- imagePullPolicy: IfNotPresent
- tty: true
- stdin: true
- resources:
- limits:
- memory: 160Mi
-DEPLOYMENT
-
-if $(kubectl version &>/dev/null); then
- kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
- kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- done
-
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
- for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
- pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
- vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
- echo "Pod name: $pod_name Virsh domain: $vm"
- echo "ssh -i ~/.ssh/id_rsa.pub admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
- echo "=== Virtlet details ===="
- echo "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
- done
-fi
+popule_CSAR_vms_vFW $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+for network in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network; do
+ kubectl apply -f $network.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+
+# Test
+for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
+ echo "Pod name: $pod_name Virsh domain: $vm"
+ echo "ssh -i ~/.ssh/id_rsa.pub admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+ echo "=== Virtlet details ===="
+ echo "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
+done
+popd
+
+# Teardown
+teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
diff --git a/vagrant/tests/multus.sh b/vagrant/tests/multus.sh
index c5f7fc71..96fa37d8 100755
--- a/vagrant/tests/multus.sh
+++ b/vagrant/tests/multus.sh
@@ -12,112 +12,29 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
+source _common.sh
+source _functions.sh
-pod_name=multus-pod
-deployment_name=multus-deployment
+csar_id=49408ca6-b75b-11e8-8076-525400feed26
-cat << NET > $HOME/bridge-network.yaml
-apiVersion: "kubernetes.cni.cncf.io/v1"
-kind: Network
-metadata:
- name: bridge-conf
-spec:
- config: '{
- "name": "mynet",
- "type": "bridge",
- "ipam": {
- "type": "host-local",
- "subnet": "10.10.0.0/16"
- }
-}'
-NET
+# Setup
+popule_CSAR_multus $csar_id
-cat << POD > $HOME/$pod_name.yaml
-apiVersion: v1
-kind: Pod
-metadata:
- name: $pod_name
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "bridge-conf", "interfaceRequest": "eth1" },
- { "name": "bridge-conf", "interfaceRequest": "eth2" }
- ]'
-spec: # specification of the pod's contents
- containers:
- - name: $pod_name
- image: "busybox"
- command: ["top"]
- stdin: true
- tty: true
-POD
+pushd ${CSAR_DIR}/${csar_id}
+kubectl apply -f bridge-network.yaml
-cat << DEPLOYMENT > $HOME/$deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $deployment_name
- labels:
- app: multus
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: multus
- template:
- metadata:
- labels:
- app: multus
- annotations:
- kubernetes.v1.cni.cncf.io/networks: '[
- { "name": "bridge-conf", "interfaceRequest": "eth1" },
- { "name": "bridge-conf", "interfaceRequest": "eth2" }
- ]'
- spec:
- containers:
- - name: $deployment_name
- image: "busybox"
- command: ["top"]
- stdin: true
- tty: true
-DEPLOYMENT
+setup $multus_deployment_name
-if $(kubectl version &>/dev/null); then
- kubectl apply -f $HOME/bridge-network.yaml
-
- kubectl delete pod $pod_name --ignore-not-found=true --now
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get pod $pod_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$pod_name.yaml
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- sleep 5
-
- deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
- for pod in $pod_name $deployment_pod; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $pod : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
-
- for pod in $pod_name $deployment_pod; do
- echo "===== $pod details ====="
- kubectl exec -it $pod -- ip a
- multus_nic=$(kubectl exec -it $pod -- ifconfig | grep "eth1")
- if [ -z "$multus_nic" ]; then
- exit 1
- fi
- done
+# Test
+deployment_pod=$(kubectl get pods | grep $multus_deployment_name | awk '{print $1}')
+echo "===== $deployment_pod details ====="
+kubectl exec -it $deployment_pod -- ip a
+multus_nic=$(kubectl exec -it $deployment_pod -- ifconfig | grep "eth1")
+if [ -z "$multus_nic" ]; then
+ echo "The $deployment_pod pod doesn't contain the eth1 nic"
+ exit 1
fi
+popd
+
+# Teardown
+teardown $multus_deployment_name
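
The refactored test only asserts the presence of eth1, although the deployment requests two bridge-conf attachments; a sketch of the matching eth2 check, reusing the same pod variable and BusyBox's ifconfig:

    multus_nic2=$(kubectl exec -it $deployment_pod -- ifconfig | grep "eth2")
    if [ -z "$multus_nic2" ]; then
        echo "The $deployment_pod pod doesn't have an eth2 NIC"
        exit 1
    fi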
diff --git a/vagrant/tests/virtlet.sh b/vagrant/tests/virtlet.sh
index a8af071f..4a43ff34 100755
--- a/vagrant/tests/virtlet.sh
+++ b/vagrant/tests/virtlet.sh
@@ -12,134 +12,27 @@ set -o errexit
set -o nounset
set -o pipefail
-rm -f $HOME/*.yaml
+source _common.sh
+source _functions.sh
-virtlet_image=virtlet.cloud/fedora
-pod_name=virtlet-pod
-deployment_name=virtlet-deployment
+csar_id=6b54a728-b76a-11e8-a1ba-52540053ccc8
-cat << POD > $HOME/$pod_name.yaml
-apiVersion: v1
-kind: Pod
-metadata:
- name: $pod_name
- annotations:
- # This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet.cloud
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo hello world
-spec:
- # This nodeAffinity specification tells Kubernetes to run this
- # pod only on the nodes that have extraRuntime=virtlet label.
- # This label is used by Virtlet DaemonSet to select nodes
- # that must have Virtlet runtime
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $pod_name
- # This specifies the image to use.
- # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
- # of the image name is prepended with https:// and used to download the image
- image: $virtlet_image
- imagePullPolicy: IfNotPresent
- # tty and stdin required for "kubectl attach -t" to work
- tty: true
- stdin: true
- resources:
- limits:
- # This memory limit is applied to the libvirt domain definition
- memory: 160Mi
-POD
+# Setup
+popule_CSAR_virtlet $csar_id
-cat << DEPLOYMENT > $HOME/$deployment_name.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: $deployment_name
- labels:
- app: virtlet
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: virtlet
- template:
- metadata:
- labels:
- app: virtlet
- annotations:
- # This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet.cloud
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo hello world
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: extraRuntime
- operator: In
- values:
- - virtlet
- containers:
- - name: $deployment_name
- # This specifies the image to use.
- # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
- # of the image name is prepended with https:// and used to download the image
- image: $virtlet_image
- imagePullPolicy: IfNotPresent
- # tty and stdin required for "kubectl attach -t" to work
- tty: true
- stdin: true
- resources:
- limits:
- # This memory limit is applied to the libvirt domain definition
- memory: 160Mi
-DEPLOYMENT
+pushd ${CSAR_DIR}/${csar_id}
-if $(kubectl version &>/dev/null); then
- kubectl delete pod $pod_name --ignore-not-found=true --now
- kubectl delete deployment $deployment_name --ignore-not-found=true --now
- while kubectl get pod $pod_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$pod_name.yaml
- while kubectl get deployment $deployment_name &>/dev/null; do
- sleep 5
- done
- kubectl create -f $HOME/$deployment_name.yaml
- sleep 5
+setup $virtlet_deployment_name
- deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
- for pod in $pod_name $deployment_pod; do
- status_phase=""
- while [[ $status_phase != "Running" ]]; do
- new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
- if [[ $new_phase != $status_phase ]]; then
- echo "$(date +%H:%M:%S) - $pod : $new_phase"
- status_phase=$new_phase
- fi
- if [[ $new_phase == "Err"* ]]; then
- exit 1
- fi
- done
- done
-
- kubectl plugin virt virsh list
- for pod in $pod_name $deployment_name; do
- virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$pod")
- if [[ -z "$virsh_image" ]]; then
- exit 1
- fi
- done
+# Test
+kubectl plugin virt virsh list
+deployment_pod=$(kubectl get pods | grep $virtlet_deployment_name | awk '{print $1}')
+virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$deployment_pod")
+if [[ -z "$virsh_image" ]]; then
+ echo "There is no Virtual Machine running by $deployment_pod pod"
+ exit 1
fi
+popd
+
+# Teardown
+teardown $virtlet_deployment_name
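
Because the Virtlet deployment keeps tty and stdin enabled, the resulting VM console can be reached once the pod is Running; a short sketch:

    virtlet_pod=$(kubectl get pods | grep $virtlet_deployment_name | awk '{print $1}')
    kubectl attach -it $virtlet_pod   # attaches to the VM console (needs the tty/stdin settings above)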