Diffstat (limited to 'vagrant')
-rwxr-xr-x  vagrant/tests/_common.sh          | 135
-rwxr-xr-x  vagrant/tests/integration_vFW.sh  |   6
-rwxr-xr-x  vagrant/tests/virtlet.sh          |  11
3 files changed, 113 insertions, 39 deletions
diff --git a/vagrant/tests/_common.sh b/vagrant/tests/_common.sh
index fb5d1798..5635907c 100755
--- a/vagrant/tests/_common.sh
+++ b/vagrant/tests/_common.sh
@@ -212,8 +212,41 @@ resources:
- $packetgen_deployment_name.yaml
- $firewall_deployment_name.yaml
- $sink_deployment_name.yaml
+ service:
+ - sink-service.yaml
+ ingress:
+ - sink-ingress.yaml
META
+ cat << SERVICE > sink-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: sink-service
+ labels:
+ app: vFirewall
+spec:
+ ports:
+ - port: 667
+ selector:
+ app: vFirewall
+SERVICE
+
+ cat << INGRESS > sink-ingress.yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: sink-ingress
+spec:
+ rules:
+ - host: sink.vfirewall.demo.com
+ http:
+ paths:
+ - backend:
+ serviceName: sink-service
+ servicePort: 667
+INGRESS
+
cat << NET > unprotected-private-net-cidr-network.yaml
apiVersion: "kubernetes.cni.cncf.io/v1"
kind: Network
@@ -262,20 +295,23 @@ spec:
}'
NET
- proxy="#!/bin/bash"
+ proxy="apt:"
+ cloud_init_proxy=""
if [[ -n "${http_proxy+x}" ]]; then
proxy+="
- export http_proxy=$http_proxy
- echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy"
+ http_proxy: $http_proxy"
+ cloud_init_proxy+="
+ - export http_proxy=$http_proxy"
fi
if [[ -n "${https_proxy+x}" ]]; then
proxy+="
- export https_proxy=$https_proxy
- echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy"
+ https_proxy: $https_proxy"
+ cloud_init_proxy+="
+ - export https_proxy=$https_proxy"
fi
if [[ -n "${no_proxy+x}" ]]; then
- proxy+="
- export no_proxy=$no_proxy"
+ cloud_init_proxy+="
+ - export no_proxy=$no_proxy"
fi
cat << DEPLOYMENT > $packetgen_deployment_name.yaml
@@ -296,18 +332,24 @@ spec:
app: vFirewall
annotations:
VirtletCloudInitUserData: |
+ ssh_pwauth: True
users:
- - default
- name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
kubernetes.v1.cni.cncf.io/networks: '[
{ "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
{ "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
@@ -332,6 +374,8 @@ spec:
resources:
limits:
memory: 256Mi
+ ports:
+ - containerPort: 8183
DEPLOYMENT
cat << DEPLOYMENT > $firewall_deployment_name.yaml
@@ -352,18 +396,22 @@ spec:
app: vFirewall
annotations:
VirtletCloudInitUserData: |
+ ssh_pwauth: True
users:
- - default
- name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
kubernetes.v1.cni.cncf.io/networks: '[
{ "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
{ "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
@@ -409,18 +457,24 @@ spec:
app: vFirewall
annotations:
VirtletCloudInitUserData: |
+ ssh_pwauth: True
users:
- - default
- name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
sudo: ALL=(ALL) NOPASSWD:ALL
- plain_text_passwd: secret
- groups: sudo
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
ssh_authorized_keys:
- - $ssh_key
- VirtletCloudInitUserDataScript: |
- $proxy
-
- wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
kubernetes.v1.cni.cncf.io/networks: '[
{ "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
{ "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
@@ -445,6 +499,8 @@ spec:
resources:
limits:
memory: 160Mi
+ ports:
+ - containerPort: 667
DEPLOYMENT
popd
}
@@ -546,9 +602,20 @@ spec:
annotations:
# This tells CRI Proxy that this pod belongs to Virtlet runtime
kubernetes.io/target-runtime: virtlet.cloud
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo hello world
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: testuser
+ gecos: User
+ primary-group: testuser
+ groups: users
+ lock_passwd: false
+ shell: /bin/bash
+ # the password is "testuser"
+ passwd: "\$6\$rounds=4096\$wPs4Hz4tfs\$a8ssMnlvH.3GX88yxXKF2cKMlVULsnydoOKgkuStTErTq2dzKZiIx9R/pPWWh5JLxzoZEx7lsSX5T2jW5WISi1"
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ runcmd:
+ - echo hello world
spec:
affinity:
nodeAffinity:
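
A quick sanity check for the manifests these heredocs now emit (the sink Service and Ingress plus the three Virtlet deployments with their interpolated cloud-init blocks) is a client-side dry run before the integration test applies them. This is only a sketch, assuming kubectl is already configured for the target cluster; it validates the Kubernetes objects themselves but treats the VirtletCloudInitUserData annotation as an opaque string.

    # Sketch: validate the files written by popule_CSAR_vms_vFW without creating anything.
    pushd ${CSAR_DIR}/${csar_id}
    for manifest in sink-service.yaml sink-ingress.yaml \
            $packetgen_deployment_name.yaml $firewall_deployment_name.yaml $sink_deployment_name.yaml; do
        kubectl apply --dry-run -f $manifest
    done
    popd
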
diff --git a/vagrant/tests/integration_vFW.sh b/vagrant/tests/integration_vFW.sh
index ee0205cb..df27065a 100755
--- a/vagrant/tests/integration_vFW.sh
+++ b/vagrant/tests/integration_vFW.sh
@@ -24,10 +24,11 @@ fi
popule_CSAR_vms_vFW $csar_id
pushd ${CSAR_DIR}/${csar_id}
-for network in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network; do
- kubectl apply -f $network.yaml
+for resource in unprotected-private-net-cidr-network protected-private-net-cidr-network onap-private-net-cidr-network sink-service sink-ingress; do
+ kubectl apply -f $resource.yaml
done
setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+#kubectl port-forward deployment/$sink_deployment_name 667:667
# Test
for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
@@ -35,6 +36,7 @@ for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sin
vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
echo "Pod name: $pod_name Virsh domain: $vm"
echo "ssh -i ~/.ssh/id_rsa.pub admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+ echo "kubectl attach -it $pod_name"
echo "=== Virtlet details ===="
echo "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
done
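
The commented-out port-forward above points at the simplest way to verify the sink is answering on its service port once the deployments are up. A minimal sketch, assuming the sink pod is Running and serving HTTP on containerPort 667:

    # Sketch: reach the sink through a temporary port-forward and check the response code.
    kubectl port-forward deployment/$sink_deployment_name 667:667 &
    port_forward_pid=$!
    sleep 5
    curl -s -o /dev/null -w "%{http_code}\n" http://localhost:667
    kill $port_forward_pid

With an ingress controller deployed, the same page should also be reachable through the new sink-ingress rule, e.g. curl -H "Host: sink.vfirewall.demo.com" http://<ingress-controller-address>/ (the controller address is environment-specific).
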
diff --git a/vagrant/tests/virtlet.sh b/vagrant/tests/virtlet.sh
index 4a43ff34..f941789c 100755
--- a/vagrant/tests/virtlet.sh
+++ b/vagrant/tests/virtlet.sh
@@ -25,13 +25,18 @@ pushd ${CSAR_DIR}/${csar_id}
setup $virtlet_deployment_name
# Test
-kubectl plugin virt virsh list
deployment_pod=$(kubectl get pods | grep $virtlet_deployment_name | awk '{print $1}')
-virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$deployment_pod")
-if [[ -z "$virsh_image" ]]; then
+vm_name=$(kubectl plugin virt virsh list | grep "virtlet-.*-$virtlet_deployment_name" | awk '{print $2}')
+vm_status=$(kubectl plugin virt virsh list | grep "virtlet-.*-$virtlet_deployment_name" | awk '{print $3}')
+if [[ "$vm_status" != "running" ]]; then
echo "There is no Virtual Machine running by $deployment_pod pod"
exit 1
fi
+echo "Pod name: $deployment_pod Virsh domain: $vm_name"
+echo "ssh testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}")"
+echo "kubectl attach -it $deployment_pod"
+echo "=== Virtlet details ===="
+echo "$(kubectl plugin virt virsh dumpxml $vm_name | grep VIRTLET_)\n"
popd
# Teardown
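
The status check added above is a one-shot test, and the Virtlet domain can take a little while to reach the running state after setup returns. A retry loop around the same kubectl virt plugin call is one way to make the check less racy; this is a sketch reusing the variables already defined in virtlet.sh:

    # Sketch: poll the virsh domain state for up to a minute before declaring failure.
    for attempt in $(seq 1 12); do
        vm_status=$(kubectl plugin virt virsh list | grep "virtlet-.*-$virtlet_deployment_name" | awk '{print $3}')
        if [[ "$vm_status" == "running" ]]; then
            break
        fi
        sleep 5
    done
    if [[ "$vm_status" != "running" ]]; then
        echo "There is no Virtual Machine running by $deployment_pod pod"
        exit 1
    fi
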