Diffstat (limited to 'kud')
-rw-r--r--  kud/README.md | 37
-rw-r--r--  kud/deployment_infra/galaxy-requirements.yml | 17
-rw-r--r--  kud/deployment_infra/playbooks/Debian.yml | 22
-rw-r--r--  kud/deployment_infra/playbooks/RedHat.yml | 19
-rw-r--r--  kud/deployment_infra/playbooks/Suse.yml | 20
-rw-r--r--  kud/deployment_infra/playbooks/configure-istio.yml | 50
-rw-r--r--  kud/deployment_infra/playbooks/configure-kud.yml | 16
-rw-r--r--  kud/deployment_infra/playbooks/configure-multus.yml | 120
-rw-r--r--  kud/deployment_infra/playbooks/configure-nfd.yml | 61
-rw-r--r--  kud/deployment_infra/playbooks/configure-ovn-kubernetes.yml | 136
-rw-r--r--  kud/deployment_infra/playbooks/configure-ovn.yml | 109
-rw-r--r--  kud/deployment_infra/playbooks/configure-ovn4nfv.yml | 98
-rw-r--r--  kud/deployment_infra/playbooks/configure-virtlet.yml | 250
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml | 63
-rw-r--r--  kud/hosting_providers/baremetal/README.md | 22
-rw-r--r--  kud/hosting_providers/vagrant/README.md | 36
-rw-r--r--  kud/hosting_providers/vagrant/Vagrantfile | 130
-rwxr-xr-x  kud/hosting_providers/vagrant/aio.sh | 58
-rw-r--r--  kud/hosting_providers/vagrant/config/default.yml | 53
-rw-r--r--  kud/hosting_providers/vagrant/config/samples/pdf.yml.aio | 25
-rw-r--r--  kud/hosting_providers/vagrant/config/samples/pdf.yml.mini | 33
-rw-r--r--  kud/hosting_providers/vagrant/insecure_keys/key | 27
-rw-r--r--  kud/hosting_providers/vagrant/insecure_keys/key.pub | 1
l---------  kud/hosting_providers/vagrant/installer | 1
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh | 240
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml | 71
-rwxr-xr-x  kud/hosting_providers/vagrant/node.sh | 88
-rwxr-xr-x  kud/hosting_providers/vagrant/setup.sh | 201
-rwxr-xr-x  kud/tests/_common.sh | 1118
-rwxr-xr-x  kud/tests/_functions.sh | 155
-rw-r--r--  kud/tests/cFW/README.md | 10
-rw-r--r--  kud/tests/cFW/Vagrantfile | 33
-rw-r--r--  kud/tests/cFW/darkstat/Dockerfile | 14
-rw-r--r--  kud/tests/cFW/docker-compose.yml | 38
-rw-r--r--  kud/tests/cFW/firewall/Dockerfile | 49
-rw-r--r--  kud/tests/cFW/packetgen/Dockerfile | 44
-rwxr-xr-x  kud/tests/cFW/postinstall.sh | 83
-rw-r--r--  kud/tests/cFW/sink/Dockerfile | 34
-rw-r--r--  kud/tests/cFW/vpp/80-vpp.conf | 15
-rw-r--r--  kud/tests/cFW/vpp/Dockerfile | 17
-rw-r--r--  kud/tests/generic_simulator/Dockerfile | 27
-rw-r--r--  kud/tests/generic_simulator/aai/responses.yml | 18
-rw-r--r--  kud/tests/generic_simulator/generic_sim.py | 109
-rw-r--r--  kud/tests/generic_simulator/requirements.txt | 11
-rwxr-xr-x  kud/tests/integration_cFW.sh | 33
-rwxr-xr-x  kud/tests/integration_vFW.sh | 45
-rwxr-xr-x  kud/tests/integration_vcFW.sh | 55
-rwxr-xr-x  kud/tests/istio.sh | 40
-rwxr-xr-x  kud/tests/multus.sh | 40
-rwxr-xr-x  kud/tests/nfd.sh | 62
-rwxr-xr-x  kud/tests/ovn-kubernetes.sh | 136
-rwxr-xr-x  kud/tests/ovn4nfv.sh | 46
-rwxr-xr-x  kud/tests/plugin.sh | 161
-rwxr-xr-x  kud/tests/plugin_edgex.sh | 69
-rw-r--r--  kud/tests/vFW/README.md | 50
-rw-r--r--  kud/tests/vFW/Vagrantfile | 66
-rw-r--r--  kud/tests/vFW/diagram.png | bin 0 -> 246934 bytes
-rwxr-xr-x  kud/tests/vFW/firewall | 96
-rwxr-xr-x  kud/tests/vFW/packetgen | 83
-rwxr-xr-x  kud/tests/vFW/sink | 48
-rwxr-xr-x  kud/tests/virtlet.sh | 43
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/Chart.yaml | 18
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/consul/Chart.yaml | 18
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-deployment.yaml | 50
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-service.yaml | 17
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/consul/values.yaml | 76
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/mongo/Chart.yaml | 18
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/charts/mongo/values.yaml | 71
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/command-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/command-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/data-deployment.yaml | 50
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/data-service.yaml | 17
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-deployment.yaml | 51
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/export-client-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/export-client-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-deployment.yaml | 50
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-service.yaml | 17
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/logging-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/logging-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/metadata-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/metadata-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/notifications-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/notifications-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/pv.yaml | 99
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/pvc.yaml | 143
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/helm/edgex/values.yaml | 125
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/command-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/consul-deployment.yaml | 48
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/data-deployment.yaml | 47
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/device-bluetooth-deployment.yaml | 49
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/export-client-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/export-distro-deployment.yaml | 47
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/logging-deployment.yaml | 47
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/metadata-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/mongo-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/notifications-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/rulesengine-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/deployments/scheduler-deployment.yaml | 46
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/metadata.yaml | 27
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/command-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/consul-service.yaml | 21
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/data-service.yaml | 18
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/device-bluetooth-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/export-client-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/export-distro-service.yaml | 18
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/logging-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/metadata-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/mongo-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/notifications-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/rulesengine-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/edgex/kubernetes/services/scheduler-service.yaml | 15
-rw-r--r--  kud/tests/vnfs/testrb/helm/profile/manifest.yaml | 7
-rw-r--r--  kud/tests/vnfs/testrb/helm/profile/override_values.yaml | 7
-rw-r--r--  kud/tests/vnfs/testrb/helm/profile/testfol/subdir/deployment.yaml | 51
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/Chart.yaml | 19
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/Chart.yaml | 18
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_name.tpl | 31
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_namespace.tpl | 26
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_repository.tpl | 48
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_service.tpl | 31
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/values.yaml | 44
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/deployment.yaml | 62
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/service.yaml | 39
-rw-r--r--  kud/tests/vnfs/testrb/helm/vault-consul-dev/values.yaml | 90
131 files changed, 7443 insertions(+), 0 deletions(-)
diff --git a/kud/README.md b/kud/README.md
new file mode 100644
index 00000000..20640424
--- /dev/null
+++ b/kud/README.md
@@ -0,0 +1,37 @@
+# Kubernetes Deployment
+
+## Summary
+
+This project offers a means for deploying a Kubernetes cluster
+that satisfies the requirements of the [ONAP multicloud/k8s plugin][1]. Its
+Ansible playbooks allow provisioning a deployment on Virtual Machines and on Bare Metal.
+
+![Diagram](../docs/img/diagram.png)
+
+## Components
+
+| Name | Description | Source | Status |
+|:--------------:|:----------------------------------------------|:----------------------------------|:------:|
+| Kubernetes | Base Kubernetes deployment | [kubespray][2] | Done |
+| ovn4nfv        | Integrates Open Virtual Networking (OVN)       | [configure-ovn4nfv.yml][3]        | Tested |
+| Virtlet        | Allows running VMs as Kubernetes pods          | [configure-virtlet.yml][4]        | Tested |
+| Multus | Provides Multiple Network support in a pod | [configure-multus.yml][5] | Tested |
+| NFD | Node feature discovery | [configure-nfd.yml][6] | Tested |
+| Istio | Service Mesh platform | [configure-istio.yml][7] | Tested |
+
+## Deployment
+
+The [installer](installer.sh) bash script contains the minimal
+Ubuntu instructions required for running this project.
+
+## License
+
+Apache-2.0
+
+[1]: https://git.onap.org/multicloud/k8s
+[2]: https://github.com/kubernetes-incubator/kubespray
+[3]: playbooks/configure-ovn4nfv.yml
+[4]: playbooks/configure-virtlet.yml
+[5]: playbooks/configure-multus.yml
+[6]: playbooks/configure-nfd.yml
+[7]: playbooks/configure-istio.yml
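
As a rough sketch of what a run looks like on an Ubuntu host (the working directory and log file name follow the Vagrantfile and aio.sh added later in this change, and an inventory is assumed to already exist under the hosting provider directory; the kubectl path is where the playbooks install the binary):

    cd kud/hosting_providers/vagrant
    ./installer.sh | tee kud_installer.log
    # once the playbooks finish, inspect the resulting cluster
    /usr/local/bin/kubectl get nodes -o wide
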
diff --git a/kud/deployment_infra/galaxy-requirements.yml b/kud/deployment_infra/galaxy-requirements.yml
new file mode 100644
index 00000000..5d232451
--- /dev/null
+++ b/kud/deployment_infra/galaxy-requirements.yml
@@ -0,0 +1,17 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- src: andrewrothstein.go
+ version: v2.1.10
+- src: andrewrothstein.kubectl
+ version: v1.1.18
+- src: andrewrothstein.kubernetes-helm
+ version: v1.2.9
+- src: geerlingguy.docker
+ version: 2.5.2
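
These pinned roles (andrewrothstein.go/kubectl/kubernetes-helm and geerlingguy.docker) are the ones the playbooks below reference. A minimal sketch of how such a requirements file is typically consumed, assuming the roles go into the default roles path:

    # install the pinned Galaxy roles before running the playbooks
    ansible-galaxy install -r kud/deployment_infra/galaxy-requirements.yml
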
diff --git a/kud/deployment_infra/playbooks/Debian.yml b/kud/deployment_infra/playbooks/Debian.yml
new file mode 100644
index 00000000..96357fe2
--- /dev/null
+++ b/kud/deployment_infra/playbooks/Debian.yml
@@ -0,0 +1,22 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+openvswitch_service: openvswitch-switch
+openvswitch_pkgs:
+ - openvswitch-common
+ - openvswitch-switch
+ - libopenvswitch
+ - openvswitch-datapath-dkms
+ovn_central_service: ovn-central
+ovn_central_pkgs:
+ - ovn-central # <= 2.8.1-1
+ovn_controller_service: ovn-host
+ovn_pkgs:
+ - ovn-common # <= 2.8.1-1
+ - ovn-host
diff --git a/kud/deployment_infra/playbooks/RedHat.yml b/kud/deployment_infra/playbooks/RedHat.yml
new file mode 100644
index 00000000..fe839bbd
--- /dev/null
+++ b/kud/deployment_infra/playbooks/RedHat.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+openvswitch_service:
+openvswitch_pkgs:
+ - openvswitch
+ovn_central_service: ovn-central
+ovn_central_pkgs:
+ - ovn-central # <= 2.8.1-1
+ovn_controller_service: ovn-host
+ovn_pkgs:
+ - ovn-common # <= 2.8.1-1
+ - ovn-host
diff --git a/kud/deployment_infra/playbooks/Suse.yml b/kud/deployment_infra/playbooks/Suse.yml
new file mode 100644
index 00000000..17d1147c
--- /dev/null
+++ b/kud/deployment_infra/playbooks/Suse.yml
@@ -0,0 +1,20 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+openvswitch_service:
+openvswitch_pkgs:
+ - openvswitch
+ - openvswitch-switch
+ovn_central_service: ovn-central
+ovn_central_pkgs:
+ - ovn-central # <= 2.8.1-1
+ovn_controller_service: ovn-host
+ovn_pkgs:
+ - ovn-common # <= 2.8.1-1
+ - ovn-host
diff --git a/kud/deployment_infra/playbooks/configure-istio.yml b/kud/deployment_infra/playbooks/configure-istio.yml
new file mode 100644
index 00000000..72542e5a
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-istio.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- hosts: localhost
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.kubectl
+ kubectl_ver: "v{{ kubectl_version }}"
+ - role: andrewrothstein.kubernetes-helm
+ kubernetes_helm_ver: "v{{ helm_client_version }}"
+ tasks:
+ - name: create istio folder
+ file:
+ state: directory
+ path: "{{ istio_dest }}"
+ - name: getting istio CRDs
+ block:
+ - name: download istio tarball
+ get_url:
+ url: "{{ istio_url }}"
+ dest: "/tmp/istio.tar.gz"
+ - name: extract istio source code
+ unarchive:
+ src: "/tmp/istio.tar.gz"
+ dest: "{{ istio_dest }}"
+ remote_src: yes
+    - name: copy istioctl binary to /usr/local/bin folder
+ become: yes
+ command: "mv {{ istio_dest }}/istio-{{ istio_version }}/bin/istioctl /usr/local/bin/"
+ when: istio_source_type == "tarball"
+    - name: create istio CRD objects
+ shell: "/usr/local/bin/kubectl apply -f {{ istio_dest }}/istio-{{ istio_version }}/install/kubernetes/helm/istio/templates/crds.yaml"
+ - name: render istio's core components
+ shell: "/usr/local/bin/helm template {{ istio_dest }}/istio-{{ istio_version }}/install/kubernetes/helm/istio --name istio --namespace istio-system > /tmp/istio.yaml"
+    - name: create istio-system namespace
+ shell: "/usr/local/bin/kubectl create namespace istio-system"
+ ignore_errors: True
+ - name: install the components via the manifest
+ shell: "/usr/local/bin/kubectl apply -f /tmp/istio.yaml"
+ ignore_errors: True
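
A quick, hedged way to confirm the rendered manifest actually came up after this play runs; it assumes the istio-system namespace and /tmp/istio.yaml produced by the tasks above:

    /usr/local/bin/kubectl get pods -n istio-system
    /usr/local/bin/kubectl get crd | grep istio.io
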
diff --git a/kud/deployment_infra/playbooks/configure-kud.yml b/kud/deployment_infra/playbooks/configure-kud.yml
new file mode 100644
index 00000000..9dcf6f39
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-kud.yml
@@ -0,0 +1,16 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: kube-node
+ become: yes
+ tasks:
+ - name: copy admin.conf file to kube-nodes
+ copy:
+ src: "{{ lookup('env','kud_inventory_folder') }}/artifacts/admin.conf"
+ dest: "/etc/kubernetes/admin.conf"
diff --git a/kud/deployment_infra/playbooks/configure-multus.yml b/kud/deployment_infra/playbooks/configure-multus.yml
new file mode 100644
index 00000000..1f6d6ce9
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-multus.yml
@@ -0,0 +1,120 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: kube-node
+ become: yes
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.go
+ go_ver: "{{ go_version }}"
+ when: multus_source_type == "source"
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
+ tasks:
+ - name: create multus binary folder
+ file:
+ state: directory
+ path: "{{ item }}"
+ with_items:
+ - /opt/cni/bin
+ - "{{ multus_dest }}"
+ - name: getting source code
+ block:
+ - name: clone Multus repo
+ git:
+ repo: "{{ multus_url }}"
+ dest: "{{ multus_dest }}"
+ version: "{{ multus_version }}"
+ force: yes
+ - name: build multus source code
+ command: ./build
+ args:
+ chdir: "{{ multus_dest }}"
+ - name: copy multus binary to opt folder
+ command: "mv {{ multus_dest }}/bin/multus /opt/cni/bin/multus"
+ when: multus_source_type == "source"
+ - name: getting binary
+ block:
+ - name: download Multus tarball
+ get_url:
+ url: "{{ multus_url }}"
+ dest: "/tmp/multus.tar.gz"
+ - name: extract multus source code
+ unarchive:
+ src: "/tmp/multus.tar.gz"
+ dest: "{{ multus_dest }}"
+ remote_src: yes
+ - name: copy multus binary to opt folder
+ command: "mv {{ multus_dest }}/multus-cni_v{{ multus_version }}_linux_amd64/multus-cni /opt/cni/bin/multus"
+ when: multus_source_type == "tarball"
+ - name: create multus configuration file
+ blockinfile:
+ marker: ""
+ path: /etc/cni/net.d/00-multus.conf
+ create: yes
+ block: |
+ {
+ "type": "multus",
+ "name": "multus-cni",
+ "cniVersion": "0.3.1",
+ "kubeconfig": "/etc/kubernetes/admin.conf",
+ "delegates": [
+ {
+ "type": "flannel",
+ "cniVersion": "0.3.1",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+ ]
+ }
+
+- hosts: localhost
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.kubectl
+ kubectl_ver: "v{{ kubectl_version }}"
+ tasks:
+ - name: define a CRD network object specification
+ blockinfile:
+ path: /tmp/crdnetwork.yml
+ create: yes
+ block: |
+ apiVersion: apiextensions.k8s.io/v1beta1
+ kind: CustomResourceDefinition
+ metadata:
+ name: network-attachment-definitions.k8s.cni.cncf.io
+ spec:
+ group: k8s.cni.cncf.io
+ version: v1
+ scope: Namespaced
+ names:
+ plural: network-attachment-definitions
+ singular: network-attachment-definition
+ kind: NetworkAttachmentDefinition
+ shortNames:
+ - net-attach-def
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ type: string
+
+ - name: create network objects
+ shell: "/usr/local/bin/kubectl apply -f /tmp/crdnetwork.yml"
+ ignore_errors: True
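
A short sketch for verifying what this playbook leaves behind: the CRD registered from the localhost play, plus the Multus binary and the delegating CNI configuration on each kube-node:

    /usr/local/bin/kubectl get crd network-attachment-definitions.k8s.cni.cncf.io
    # on a kube-node host
    ls -l /opt/cni/bin/multus
    cat /etc/cni/net.d/00-multus.conf
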
diff --git a/kud/deployment_infra/playbooks/configure-nfd.yml b/kud/deployment_infra/playbooks/configure-nfd.yml
new file mode 100644
index 00000000..a091d04b
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-nfd.yml
@@ -0,0 +1,61 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- hosts: kube-node
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ - name: clone NFD repo
+ git:
+ repo: "{{ nfd_url }}"
+ dest: "{{ nfd_dest }}"
+ version: "{{ nfd_version }}"
+ force: yes
+ when: nfd_source_type == "source"
+ - name: build NFD image
+ become: yes
+ make:
+ chdir: "{{ nfd_dest }}"
+    - name: get NFD image name
+ become: yes
+ shell: "docker images | grep kubernetes_incubator | awk '{printf(\"%s:%s\\n\", $1,$2)}'"
+ register: nfd_image
+ - name: replace NFD image name
+ lineinfile:
+ path: "{{ nfd_dest }}/node-feature-discovery-{{ item }}.json.template"
+ regexp: "\"image\": \"quay.io/kubernetes_incubator.*i"
+ line: "\"image\": \"{{ nfd_image.stdout }}\","
+ with_items:
+ - daemonset
+ - job
+ - name: copying rbac and daemonset files
+ fetch:
+ src: "{{ nfd_dest }}/{{ item }}"
+ dest: "/tmp/"
+ flat: yes
+ with_items:
+ - rbac.yaml
+ - node-feature-discovery-daemonset.json.template
+
+- hosts: localhost
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.kubectl
+ kubectl_ver: "v{{ kubectl_version }}"
+ tasks:
+    - name: create NFD RBAC and daemonset objects
+ command: "/usr/local/bin/kubectl apply -f /tmp/{{ item }}"
+ with_items:
+ - rbac.yaml
+ - node-feature-discovery-daemonset.json.template
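
Once the daemonset is running, NFD advertises hardware capabilities as node labels. A hedged way to inspect them; the label prefix depends on the NFD version, so both the kubernetes-incubator and feature.node.kubernetes.io prefixes are matched here as an assumption:

    /usr/local/bin/kubectl get nodes --show-labels | tr ',' '\n' \
        | grep -E 'kubernetes-incubator|feature.node.kubernetes.io' || true
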
diff --git a/kud/deployment_infra/playbooks/configure-ovn-kubernetes.yml b/kud/deployment_infra/playbooks/configure-ovn-kubernetes.yml
new file mode 100644
index 00000000..5f1c9f64
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-ovn-kubernetes.yml
@@ -0,0 +1,136 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- import_playbook: configure-ovn.yml
+
+- hosts: ovn-central:ovn-controller
+ vars:
+ central_node_ip: "{{ hostvars[groups['ovn-central'][0]]['ansible_ssh_host'] }}"
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.go
+ go_ver: "{{ go_version }}"
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ - name: clone ovn-kubernetes repo
+ git:
+ repo: "{{ ovn_kubernetes_url }}"
+ dest: "{{ ovn_kubernetes_dest }}"
+ version: "{{ ovn_kubernetes_version }}"
+ force: yes
+ when: ovn_kubernetes_source_type == "source"
+ - name: getting binaries
+ block:
+ - name: download ovn-kubernetes tarball
+ get_url:
+ url: "{{ ovn_kubernetes_url }}"
+ dest: /tmp/ovn-kubernetes.tar.gz
+ - name: extract ovn-kubernetes source code
+ unarchive:
+ src: /tmp/ovn-kubernetes.tar.gz
+ dest: /tmp/
+ remote_src: yes
+ - name: rename extracted folder
+ command: "mv /tmp/ovn-kubernetes-{{ ovn_kubernetes_version }}/ {{ ovn_kubernetes_dest }}/"
+ when: ovn_kubernetes_source_type == "tarball"
+ - name: make ovnkube files
+ make:
+ chdir: "{{ ovn_kubernetes_dest }}/go-controller"
+ - name: install ovnkube files
+ make:
+ chdir: "{{ ovn_kubernetes_dest }}/go-controller"
+ target: install
+ become: yes
+ - name: create OVN Kubernetes config file
+ become: yes
+ blockinfile:
+ path: /etc/openvswitch/ovn_k8s.conf
+ create: yes
+ block: |
+ [logging]
+ loglevel=5
+ logfile=/var/log/openvswitch/ovnkube.log
+
+ [cni]
+ conf-dir=/etc/cni/net.d
+ plugin=ovn-k8s-cni-overlay
+ - name: create ovnkube logging directory
+ file:
+ path: /var/log/openvswitch
+ state: directory
+
+- hosts: ovn-central
+ become: yes
+ vars:
+ central_node_ip: "{{ hostvars[groups['ovn-central'][0]]['ansible_ssh_host'] }}"
+ tasks:
+ - name: create ovnkube central systemd service
+ blockinfile:
+ path: /etc/systemd/system/ovn-k8s-central.service
+ create: yes
+ block: |
+ [Unit]
+ Description=OVN Central Daemon
+
+ [Service]
+ ExecStart=/usr/bin/ovnkube \
+ -net-controller \
+ -init-master="{{ ansible_hostname }}" \
+ -init-node="{{ ansible_hostname }}" \
+ -nodeport \
+ -k8s-kubeconfig=/etc/kubernetes/admin.conf \
+ -k8s-token="test" \
+ -nb-address="tcp://{{ central_node_ip }}:6641" \
+ -sb-address="tcp://{{ central_node_ip }}:6642"
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: start ovnkube central systemd service
+ service:
+ name: ovn-k8s-central
+ state: started
+ enabled: yes
+
+- hosts: ovn-controller
+ become: yes
+ vars:
+ central_node_ip: "{{ hostvars[groups['ovn-central'][0]]['ansible_ssh_host'] }}"
+ tasks:
+ - name: create ovnkube controller systemd service
+ blockinfile:
+ path: /etc/systemd/system/ovn-k8s-host.service
+ create: yes
+ block: |
+ [Unit]
+ Description=OVN Controller Daemon
+
+ [Service]
+ ExecStart=/usr/bin/ovnkube \
+ -init-gateways \
+ -init-node="{{ ansible_hostname }}" \
+ -nodeport \
+ -k8s-kubeconfig=/etc/kubernetes/admin.conf \
+ -k8s-token="test" \
+ -nb-address="tcp://{{ central_node_ip }}:6641" \
+ -sb-address="tcp://{{ central_node_ip }}:6642"
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: start ovnkube controller systemd service
+ service:
+ name: ovn-k8s-host
+ state: started
+ enabled: yes
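
To confirm that the two units defined above are running and that the central databases accept the TCP connections the ExecStart lines point at, something like the following can be used (central_node_ip is a placeholder for the ovn-central host address):

    systemctl status ovn-k8s-central --no-pager     # on the ovn-central host
    systemctl status ovn-k8s-host --no-pager        # on ovn-controller hosts
    sudo ovn-nbctl --db=tcp:<central_node_ip>:6641 show
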
diff --git a/kud/deployment_infra/playbooks/configure-ovn.yml b/kud/deployment_infra/playbooks/configure-ovn.yml
new file mode 100644
index 00000000..3fd2c765
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-ovn.yml
@@ -0,0 +1,109 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: ovn-central:ovn-controller
+ become: yes
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ ansible_os_family }}.yml"
+    - name: get WAND GPG key
+ get_url:
+ url: https://packages.wand.net.nz/keyring.gpg
+ dest: /etc/apt/trusted.gpg.d/wand.gpg
+ - name: add WAND Debian Repo
+ apt_repository:
+ repo: "deb https://packages.wand.net.nz {{ ansible_lsb.codename }} main"
+ state: present
+ - name: install OpenVSwitch packages
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ openvswitch_pkgs }}"
+ - name: install Open Virtual Network components
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ ovn_pkgs }}"
+ - name: start OpenVSwitch services
+ service:
+ name: "{{ openvswitch_service }}"
+ state: started
+
+- hosts: ovn-central
+ become: yes
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ ansible_os_family }}.yml"
+ - name: install Open Virtual Network central components
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ ovn_central_pkgs }}"
+ - name: enable remote connections to southbound and northbound dbs
+ lineinfile:
+ path: /etc/default/ovn-central
+ line: "OVN_CTL_OPTS=\" --db-sb-create-insecure-remote=yes --db-nb-create-insecure-remote=yes\""
+ state: present
+ when: ansible_os_family == "Debian"
+ - name: start OVN northbound database services
+ service:
+ name: "{{ ovn_central_service }}"
+ state: restarted
+
+- hosts: ovn-controller
+ become: yes
+ vars:
+ ovn_central_ips: "{{ groups['ovn-central'] | map('extract', hostvars, ['ansible_ssh_host']) | join(',') }}"
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ ansible_os_family }}.yml"
+ - name: stop the ovn-controller service
+ service:
+ name: "{{ ovn_controller_service }}"
+ state: stopped
+ - name: configure OpenVSwitch databases
+ openvswitch_db:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: ovn-remote
+ value: \""tcp:{{ item }}:6642"\"
+ with_items: "{{ ovn_central_ips }}"
+ - name: enable overlay network protocols
+ openvswitch_db:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: ovn-encap-type
+ value: geneve
+ - name: configure the overlay network local endpoint IP address.
+ openvswitch_db:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: ovn-encap-ip
+ value: "{{ ansible_default_ipv4.address }}"
+ - name: start the ovn-controller service
+ service:
+ name: "{{ ovn_controller_service }}"
+ state: started
+ - name: ensuring that br-int bridge exists
+ openvswitch_bridge:
+ bridge: br-int
+ state: present
+ fail_mode: secure
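
A short sketch for checking the Open vSwitch state this play configures on a controller node (the ovn-host service name is the Debian one from Debian.yml):

    sudo ovs-vsctl get Open_vSwitch . external_ids   # ovn-remote, ovn-encap-type, ovn-encap-ip
    sudo ovs-vsctl list-br                           # br-int should be listed
    systemctl status ovn-host --no-pager
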
diff --git a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
new file mode 100644
index 00000000..f8dabd9d
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
@@ -0,0 +1,98 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- import_playbook: configure-ovn.yml
+- import_playbook: configure-multus.yml
+
+- hosts: kube-master:kube-node
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
+ roles:
+ - role: andrewrothstein.go
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ - name: clone ovn4nfv-k8s-plugin repo
+ git:
+ repo: "{{ ovn4nfv_url }}"
+ dest: "{{ ovn4nfv_dest }}"
+ version: "{{ ovn4nfv_version }}"
+ force: yes
+ when: ovn4nfv_source_type == "source"
+ - name: clean ovn4nfvk8s left over files
+ make:
+ chdir: "{{ ovn4nfv_dest }}"
+ target: clean
+ - name: build ovn4nfvk8s-cni
+ make:
+ chdir: "{{ ovn4nfv_dest }}"
+ target: ovn4nfvk8s-cni
+ become: yes
+ environment:
+ GOPATH: "{{ go_path }}"
+ - name: copy ovn4nfvk8s-cni to cni folder
+ command: "mv {{ ovn4nfv_dest }}/ovn4nfvk8s-cni /opt/cni/bin/ovn4nfvk8s-cni"
+ become: yes
+ - name: create ovn4k8s config file
+ become: yes
+ blockinfile:
+ path: /etc/openvswitch/ovn4nfv_k8s.conf
+ create: yes
+ block: |
+ [logging]
+ loglevel=5
+ logfile=/var/log/openvswitch/ovn4k8s.log
+
+ [cni]
+ conf-dir=/etc/cni/net.d
+ plugin=ovn4nfvk8s-cni
+
+ [kubernetes]
+ kubeconfig=/etc/kubernetes/admin.conf
+ - name: create ovnkube logging directory
+ file:
+ path: /var/log/openvswitch
+ state: directory
+
+- hosts: kube-master
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
+ become: yes
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ - name: build ovn4nfvk8s
+ make:
+ chdir: "{{ ovn4nfv_dest }}"
+ target: ovn4nfvk8s
+ environment:
+ GOPATH: "{{ go_path }}"
+ - name: copy ovn4nfvk8s to /usr/bin folder
+ command: "mv {{ ovn4nfv_dest }}/ovn4nfvk8s /usr/bin/ovn4nfvk8s"
+ - name: create ovn4nfvk8s systemd service
+ blockinfile:
+ path: /etc/systemd/system/ovn4nfvk8s.service
+ create: yes
+ block: |
+ [Unit]
+ Description=OVN4NFV Kubernetes Daemon
+
+ [Service]
+ ExecStart=/usr/bin/ovn4nfvk8s \
+ -k8s-kubeconfig=/etc/kubernetes/admin.conf
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: start ovn4nfvk8s systemd service
+ service:
+ name: ovn4nfvk8s
+ state: started
+ enabled: yes
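
A hedged sketch for verifying the pieces this playbook installs: the daemon on the master, and the CNI plugin plus its configuration on every node:

    systemctl status ovn4nfvk8s --no-pager           # on kube-master
    ls -l /opt/cni/bin/ovn4nfvk8s-cni                # on every node
    sudo cat /etc/openvswitch/ovn4nfv_k8s.conf
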
diff --git a/kud/deployment_infra/playbooks/configure-virtlet.yml b/kud/deployment_infra/playbooks/configure-virtlet.yml
new file mode 100644
index 00000000..753e487e
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-virtlet.yml
@@ -0,0 +1,250 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ vars:
+ images_file: /tmp/images.yaml
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.kubectl
+ kubectl_ver: "v{{ kubectl_version }}"
+ - role: geerlingguy.docker
+ when: virtlet_source_type == "source"
+ tasks:
+ - name: create Virtlet binary folder
+ file:
+ state: directory
+ path: "{{ virtlet_dest }}"
+ - name: apply virtlet extraRuntime label
+ command: "/usr/local/bin/kubectl label node {{ item }} extraRuntime=virtlet --overwrite"
+ with_inventory_hostnames: virtlet
+    - name: create image translations configmap file
+ blockinfile:
+ path: "{{ images_file }}"
+ create: yes
+ block: |
+ translations:
+ - name: ubuntu/14.04
+ url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ - name: ubuntu/16.04
+ url: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ - name: ubuntu/18.04
+ url: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
+ - regexp: 'centos/(\d+)-(\d+)'
+ url: 'https://cloud.centos.org/centos/$1/images/CentOS-$1-x86_64-GenericCloud-$2.qcow2'
+ - name: fedora
+ url: https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ {% if lookup('env','http_proxy') != "" %}
+ transports:
+ "":
+ proxy: "{{ lookup('env','http_proxy') }}"
+ {% endif %}
+ - name: install image translations configmap
+ shell: "/usr/local/bin/kubectl create configmap -n kube-system virtlet-image-translations --from-file {{ images_file }} --dry-run -o yaml | /usr/local/bin/kubectl apply -f -"
+ ignore_errors: True
+ - name: create Virtlet folder
+ file:
+ state: directory
+ path: "{{ virtlet_dest }}"
+ - name: getting source code
+ block:
+ - name: clone Virtlet repo
+ git:
+ repo: "{{ virtlet_url }}"
+ dest: "{{ virtlet_dest }}"
+ version: "{{ virtlet_version }}"
+ force: yes
+ - name: configure proxy values for docker service
+ block:
+ - name: create docker config folder
+ become: yes
+ file:
+ state: directory
+ path: "/etc/systemd/system/docker.service.d"
+ - name: Configure docker service to use http_proxy env value
+ become: yes
+ blockinfile:
+ dest: "/etc/systemd/system/docker.service.d/http-proxy.conf"
+ create: yes
+ block: |
+ [Service]
+ Environment="HTTP_PROXY={{ lookup('env','http_proxy') }}"
+ when:
+ - lookup('env','http_proxy') != "fooproxy"
+ - name: Configure docker service to use https_proxy env value
+ become: yes
+ blockinfile:
+ dest: "/etc/systemd/system/docker.service.d/https-proxy.conf"
+ create: yes
+ block: |
+ [Service]
+ Environment="HTTPS_PROXY={{ lookup('env','https_proxy') }}"
+ when:
+ - lookup('env','https_proxy') != "fooproxy"
+ - name: Configure docker service to use no_proxy env value
+ become: yes
+ blockinfile:
+ dest: "/etc/systemd/system/docker.service.d/no-proxy.conf"
+ create: yes
+ block: |
+ [Service]
+ Environment="NO_PROXY={{ lookup('env','no_proxy') }}"
+ when:
+ - lookup('env','no_proxy') != "fooproxy"
+ - name: reload systemd
+ become: yes
+ command: systemctl daemon-reload
+ - name: restart docker service
+ become: yes
+ service:
+ name: docker
+ state: restarted
+ when: lookup('env','http_proxy') != "fooproxy" or lookup('env','https_proxy') != "fooproxy" or lookup('env','no_proxy') != "fooproxy"
+ - name: build virtlet source code
+ command: ./cmd.sh build
+ args:
+ chdir: "{{ virtlet_dest }}/build"
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ when: virtlet_source_type == "source"
+ - name: download virtletctl
+ get_url:
+ url: "{{ virtlet_url }}"
+ dest: "{{ virtlet_dest }}/virtletctl"
+ when: virtlet_source_type == "binary"
+ - name: set virtletctl execution permissions
+ file:
+ path: "{{ virtlet_dest }}/virtletctl"
+ mode: "+x"
+ - name: install virtletctl as kubectl plugin
+ become: yes
+ command: "mv {{ virtlet_dest }}/virtletctl /usr/local/bin/kubectl-virt"
+ - name: create Virtlet k8s objects
+ shell: "/usr/local/bin/kubectl virt gen | /usr/local/bin/kubectl apply -f -"
+ ignore_errors: True
+ - name: wait for Virtlet daemonset
+ shell: "/usr/local/bin/kubectl get ds virtlet -n=kube-system -o=jsonpath --template={.status.numberReady}"
+ register: daemonset
+      until:
+        - daemonset.stdout == "1"
+ retries: 6
+ delay: 10
+
+- hosts: virtlet
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ - name: create CRIProxy binary folder
+ file:
+ state: directory
+ path: "{{ criproxy_dest }}"
+ - name: disable AppArmor in all nodes
+ become: yes
+ service:
+ name: apparmor
+ state: stopped
+ enabled: no
+ when: ansible_os_family == "Debian"
+ - name: modify args for kubelet service
+ become: yes
+ lineinfile:
+ dest: /etc/systemd/system/kubelet.service
+ line: " --container-runtime=remote --container-runtime-endpoint=unix:///run/criproxy.sock --image-service-endpoint=unix:///run/criproxy.sock --enable-controller-attach-detach=false \\"
+ insertafter: '^ExecStart=/usr/local/bin/kubelet *'
+ state: present
+ - name: create dockershim service
+ become: yes
+ blockinfile:
+ path: /etc/systemd/system/dockershim.service
+ create: yes
+ block: |
+ [Unit]
+ Description=dockershim for criproxy
+
+ [Service]
+ EnvironmentFile=-/etc/kubernetes/kubelet.env
+ ExecStartPre=-/bin/mkdir -p /var/lib/kubelet/volume-plugins
+ ExecStart=/usr/local/bin/kubelet --experimental-dockershim --port 11250 \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBELET_API_SERVER \
+ $KUBELET_ADDRESS \
+ $KUBELET_PORT \
+ $KUBELET_HOSTNAME \
+ $KUBE_ALLOW_PRIV \
+ $KUBELET_ARGS \
+ $DOCKER_SOCKET \
+ $KUBELET_NETWORK_PLUGIN \
+ $KUBELET_VOLUME_PLUGIN \
+ $KUBELET_CLOUDPROVIDER
+ Restart=always
+ StartLimitInterval=0
+ RestartSec=10
+
+ [Install]
+ RequiredBy=criproxy.service
+ - name: getting source code
+ block:
+ - name: clone CRIProxy repo
+ git:
+ repo: "{{ criproxy_url }}"
+ dest: "{{ criproxy_dest }}"
+ version: "{{ criproxy_version }}"
+ force: yes
+ - name: build criproxy source code
+ command: ./build-package.sh
+ args:
+ chdir: "{{ criproxy_dest }}"
+ when: criproxy_source_type == "source"
+ - name: download CRIproxy package
+ get_url:
+ url: "{{ criproxy_url }}"
+ dest: "{{ criproxy_dest }}/criproxy"
+ when: criproxy_source_type == "binary"
+ - name: set criproxy execution permissions
+ file:
+ path: "{{ criproxy_dest }}/criproxy"
+ mode: "+x"
+ - name: create criproxy service
+ become: yes
+ blockinfile:
+ path: /etc/systemd/system/criproxy.service
+ create: yes
+ block: |
+ [Unit]
+ Description=CRI Proxy
+
+ [Service]
+ ExecStart={{ criproxy_dest }}/criproxy -v 3 -logtostderr -connect /var/run/dockershim.sock,virtlet.cloud:/run/virtlet.sock -listen /run/criproxy.sock
+ Restart=always
+ StartLimitInterval=0
+ RestartSec=10
+
+ [Install]
+ WantedBy=kubelet.service
+ - name: start criproxy and dockershim services
+ become: yes
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - dockershim
+ - criproxy
+ - name: restart kubelet services
+ become: yes
+ service:
+ name: kubelet
+ state: restarted
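
After this play completes, VM pods land on the labelled nodes by carrying the kubernetes.io/target-runtime: virtlet.cloud annotation and using virtlet.cloud/... image names that resolve through the translations configmap above. A minimal sketch for checking the wiring itself:

    /usr/local/bin/kubectl get ds virtlet -n kube-system       # numberReady should match the virtlet nodes
    /usr/local/bin/kubectl get nodes -l extraRuntime=virtlet
    # on a virtlet host
    systemctl status criproxy dockershim --no-pager
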
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
new file mode 100644
index 00000000..d6bd0ee6
--- /dev/null
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -0,0 +1,63 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+base_dest: /tmp
+
+multus_dest: "{{ base_dest }}/multus-cni"
+#multus_source_type: "tarball"
+#multus_version: 3.1
+#multus_url: "https://github.com/intel/multus-cni/releases/download/v{{ multus_version }}/multus-cni_v{{ multus_version }}_linux_amd64.tar.gz"
+multus_source_type: "source"
+multus_version: 366f2120cb88c85deab6343b7062fd38fdb0ece9
+multus_url: "https://github.com/ritusood/multus-cni"
+
+ovn_kubernetes_dest: "{{ base_dest }}/ovn-kubernetes"
+ovn_kubernetes_source_type: "tarball"
+ovn_kubernetes_version: 0.3.0
+ovn_kubernetes_url: "https://github.com/openvswitch/ovn-kubernetes/archive/v{{ ovn_kubernetes_version }}.tar.gz"
+#ovn_kubernetes_source_type: "source"
+#ovn_kubernetes_version: 456a0857956988f968bb08644c650ba826592ec1
+#ovn_kubernetes_url: "https://github.com/openvswitch/ovn-kubernetes"
+
+criproxy_dest: "{{ base_dest }}/criproxy"
+criproxy_source_type: "binary"
+criproxy_version: 0.14.0
+criproxy_url: "https://github.com/Mirantis/criproxy/releases/download/v{{ criproxy_version }}/criproxy"
+#criproxy_source_type: "source"
+#criproxy_version: b5ca5a6cec278e2054dface4f7a3e111fb9ab84b
+#criproxy_url: "https://github.com/Mirantis/criproxy"
+virtlet_dest: "{{ base_dest }}/virtlet"
+virtlet_source_type: "binary"
+virtlet_version: 1.4.4
+virtlet_url: "https://github.com/Mirantis/virtlet/releases/download/v{{ virtlet_version }}/virtletctl"
+#virtlet_source_type: "source"
+#virtlet_version: 68e11b8f1db2c78b063126899f0e60910700975d
+#virtlet_url: "https://github.com/Mirantis/virtlet"
+
+nfd_dest: "{{ base_dest }}/nfd"
+nfd_source_type: "source"
+nfd_version: 175305b1ad73be7301ac94add475cec6fef797a9
+nfd_url: "https://github.com/kubernetes-incubator/node-feature-discovery"
+
+istio_dest: "{{ base_dest }}/istio"
+istio_source_type: "tarball"
+istio_version: 1.0.3
+istio_url: "https://github.com/istio/istio/releases/download/{{ istio_version }}/istio-{{ istio_version }}-linux.tar.gz"
+
+go_path: "{{ base_dest }}/go"
+ovn4nfv_dest: "{{ go_path }}/src/ovn4nfv-k8s-plugin"
+ovn4nfv_source_type: "source"
+ovn4nfv_version: 5026d1d89b05eac5e004279b742df6745a73d93a
+ovn4nfv_url: "https://git.opnfv.org/ovn4nfv-k8s-plugin/"
+
+go_version: '1.11'
+kubespray_version: 2.8.2
+kubectl_version: 1.12.2
+helm_client_version: 2.9.1
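
The commented alternatives show that every component can be consumed either from a release artifact or from source. A hedged example of flipping one of those switches for a single run instead of editing this file, using the tarball values commented above (the inventory path is a placeholder):

    ansible-playbook -i <inventory> kud/deployment_infra/playbooks/configure-multus.yml \
        -e multus_source_type=tarball -e multus_version=3.1 \
        -e multus_url=https://github.com/intel/multus-cni/releases/download/v3.1/multus-cni_v3.1_linux_amd64.tar.gz
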
diff --git a/kud/hosting_providers/baremetal/README.md b/kud/hosting_providers/baremetal/README.md
new file mode 100644
index 00000000..4f81d7b5
--- /dev/null
+++ b/kud/hosting_providers/baremetal/README.md
@@ -0,0 +1,22 @@
+# Kubernetes Deployment
+
+## Summary
+
+This project offers a means for deploying a Kubernetes cluster
+that satisfies the requirements of the [ONAP multicloud/k8s plugin][1]. Its
+Ansible playbooks allow provisioning a deployment on Bare Metal.
+
+
+![Diagram](../../../docs/img/installer_workflow.png)
+
+
+## Deployment
+
+The [installer](installer.sh) bash script contains the minimal
+Ubuntu instructions required for running this project.
+
+## License
+
+Apache-2.0
+
+[1]: https://git.onap.org/multicloud/k8s
diff --git a/kud/hosting_providers/vagrant/README.md b/kud/hosting_providers/vagrant/README.md
new file mode 100644
index 00000000..00f0a70f
--- /dev/null
+++ b/kud/hosting_providers/vagrant/README.md
@@ -0,0 +1,36 @@
+# Kubernetes Deployment
+
+## Summary
+
+This project offers a means for deploying a Kubernetes cluster
+that satisfies the requirements of the [ONAP multicloud/k8s plugin][1]. Its
+Ansible playbooks allow provisioning a deployment on Virtual Machines.
+
+![Diagram](../../../docs/img/diagram.png)
+
+## Deployment
+
+The [installer](installer.sh) bash script contains the minimal
+Ubuntu instructions required for running this project.
+
+### Virtual Machines
+
+This project uses the [Vagrant tool][2] for provisioning Virtual Machines
+automatically. The [setup](setup.sh) bash script contains the
+Linux instructions to install the dependencies and plugins required for
+its usage. This script supports two virtualization technologies
+(Libvirt and VirtualBox).
+
+ $ ./setup.sh -p libvirt
+
+Once Vagrant is installed, it's possible to provision a cluster using
+the following instructions:
+
+ $ vagrant up && vagrant up installer
+
+## License
+
+Apache-2.0
+
+[1]: https://git.onap.org/multicloud/k8s
+[2]: https://www.vagrantup.com/
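
The Vagrantfile below reads config/pdf.yml when it exists and falls back to config/default.yml, so the sample descriptors can be used as a starting point:

    # use the mini sample topology instead of the default five-node layout
    cp config/samples/pdf.yml.mini config/pdf.yml
    ./setup.sh -p libvirt
    vagrant up && vagrant up installer
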
diff --git a/kud/hosting_providers/vagrant/Vagrantfile b/kud/hosting_providers/vagrant/Vagrantfile
new file mode 100644
index 00000000..105c7e99
--- /dev/null
+++ b/kud/hosting_providers/vagrant/Vagrantfile
@@ -0,0 +1,130 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+box = {
+ :virtualbox => { :name => 'elastic/ubuntu-16.04-x86_64', :version => '20180708.0.0' },
+ :libvirt => { :name => 'elastic/ubuntu-16.04-x86_64', :version=> '20180210.0.0'}
+}
+
+require 'yaml'
+pdf = File.dirname(__FILE__) + '/config/default.yml'
+if File.exist?(File.dirname(__FILE__) + '/config/pdf.yml')
+ pdf = File.dirname(__FILE__) + '/config/pdf.yml'
+end
+nodes = YAML.load_file(pdf)
+
+# Inventory file creation
+File.open(File.dirname(__FILE__) + "/inventory/hosts.ini", "w") do |inventory_file|
+ inventory_file.puts("[all]")
+ nodes.each do |node|
+ inventory_file.puts("#{node['name']}\tansible_ssh_host=#{node['ip']} ansible_ssh_port=22")
+ end
+ ['kube-master', 'kube-node', 'etcd', 'ovn-central', 'ovn-controller', 'virtlet'].each do|group|
+ inventory_file.puts("\n[#{group}]")
+ nodes.each do |node|
+ if node['roles'].include?("#{group}")
+ inventory_file.puts(node['name'])
+ end
+ end
+ end
+ inventory_file.puts("\n[k8s-cluster:children]\nkube-node\nkube-master")
+end
+
+provider = (ENV['VAGRANT_DEFAULT_PROVIDER'] || :libvirt).to_sym
+puts "[INFO] Provider: #{provider} "
+
+if ENV['no_proxy'] != nil or ENV['NO_PROXY']
+ $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+ nodes.each do |node|
+ $no_proxy += "," + node['ip']
+ end
+ $subnet = "192.168.121"
+ if provider == :virtualbox
+ $subnet = "10.0.2"
+ end
+ # NOTE: This range is based on vagrant-libvirt network definition CIDR 192.168.121.0/27
+ (1..31).each do |i|
+ $no_proxy += ",#{$subnet}.#{i}"
+ end
+end
+
+Vagrant.configure("2") do |config|
+ config.vm.box = box[provider][:name]
+ config.vm.box_version = box[provider][:version]
+ config.ssh.insert_key = false
+
+ if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil
+ if Vagrant.has_plugin?('vagrant-proxyconf')
+ config.proxy.http = ENV['http_proxy'] || ENV['HTTP_PROXY'] || ""
+ config.proxy.https = ENV['https_proxy'] || ENV['HTTPS_PROXY'] || ""
+ config.proxy.no_proxy = $no_proxy
+ config.proxy.enabled = { docker: false }
+ end
+ end
+
+ nodes.each do |node|
+ config.vm.define node['name'] do |nodeconfig|
+ nodeconfig.vm.hostname = node['name']
+ nodeconfig.vm.network :private_network, :ip => node['ip'], :type => :static
+ nodeconfig.vm.provider 'virtualbox' do |v|
+ v.customize ["modifyvm", :id, "--memory", node['memory']]
+ v.customize ["modifyvm", :id, "--cpus", node['cpus']]
+ if node.has_key? "volumes"
+ node['volumes'].each do |volume|
+ $volume_file = "#{node['name']}-#{volume['name']}.vdi"
+ unless File.exist?($volume_file)
+ v.customize ['createmedium', 'disk', '--filename', $volume_file, '--size', volume['size']]
+ end
+ v.customize ['storageattach', :id, '--storagectl', 'IDE Controller', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', $volume_file]
+ end
+ end
+ end
+ nodeconfig.vm.provider 'libvirt' do |v|
+ v.memory = node['memory']
+ v.cpus = node['cpus']
+ v.nested = true
+ v.cpu_mode = 'host-passthrough'
+ v.management_network_address = "192.168.121.0/27"
+ nodeconfig.vm.provision 'shell' do |sh|
+ sh.path = "node.sh"
+ if node.has_key? "volumes"
+ $volume_mounts_dict = ''
+ node['volumes'].each do |volume|
+ $volume_mounts_dict += "#{volume['name']}=#{volume['mount']},"
+ $volume_file = "./#{node['name']}-#{volume['name']}.qcow2"
+ v.storage :file, :bus => 'sata', :device => volume['name'], :size => volume['size']
+ end
+ sh.args = ['-v', $volume_mounts_dict[0...-1]]
+ end
+ end
+ end
+ end
+ end
+ sync_type = "virtualbox"
+ if provider == :libvirt
+ sync_type = "nfs"
+ end
+ config.vm.define :installer, primary: true, autostart: false do |installer|
+ installer.vm.hostname = "multicloud"
+ installer.vm.network :private_network, :ip => "10.10.10.2", :type => :static
+ installer.vm.synced_folder '../../../', '/home/vagrant/multicloud-k8s/', type: sync_type
+ installer.vm.provision 'shell', privileged: false do |sh|
+ sh.env = {'KUD_PLUGIN_ENABLED': 'true'}
+ sh.inline = <<-SHELL
+ cp /vagrant/insecure_keys/key.pub /home/vagrant/.ssh/id_rsa.pub
+ cp /vagrant/insecure_keys/key /home/vagrant/.ssh/id_rsa
+ chown vagrant /home/vagrant/.ssh/id_rsa
+ chmod 400 /home/vagrant/.ssh/id_rsa
+ cd /home/vagrant/multicloud-k8s/kud/hosting_providers/vagrant/ && ./installer.sh | tee kud_installer.log
+ SHELL
+ end
+ end
+end
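
The provider is taken from VAGRANT_DEFAULT_PROVIDER (defaulting to libvirt), and behind a proxy the no_proxy list is extended with every node IP declared in the PDF. A one-line sketch of forcing VirtualBox instead:

    VAGRANT_DEFAULT_PROVIDER=virtualbox vagrant up && vagrant up installer
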
diff --git a/kud/hosting_providers/vagrant/aio.sh b/kud/hosting_providers/vagrant/aio.sh
new file mode 100755
index 00000000..31663af5
--- /dev/null
+++ b/kud/hosting_providers/vagrant/aio.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [[ $(whoami) != 'root' ]];then
+ echo "This bash script must be executed as root user"
+ exit 1
+fi
+
+echo "Cloning and configuring KUD project..."
+git clone https://git.onap.org/multicloud/k8s/
+cd k8s/kud/hosting_providers/baremetal/
+cat <<EOL > inventory/hosts.ini
+[all]
+localhost
+
+[kube-master]
+localhost
+
+[kube-node]
+localhost
+
+[etcd]
+localhost
+
+[ovn-central]
+localhost
+
+[ovn-controller]
+localhost
+
+[virtlet]
+localhost
+
+[k8s-cluster:children]
+kube-node
+kube-master
+EOL
+sed -i '/andrewrothstein.kubectl/d' ../../deployment_infra/playbooks/configure-*.yml
+echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+chmod og-wx ~/.ssh/authorized_keys
+
+echo "Enabling nested-virtualization"
+./node.sh
+
+echo "Deploying KRD project"
+./installer.sh | tee kud_installer.log
diff --git a/kud/hosting_providers/vagrant/config/default.yml b/kud/hosting_providers/vagrant/config/default.yml
new file mode 100644
index 00000000..10b93663
--- /dev/null
+++ b/kud/hosting_providers/vagrant/config/default.yml
@@ -0,0 +1,53 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "controller01"
+ ip: "10.10.10.3"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-central
+- name: "controller02"
+ ip: "10.10.10.4"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-controller
+- name: "controller03"
+ ip: "10.10.10.5"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-controller
+- name: "compute01"
+ ip: "10.10.10.6"
+ memory: 32768
+ cpus: 16
+ volumes:
+ - name: sda
+ size: 50
+ mount: /var/lib/docker/
+ roles:
+ - kube-node
+ - ovn-controller
+ - virtlet
+- name: "compute02"
+ ip: "10.10.10.7"
+ memory: 8192
+ cpus: 4
+ roles:
+ - kube-node
+ - ovn-controller
diff --git a/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio b/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio
new file mode 100644
index 00000000..48a3c938
--- /dev/null
+++ b/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio
@@ -0,0 +1,25 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "kubernetes"
+ ip: "10.10.10.3"
+ memory: 32768
+ cpus: 16
+ volumes:
+ - name: sda
+ size: 50
+ mount: /var/lib/docker/
+ roles:
+ - kube-master
+ - etcd
+ - ovn-central
+ - kube-node
+ - ovn-controller
+ - virtlet
diff --git a/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini b/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini
new file mode 100644
index 00000000..d53a4537
--- /dev/null
+++ b/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini
@@ -0,0 +1,33 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "master"
+ ip: "10.10.10.3"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-central
+- name: "minion01"
+ ip: "10.10.10.4"
+ memory: 65536
+ cpus: 16
+ roles:
+ - kube-node
+ - ovn-controller
+ - virtlet
+- name: "minion02"
+ ip: "10.10.10.5"
+ memory: 65536
+ cpus: 16
+ roles:
+ - kube-node
+ - ovn-controller
diff --git a/kud/hosting_providers/vagrant/insecure_keys/key b/kud/hosting_providers/vagrant/insecure_keys/key
new file mode 100644
index 00000000..7d6a0839
--- /dev/null
+++ b/kud/hosting_providers/vagrant/insecure_keys/key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI
+w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP
+kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2
+hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO
+Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW
+yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd
+ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1
+Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf
+TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK
+iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A
+sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf
+4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP
+cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk
+EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN
+CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX
+3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG
+YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj
+3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+
+dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz
+6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC
+P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF
+llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ
+kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH
++vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ
+NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s=
+-----END RSA PRIVATE KEY-----
diff --git a/kud/hosting_providers/vagrant/insecure_keys/key.pub b/kud/hosting_providers/vagrant/insecure_keys/key.pub
new file mode 100644
index 00000000..18a9c00f
--- /dev/null
+++ b/kud/hosting_providers/vagrant/insecure_keys/key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
diff --git a/kud/hosting_providers/vagrant/installer b/kud/hosting_providers/vagrant/installer
new file mode 120000
index 00000000..2b6cb163
--- /dev/null
+++ b/kud/hosting_providers/vagrant/installer
@@ -0,0 +1 @@
+installer.sh \ No newline at end of file
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
new file mode 100755
index 00000000..3f3595b1
--- /dev/null
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -0,0 +1,240 @@
+#!/bin/bash
+#SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o pipefail
+
+# _install_go() - Install the Go language toolchain
+function _install_go {
+ version=$(grep "go_version" ${kud_playbooks}/kud-vars.yml | awk -F "'" '{print $2}')
+ local tarball=go$version.linux-amd64.tar.gz
+
+ if $(go version &>/dev/null); then
+ return
+ fi
+
+ wget https://dl.google.com/go/$tarball
+ sudo tar -C /usr/local -xzf $tarball
+ rm $tarball
+
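+    # Make the Go toolchain available in the current session and persist it in /etc/environment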
+ export PATH=$PATH:/usr/local/go/bin
+ sudo sed -i "s|^PATH=.*|PATH=\"$PATH\"|" /etc/environment
+}
+
+# _install_pip() - Install Python Package Manager
+function _install_pip {
+    if ! $(pip --version &>/dev/null); then
+ sudo apt-get install -y python-dev
+ curl -sL https://bootstrap.pypa.io/get-pip.py | sudo python
+ else
+ sudo -E pip install --upgrade pip
+ fi
+}
+
+# _install_ansible() - Install and configure Ansible
+function _install_ansible {
+ sudo mkdir -p /etc/ansible/
+ if $(ansible --version &>/dev/null); then
+ return
+ fi
+ _install_pip
+ sudo -E pip install ansible
+}
+
+# _install_docker() - Download and install docker-engine
+function _install_docker {
+ local max_concurrent_downloads=${1:-3}
+
+ if $(docker version &>/dev/null); then
+ return
+ fi
+ sudo apt-get install -y apt-transport-https ca-certificates curl
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ sudo apt-get update
+ sudo apt-get install -y docker-ce
+
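+    # Configure the Docker daemon: proxy settings via systemd drop-in files, TCP socket and download limits via /etc/default/docker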
+ sudo mkdir -p /etc/systemd/system/docker.service.d
+ if [ $http_proxy ]; then
+ echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf
+ echo "Environment=\"HTTP_PROXY=$http_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/http-proxy.conf
+ fi
+ if [ $https_proxy ]; then
+ echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/https-proxy.conf
+ echo "Environment=\"HTTPS_PROXY=$https_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/https-proxy.conf
+ fi
+ if [ $no_proxy ]; then
+ echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/no-proxy.conf
+ echo "Environment=\"NO_PROXY=$no_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/no-proxy.conf
+ fi
+ sudo systemctl daemon-reload
+ echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" | sudo tee --append /etc/default/docker
+ if [[ -z $(groups | grep docker) ]]; then
+ sudo usermod -aG docker $USER
+ newgrp docker
+ fi
+
+ sudo systemctl restart docker
+ sleep 10
+}
+
+function _set_environment_file {
+ ansible_ifconfig=$(ansible ovn-central[0] -i $kud_inventory -m shell -a "ifconfig eth1 |grep \"inet addr\" |awk '{print \$2}' |awk -F: '{print \$2}'")
+ if [[ $ansible_ifconfig != *CHANGED* ]]; then
+        echo "Failed to get the OVN central IP address from the eth1 NIC"
+        exit 1
+ fi
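+    # The ansible shell output looks like '<host> | CHANGED | rc=0 >>' followed by the command output; strip everything up to '>>' to keep only the address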
+ echo "export OVN_CENTRAL_ADDRESS=$(echo ${ansible_ifconfig#*>>} | tr '\n' ':')6641" | sudo tee --append /etc/environment
+ echo "export KUBE_CONFIG_DIR=/opt/kubeconfig" | sudo tee --append /etc/environment
+}
+
+# install_k8s() - Install Kubernetes using the Kubespray tool
+function install_k8s {
+ echo "Deploying kubernetes"
+ local dest_folder=/opt
+ version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | awk -F ': ' '{print $2}')
+ local_release_dir=$(grep "local_release_dir" $kud_inventory_folder/group_vars/k8s-cluster.yml | awk -F "\"" '{print $2}')
+ local tarball=v$version.tar.gz
+ sudo apt-get install -y sshpass
+ _install_docker
+ _install_ansible
+ wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
+ sudo tar -C $dest_folder -xzf $tarball
+ sudo mv $dest_folder/kubespray-$version/ansible.cfg /etc/ansible/ansible.cfg
+ sudo chown -R $USER $dest_folder/kubespray-$version
+ sudo mkdir -p ${local_release_dir}/containers
+ rm $tarball
+
+ sudo -E pip install -r $dest_folder/kubespray-$version/requirements.txt
+ rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null
+ if [[ -n "${verbose}" ]]; then
+ echo "kube_log_level: 5" | tee $kud_inventory_folder/group_vars/all.yml
+ else
+ echo "kube_log_level: 2" | tee $kud_inventory_folder/group_vars/all.yml
+ fi
+ echo "kubeadm_enabled: true" | tee --append $kud_inventory_folder/group_vars/all.yml
+ if [[ -n "${http_proxy}" ]]; then
+ echo "http_proxy: \"$http_proxy\"" | tee --append $kud_inventory_folder/group_vars/all.yml
+ fi
+ if [[ -n "${https_proxy}" ]]; then
+ echo "https_proxy: \"$https_proxy\"" | tee --append $kud_inventory_folder/group_vars/all.yml
+ fi
+ ansible-playbook $verbose -i $kud_inventory $dest_folder/kubespray-$version/cluster.yml --become --become-user=root | sudo tee $log_folder/setup-kubernetes.log
+
+ # Configure environment
+ mkdir -p $HOME/.kube
+ cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config
+}
+
+# install_addons() - Install Kubernetes add-ons
+function install_addons {
+ echo "Installing Kubernetes AddOns"
+ _install_ansible
+ sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
+
+ ansible-playbook $verbose -i $kud_inventory $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
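+    # KRD_ADDONS can be set to a space-separated list to override the default add-ons (virtlet ovn4nfv)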
+ for addon in ${KRD_ADDONS:-virtlet ovn4nfv}; do
+ echo "Deploying $addon using configure-$addon.yml playbook.."
+ ansible-playbook $verbose -i $kud_inventory $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log
+ if [[ "${testing_enabled}" == "true" ]]; then
+ pushd $kud_tests
+ bash ${addon}.sh
+ popd
+ fi
+ done
+}
+
+# install_plugin() - Install ONAP Multicloud Kubernetes plugin
+function install_plugin {
+ echo "Installing multicloud/k8s plugin"
+ _install_go
+ _install_docker
+ sudo -E pip install docker-compose
+
+ sudo mkdir -p /opt/{kubeconfig,consul/config}
+ sudo cp $HOME/.kube/config /opt/kubeconfig/kud
+ _set_environment_file
+ source /etc/environment
+
+ pushd $kud_folder/../../../deployments
+ sudo ./build.sh
+ if [[ "${testing_enabled}" == "true" ]]; then
+ docker-compose up -d
+ pushd $kud_tests
+ for functional_test in plugin plugin_edgex; do
+ bash ${functional_test}.sh
+ done
+ popd
+ fi
+ popd
+}
+
+# _print_kubernetes_info() - Prints the Kubernetes login information
+function _print_kubernetes_info {
+ if ! $(kubectl version &>/dev/null); then
+ return
+ fi
+ # Expose Dashboard using NodePort
+ node_port=30080
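+    # KUBE_EDITOR points 'kubectl edit' at a sed command so the service can be patched non-interactively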
+ KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+ KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+
+ master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F ":" '{print $2}')
+
+ printf "Kubernetes Info\n===============\n" > $k8s_info_file
+ echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Admin user: kube" >> $k8s_info_file
+ echo "Admin password: secret" >> $k8s_info_file
+}
+
+if ! sudo -n "true"; then
+ echo ""
+    echo "Passwordless sudo is needed for the '$(id -nu)' user."
+ echo "Please fix your /etc/sudoers file. You likely want an"
+ echo "entry like the following one..."
+ echo ""
+ echo "$(id -nu) ALL=(ALL) NOPASSWD: ALL"
+ exit 1
+fi
+
+if [[ -n "${KUD_DEBUG}" ]]; then
+ set -o xtrace
+ verbose="-vvv"
+fi
+
+# Configuration values
+log_folder=/var/log/kud
+kud_folder=$(pwd)
+kud_infra_folder=$kud_folder/../../deployment_infra
+export kud_inventory_folder=$kud_folder/inventory
+kud_inventory=$kud_inventory_folder/hosts.ini
+kud_playbooks=$kud_infra_folder/playbooks
+kud_tests=$kud_folder/tests
+k8s_info_file=$kud_folder/k8s_info.log
+testing_enabled=${KUD_ENABLE_TESTS:-false}
+
+sudo mkdir -p $log_folder
+sudo mkdir -p /opt/csar
+sudo chown -R $USER /opt/csar
+echo "export CSAR_DIR=/opt/csar" | sudo tee --append /etc/environment
+
+# Install dependencies
+# Setup proxy variables
+if [ -f $kud_folder/sources.list ]; then
+ sudo mv /etc/apt/sources.list /etc/apt/sources.list.backup
+ sudo cp $kud_folder/sources.list /etc/apt/sources.list
+fi
+sudo apt-get update
+install_k8s
+install_addons
+if [[ "${KUD_PLUGIN_ENABLED:-false}" == "true" ]]; then
+ install_plugin
+fi
+_print_kubernetes_info
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
new file mode 100644
index 00000000..8f719a43
--- /dev/null
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -0,0 +1,71 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Kubernetes configuration dirs and system namespace.
+# These hold the additional configuration that Kubernetes
+# normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+kube_api_anonymous_auth: true
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "secret"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+kube_token_auth: true
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: flannel
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+
+# Enable MountPropagation gate feature
+local_volumes_enabled: true
+local_volume_provisioner_enabled: true
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.12.3
+
+# Helm deployment
+helm_enabled: true
+
+# Kube-proxy proxyMode configuration.
+# NOTE: IPVS is based on netfilter hook functions, but uses a hash table as the underlying data structure and
+# works in kernel space
+# https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-ipvs
+#kube_proxy_mode: ipvs
+
+# Download container images only once then push to cluster nodes in batches
+download_run_once: true
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1 GB)
+local_release_dir: "/tmp/releases"
+
+# Makes the installer node a delegate for pushing images while running
+# the deployment with Ansible. This may be the case if cluster nodes
+# cannot access each other via ssh or you want to use local docker
+# images as a cache for multiple clusters.
+download_localhost: true
diff --git a/kud/hosting_providers/vagrant/node.sh b/kud/hosting_providers/vagrant/node.sh
new file mode 100755
index 00000000..a51be19b
--- /dev/null
+++ b/kud/hosting_providers/vagrant/node.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+
+# usage() - Prints the usage of the program
+function usage {
+ cat <<EOF
+usage: $0 [-v volumes]
+Optional Argument:
+    -v  List of key=value pairs for volumes and mount points (e.g. sda=/var/lib/docker/,sdb=/var/lib/libvirt/)
+EOF
+}
+
+# mount_external_partition() - Create partition and mount the external volume
+function mount_external_partition {
+ local dev_name="/dev/$1"
+ local mount_dir=$2
+
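+    # A script of a single ';' line tells sfdisk to create one partition that spans the whole device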
+ sfdisk $dev_name --no-reread << EOF
+;
+EOF
+ mkfs -t ext4 ${dev_name}1
+ mkdir -p $mount_dir
+ mount ${dev_name}1 $mount_dir
+ echo "${dev_name}1 $mount_dir ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
+}
+
+while getopts "h?v:" opt; do
+ case $opt in
+ v)
+ dict_volumes="$OPTARG"
+ ;;
+ h|\?)
+ usage
+ exit
+ ;;
+ esac
+done
+
+swapoff -a
+if [[ -n "${dict_volumes+x}" ]]; then
+ for kv in ${dict_volumes//,/ } ;do
+ mount_external_partition ${kv%=*} ${kv#*=}
+ done
+fi
+
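+# Enable nested virtualization so KVM-based workloads (e.g. Virtlet VMs) can run inside this node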
+vendor_id=$(lscpu|grep "Vendor ID")
+if [[ $vendor_id == *GenuineIntel* ]]; then
+ kvm_ok=$(cat /sys/module/kvm_intel/parameters/nested)
+ if [[ $kvm_ok == 'N' ]]; then
+ echo "Enable Intel Nested-Virtualization"
+ rmmod kvm-intel
+ echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf
+ modprobe kvm-intel
+ echo kvm-intel >> /etc/modules
+ fi
+else
+ kvm_ok=$(cat /sys/module/kvm_amd/parameters/nested)
+ if [[ $kvm_ok == '0' ]]; then
+ echo "Enable AMD Nested-Virtualization"
+ rmmod kvm-amd
+ sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf"
+ modprobe kvm-amd
+ echo kvm-amd >> /etc/modules
+ fi
+fi
+modprobe vhost_net
+echo vhost_net >> /etc/modules
+source /etc/os-release || source /usr/lib/os-release
+case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get install -y cpu-checker
+ kvm-ok
+ ;;
+ rhel|centos|fedora)
+ ;;
+esac
diff --git a/kud/hosting_providers/vagrant/setup.sh b/kud/hosting_providers/vagrant/setup.sh
new file mode 100755
index 00000000..9c65ccdb
--- /dev/null
+++ b/kud/hosting_providers/vagrant/setup.sh
@@ -0,0 +1,201 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+
+vagrant_version=2.2.4
+if ! vagrant version &>/dev/null; then
+ enable_vagrant_install=true
+else
+ if [[ "$vagrant_version" != "$(vagrant version | awk 'NR==1{print $3}')" ]]; then
+ enable_vagrant_install=true
+ fi
+fi
+
+function usage {
+ cat <<EOF
+usage: $0 -p <PROVIDER>
+Installs Vagrant and its dependencies on Linux
+
+Argument:
+ -p Vagrant provider
+EOF
+}
+
+while getopts ":p:" OPTION; do
+ case $OPTION in
+ p)
+ provider=$OPTARG
+ ;;
+ \?)
+ usage
+ exit 1
+ ;;
+ esac
+done
+if [[ -z "${provider+x}" ]]; then
+ usage
+ exit 1
+fi
+
+case $provider in
+ "virtualbox" | "libvirt" )
+ export VAGRANT_DEFAULT_PROVIDER=${provider}
+ ;;
+ * )
+ usage
+ exit 1
+esac
+source /etc/os-release || source /usr/lib/os-release
+
+libvirt_group="libvirt"
+packages=()
+case ${ID,,} in
+ *suse)
+ INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
+ packages+=(python-devel)
+
+ # Vagrant installation
+ if [[ "${enable_vagrant_install+x}" ]]; then
+ vagrant_pgp="pgp_keys.asc"
+ wget -q https://keybase.io/hashicorp/$vagrant_pgp
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.rpm
+ gpg --quiet --with-fingerprint $vagrant_pgp
+ sudo rpm --import $vagrant_pgp
+ sudo rpm --checksig vagrant_${vagrant_version}_x86_64.rpm
+ sudo rpm --install vagrant_${vagrant_version}_x86_64.rpm
+ rm vagrant_${vagrant_version}_x86_64.rpm
+ rm $vagrant_pgp
+ fi
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+ wget -q "http://download.virtualbox.org/virtualbox/rpm/opensuse/$VERSION/virtualbox.repo" -P /etc/zypp/repos.d/
+ $INSTALLER_CMD --enablerepo=epel dkms
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | rpm --import -
+ packages+=(VirtualBox-5.1)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm zlib-devel libxml2-devel libxslt-devel make)
+ # NFS
+ packages+=(nfs-kernel-server)
+ ;;
+ esac
+ sudo zypper -n ref
+ ;;
+
+ ubuntu|debian)
+ libvirt_group="libvirtd"
+ INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
+ packages+=(python-dev)
+
+ # Vagrant installation
+ if [[ "${enable_vagrant_install+x}" ]]; then
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.deb
+ sudo dpkg -i vagrant_${vagrant_version}_x86_64.deb
+ rm vagrant_${vagrant_version}_x86_64.deb
+ fi
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+            echo "deb http://download.virtualbox.org/virtualbox/debian trusty contrib" | sudo tee --append /etc/apt/sources.list
+ wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
+ packages+=(virtualbox-5.1 dkms)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt-bin ebtables dnsmasq libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev cpu-checker)
+ # NFS
+ packages+=(nfs-kernel-server)
+ ;;
+ esac
+ sudo apt-get update
+ ;;
+
+ rhel|centos|fedora)
+ PKG_MANAGER=$(which dnf || which yum)
+ sudo "$PKG_MANAGER" updateinfo
+ INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
+ packages+=(python-devel)
+
+ # Vagrant installation
+ if [[ "${enable_vagrant_install+x}" ]]; then
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.rpm
+ $INSTALLER_CMD vagrant_${vagrant_version}_x86_64.rpm
+ rm vagrant_${vagrant_version}_x86_64.rpm
+ fi
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+ wget -q http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo -P /etc/yum.repos.d
+ $INSTALLER_CMD --enablerepo=epel dkms
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | rpm --import -
+ packages+=(VirtualBox-5.1)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm)
+ # NFS
+ packages+=(nfs-utils nfs-utils-lib)
+ ;;
+ esac
+ ;;
+
+esac
+
+# Enable Nested-Virtualization
+vendor_id=$(lscpu|grep "Vendor ID")
+if [[ $vendor_id == *GenuineIntel* ]]; then
+ kvm_ok=$(cat /sys/module/kvm_intel/parameters/nested)
+ if [[ $kvm_ok == 'N' ]]; then
+ echo "Enable Intel Nested-Virtualization"
+ sudo rmmod kvm-intel
+ echo 'options kvm-intel nested=y' | sudo tee --append /etc/modprobe.d/dist.conf
+ sudo modprobe kvm-intel
+ fi
+else
+ kvm_ok=$(cat /sys/module/kvm_amd/parameters/nested)
+ if [[ $kvm_ok == '0' ]]; then
+ echo "Enable AMD Nested-Virtualization"
+ sudo rmmod kvm-amd
+ echo 'options kvm-amd nested=1' | sudo tee --append /etc/modprobe.d/dist.conf
+ sudo modprobe kvm-amd
+ fi
+fi
+sudo modprobe vhost_net
+
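+# Install the distribution packages collected above, then make sure pip and tox are available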
+${INSTALLER_CMD} "${packages[@]}"
+if ! which pip; then
+ curl -sL https://bootstrap.pypa.io/get-pip.py | sudo python
+else
+ sudo -H -E pip install --upgrade pip
+fi
+sudo -H -E pip install tox
+if [[ ${http_proxy+x} ]]; then
+ vagrant plugin install vagrant-proxyconf
+fi
+if [ "$VAGRANT_DEFAULT_PROVIDER" == libvirt ]; then
+ vagrant plugin install vagrant-libvirt
+    sudo usermod -a -G $libvirt_group "$USER" # This may require reloading the user's group assignments
+ sudo systemctl restart libvirtd
+
+ # Start statd service to prevent NFS lock errors
+ sudo systemctl enable rpc-statd
+ sudo systemctl start rpc-statd
+
+ case ${ID,,} in
+ ubuntu|debian)
+ kvm-ok
+ ;;
+ esac
+fi
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
new file mode 100755
index 00000000..f0fa7d6f
--- /dev/null
+++ b/kud/tests/_common.sh
@@ -0,0 +1,1118 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+image_name=virtlet.cloud/ubuntu/16.04
+multus_deployment_name=multus-deployment
+virtlet_image=virtlet.cloud/fedora
+virtlet_deployment_name=virtlet-deployment
+plugin_deployment_name=plugin-deployment
+plugin_service_name=plugin-service
+ovn4nfv_deployment_name=ovn4nfv-deployment
+onap_private_net=onap-private-net
+unprotected_private_net=unprotected-private-net
+protected_private_net=protected-private-net
+ovn_multus_network_name=ovn-networkobj
+rbd_metadata=rbd_metatada.json
+rbd_content_tarball=vault-consul-dev.tar
+rbp_metadata=rbp_metatada.json
+rbp_instance=rbp_instance.json
+rbp_content_tarball=profile.tar
+
+# vFirewall vars
+demo_artifacts_version=1.3.0
+vfw_private_ip_0='192.168.10.3'
+vfw_private_ip_1='192.168.20.2'
+vfw_private_ip_2='10.10.100.3'
+vpg_private_ip_0='192.168.10.2'
+vpg_private_ip_1='10.0.100.2'
+vsn_private_ip_0='192.168.20.3'
+vsn_private_ip_1='10.10.100.4'
+dcae_collector_ip='10.0.4.1'
+dcae_collector_port='8081'
+protected_net_gw='192.168.20.100'
+protected_net_cidr='192.168.20.0/24'
+protected_private_net_cidr='192.168.10.0/24'
+onap_private_net_cidr='10.10.0.0/16'
+
+# populate_CSAR_containers_vFW() - This function creates the content of the CSAR file
+# required for vFirewall using only containers
+function populate_CSAR_containers_vFW {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - $unprotected_private_net.yaml
+ - $protected_private_net.yaml
+ - $onap_private_net.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+META
+
+ cat << NET > $unprotected_private_net.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $unprotected_private_net
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$protected_private_net_cidr"
+ }
+}'
+NET
+
+ cat << NET > $protected_private_net.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $protected_private_net
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$protected_net_cidr"
+ }
+}'
+NET
+
+ cat << NET > $onap_private_net.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $onap_private_net
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$onap_private_net_cidr"
+ }
+}'
+NET
+
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
+ { "name": "$onap_private_net", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $packetgen_deployment_name
+ image: electrocucaracha/packetgen
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
+ { "name": "$protected_private_net", "interfaceRequest": "eth2" },
+ { "name": "$onap_private_net", "interfaceRequest": "eth3" }
+ ]'
+ spec:
+ containers:
+ - name: $firewall_deployment_name
+ image: electrocucaracha/firewall
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ context: darkstat
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ context: darkstat
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "$protected_private_net", "interfaceRequest": "eth1" },
+ { "name": "$onap_private_net", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $sink_deployment_name
+ image: electrocucaracha/sink
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ securityContext:
+ privileged: true
+ - name: darkstat
+ image: electrocucaracha/darkstat
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ ports:
+ - containerPort: 667
+DEPLOYMENT
+ popd
+}
+
+# populate_CSAR_vms_containers_vFW() - This function creates the content of the CSAR file
+# required for vFirewall using a hybrid combination of virtual machines and
+# containers
+function populate_CSAR_vms_containers_vFW {
+ local csar_id=$1
+ ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - onap-ovn4nfvk8s-network.yaml
+ onapNetwork:
+ - $unprotected_private_net.yaml
+ - $protected_private_net.yaml
+ - $onap_private_net.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+ service:
+ - sink-service.yaml
+META
+
+ cat << SERVICE > sink-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: sink-service
+spec:
+ type: NodePort
+ ports:
+ - port: 667
+ nodePort: 30667
+ selector:
+ app: vFirewall
+ context: darkstat
+SERVICE
+
+ cat << MULTUS_NET > onap-ovn4nfvk8s-network.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $ovn_multus_network_name
+spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name": "ovn4nfv-k8s-plugin",
+ "type": "ovn4nfvk8s-cni"
+ }'
+MULTUS_NET
+
+ cat << NET > $unprotected_private_net.yaml
+apiVersion: v1
+kind: onapNetwork
+metadata:
+ name: $unprotected_private_net
+ cnitype : ovn4nfvk8s
+spec:
+ name: $unprotected_private_net
+ subnet: $protected_private_net_cidr
+ gateway: 192.168.10.1/24
+NET
+
+ cat << NET > $protected_private_net.yaml
+apiVersion: v1
+kind: onapNetwork
+metadata:
+ name: $protected_private_net
+ cnitype : ovn4nfvk8s
+spec:
+ name: $protected_private_net
+ subnet: $protected_net_cidr
+ gateway: $protected_net_gw/24
+NET
+
+ cat << NET > $onap_private_net.yaml
+apiVersion: v1
+kind: onapNetwork
+metadata:
+ name: $onap_private_net
+ cnitype : ovn4nfvk8s
+spec:
+ name: $onap_private_net
+ subnet: $onap_private_net_cidr
+ gateway: 10.10.0.1/16
+NET
+
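+    # Build cloud-init fragments that inject the demo variables and the host proxy settings into the VMs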
+ proxy="apt:"
+ cloud_init_proxy="
+ - export demo_artifacts_version=$demo_artifacts_version
+ - export vfw_private_ip_0=$vfw_private_ip_0
+ - export vsn_private_ip_0=$vsn_private_ip_0
+ - export protected_net_cidr=$protected_net_cidr
+ - export dcae_collector_ip=$dcae_collector_ip
+ - export dcae_collector_port=$dcae_collector_port
+ - export protected_net_gw=$protected_net_gw
+ - export protected_private_net_cidr=$protected_private_net_cidr
+"
+ if [[ -n "${http_proxy+x}" ]]; then
+ proxy+="
+ http_proxy: $http_proxy"
+ cloud_init_proxy+="
+ - export http_proxy=$http_proxy"
+ fi
+ if [[ -n "${https_proxy+x}" ]]; then
+ proxy+="
+ https_proxy: $https_proxy"
+ cloud_init_proxy+="
+ - export https_proxy=$https_proxy"
+ fi
+ if [[ -n "${no_proxy+x}" ]]; then
+ cloud_init_proxy+="
+ - export no_proxy=$no_proxy"
+ fi
+
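+    # The Virtlet* annotations configure the VM (libvirt CPU mode, cloud-init user data, SSH keys, root volume size); ovnNetwork attaches the OVN interfaces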
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ ssh_authorized_keys:
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/vagrant/tests/vFW/$packetgen_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
+ VirtletRootVolumeSize: 5Gi
+ k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
+ ovnNetwork: '[
+ { "name": "$unprotected_private_net", "ipAddress": "$vpg_private_ip_0", "interface": "eth1" , "defaultGateway": "false"},
+ { "name": "$onap_private_net", "ipAddress": "$vpg_private_ip_1", "interface": "eth2" , "defaultGateway": "false"}
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $packetgen_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ ports:
+ - containerPort: 8183
+ resources:
+ limits:
+ memory: 4Gi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ ssh_authorized_keys:
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/vagrant/tests/vFW/$firewall_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
+ VirtletRootVolumeSize: 5Gi
+ k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
+ ovnNetwork: '[
+ { "name": "$unprotected_private_net", "ipAddress": "$vfw_private_ip_0", "interface": "eth1" , "defaultGateway": "false"},
+ { "name": "$protected_private_net", "ipAddress": "$vfw_private_ip_1", "interface": "eth2", "defaultGateway": "false" },
+ { "name": "$onap_private_net", "ipAddress": "$vfw_private_ip_2", "interface": "eth3" , "defaultGateway": "false"}
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $firewall_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 4Gi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ context: darkstat
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ context: darkstat
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
+ ovnNetwork: '[
+ { "name": "$protected_private_net", "ipAddress": "$vsn_private_ip_0", "interface": "eth1", "defaultGateway": "false" },
+ { "name": "$onap_private_net", "ipAddress": "$vsn_private_ip_1", "interface": "eth2" , "defaultGateway": "false"}
+ ]'
+ spec:
+ containers:
+ - name: $sink_deployment_name
+ image: electrocucaracha/sink
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ securityContext:
+ privileged: true
+ - name: darkstat
+ image: electrocucaracha/darkstat
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ ports:
+ - containerPort: 667
+DEPLOYMENT
+ popd
+}
+
+# populate_CSAR_vms_vFW() - This function creates the content of the CSAR file
+# required for vFirewall using only virtual machines
+function populate_CSAR_vms_vFW {
+ local csar_id=$1
+ ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - $unprotected_private_net.yaml
+ - $protected_private_net.yaml
+ - $onap_private_net.yaml
+ deployment:
+ - $packetgen_deployment_name.yaml
+ - $firewall_deployment_name.yaml
+ - $sink_deployment_name.yaml
+META
+
+ cat << NET > $unprotected_private_net.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $unprotected_private_net
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$protected_private_net_cidr"
+ }
+}'
+NET
+
+ cat << NET > $protected_private_net.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $protected_private_net
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$protected_net_cidr"
+ }
+}'
+NET
+
+ cat << NET > $onap_private_net.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $onap_private_net
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$onap_private_net_cidr"
+ }
+}'
+NET
+
+ proxy="apt:"
+ cloud_init_proxy="
+ - export demo_artifacts_version=$demo_artifacts_version
+ - export vfw_private_ip_0=$vfw_private_ip_0
+ - export vsn_private_ip_0=$vsn_private_ip_0
+ - export protected_net_cidr=$protected_net_cidr
+ - export dcae_collector_ip=$dcae_collector_ip
+ - export dcae_collector_port=$dcae_collector_port
+ - export protected_net_gw=$protected_net_gw
+ - export protected_private_net_cidr=$protected_private_net_cidr
+"
+ if [[ -n "${http_proxy+x}" ]]; then
+ proxy+="
+ http_proxy: $http_proxy"
+ cloud_init_proxy+="
+ - export http_proxy=$http_proxy"
+ fi
+ if [[ -n "${https_proxy+x}" ]]; then
+ proxy+="
+ https_proxy: $https_proxy"
+ cloud_init_proxy+="
+ - export https_proxy=$https_proxy"
+ fi
+ if [[ -n "${no_proxy+x}" ]]; then
+ cloud_init_proxy+="
+ - export no_proxy=$no_proxy"
+ fi
+
+ cat << DEPLOYMENT > $packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ ssh_authorized_keys:
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/vagrant/tests/vFW/$packetgen_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
+ VirtletRootVolumeSize: 5Gi
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
+ { "name": "$onap_private_net", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $packetgen_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ ports:
+ - containerPort: 8183
+ resources:
+ limits:
+ memory: 4Gi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ ssh_authorized_keys:
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/vagrant/tests/vFW/$firewall_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
+ VirtletRootVolumeSize: 5Gi
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
+ { "name": "$protected_private_net", "interfaceRequest": "eth2" },
+ { "name": "$onap_private_net", "interfaceRequest": "eth3" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $firewall_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 4Gi
+DEPLOYMENT
+
+ cat << DEPLOYMENT > $sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-model
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: admin
+ gecos: User
+ primary-group: admin
+ groups: users
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ # the password is "admin"
+ passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+ ssh_authorized_keys:
+ $ssh_key
+ $proxy
+ runcmd:
+ $cloud_init_proxy
+ - wget -O - https://git.onap.org/multicloud/k8s/plain/vagrant/tests/vFW/$sink_deployment_name | sudo -E bash
+ VirtletSSHKeys: |
+ $ssh_key
+ VirtletRootVolumeSize: 5Gi
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "$protected_private_net", "interfaceRequest": "eth1" },
+ { "name": "$onap_private_net", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $sink_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ ports:
+ - containerPort: 667
+ resources:
+ limits:
+ memory: 4Gi
+DEPLOYMENT
+ popd
+}
+
+# populate_CSAR_multus() - This function creates the content of the CSAR file
+# required for testing the Multus feature
+function populate_CSAR_multus {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ network:
+ - bridge-network.yaml
+ deployment:
+ - $multus_deployment_name.yaml
+META
+
+ cat << NET > bridge-network.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: bridge-conf
+spec:
+ config: '{
+ "cniVersion": "0.3.0",
+ "name": "mynet",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "$onap_private_net_cidr"
+ }
+}'
+NET
+
+ cat << DEPLOYMENT > $multus_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $multus_deployment_name
+ labels:
+ app: multus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: multus
+ template:
+ metadata:
+ labels:
+ app: multus
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[
+ { "name": "bridge-conf", "interfaceRequest": "eth1" },
+ { "name": "bridge-conf", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $multus_deployment_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+ popd
+}
+
+# populate_CSAR_virtlet() - This function creates the content of the CSAR file
+# required for testing the Virtlet feature
+function populate_CSAR_virtlet {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ deployment:
+ - $virtlet_deployment_name.yaml
+META
+
+ cat << DEPLOYMENT > $virtlet_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $virtlet_deployment_name
+ labels:
+ app: virtlet
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: virtlet
+ template:
+ metadata:
+ labels:
+ app: virtlet
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-passthrough
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ users:
+ - name: testuser
+ gecos: User
+ primary-group: testuser
+ groups: users
+ lock_passwd: false
+ shell: /bin/bash
+ # the password is "testuser"
+ passwd: "\$6\$rounds=4096\$wPs4Hz4tfs\$a8ssMnlvH.3GX88yxXKF2cKMlVULsnydoOKgkuStTErTq2dzKZiIx9R/pPWWh5JLxzoZEx7lsSX5T2jW5WISi1"
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ runcmd:
+ - echo hello world
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $virtlet_deployment_name
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: $virtlet_image
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for "kubectl attach -t" to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
+DEPLOYMENT
+ popd
+}
+
+# populate_CSAR_plugin() - Creates content used for Plugin functional tests
+function populate_CSAR_plugin {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ deployment:
+ - $plugin_deployment_name.yaml
+ service:
+ - service.yaml
+META
+
+ cat << DEPLOYMENT > $plugin_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $plugin_deployment_name
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: plugin
+ template:
+ metadata:
+ labels:
+ app: plugin
+ spec:
+ containers:
+ - name: $plugin_deployment_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+
+ cat << SERVICE > service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: $plugin_service_name
+spec:
+ ports:
+ - port: 80
+ protocol: TCP
+ selector:
+ app: sise
+SERVICE
+ popd
+}
+
+# populate_CSAR_ovn4nfv() - Creates content used for the OVN4NFV functional test
+function populate_CSAR_ovn4nfv {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+
+ cat << META > metadata.yaml
+resources:
+ onap_network:
+ - ovn-port-net.yaml
+ - ovn-priv-net.yaml
+ network:
+ - onap-ovn4nfvk8s-network.yaml
+ deployment:
+ - $ovn4nfv_deployment_name.yaml
+META
+
+ cat << MULTUS_NET > onap-ovn4nfvk8s-network.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: $ovn_multus_network_name
+spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name": "ovn4nfv-k8s-plugin",
+ "type": "ovn4nfvk8s-cni"
+ }'
+MULTUS_NET
+
+ cat << NETWORK > ovn-port-net.yaml
+apiVersion: v1
+kind: onapNetwork
+metadata:
+ name: ovn-port-net
+ cnitype : ovn4nfvk8s
+spec:
+ name: ovn-port-net
+ subnet: 172.16.33.0/24
+ gateway: 172.16.33.1/24
+NETWORK
+
+ cat << NETWORK > ovn-priv-net.yaml
+apiVersion: v1
+kind: onapNetwork
+metadata:
+ name: ovn-priv-net
+ cnitype : ovn4nfvk8s
+spec:
+ name: ovn-priv-net
+ subnet: 172.16.44.0/24
+ gateway: 172.16.44.1/24
+NETWORK
+
+ cat << DEPLOYMENT > $ovn4nfv_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $ovn4nfv_deployment_name
+ labels:
+ app: ovn4nfv
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ovn4nfv
+ template:
+ metadata:
+ labels:
+ app: ovn4nfv
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
+ ovnNetwork: '[{ "name": "ovn-port-net", "interface": "net0" , "defaultGateway": "false"},
+ { "name": "ovn-priv-net", "interface": "net1" , "defaultGateway": "false"}]'
+ spec:
+ containers:
+ - name: $ovn4nfv_deployment_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+ popd
+}
+
+# populate_CSAR_rbdefinition() - Function that populates the CSAR folder
+# for testing the resource bundle definition
+function populate_CSAR_rbdefinition {
+ local csar_id=$1
+
+ _checks_args $csar_id
+ pushd ${CSAR_DIR}/${csar_id}
+ print_msg "Create Helm Chart Archives"
+ rm -f ${rbd_content_tarball}.gz
+ rm -f ${rbp_content_tarball}.gz
+ tar -cf $rbd_content_tarball -C $test_folder/vnfs/testrb/helm vault-consul-dev
+ tar -cf $rbp_content_tarball -C $test_folder/vnfs/testrb/helm/profile .
+ gzip $rbp_content_tarball
+ gzip $rbd_content_tarball
+ popd
+}
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
new file mode 100755
index 00000000..c25d1f2f
--- /dev/null
+++ b/kud/tests/_functions.sh
@@ -0,0 +1,155 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function print_msg {
+ local msg=$1
+ local RED='\033[0;31m'
+ local NC='\033[0m'
+
+ echo -e "${RED} $msg ---------------------------------------${NC}"
+}
+
+function _get_ovn_central_address {
+ ansible_ifconfig=$(ansible ovn-central[0] -i $test_folder/../hosting_providers/vagrant/inventory/hosts.ini -m shell -a "ifconfig eth1 |grep \"inet addr\" |awk '{print \$2}' |awk -F: '{print \$2}'")
+ if [[ $ansible_ifconfig != *CHANGED* ]]; then
+        echo "Failed to get the OVN central IP address from the eth1 NIC"
+        exit 1
+ fi
+ echo "$(echo ${ansible_ifconfig#*>>} | tr '\n' ':')6641"
+}
+
+# install_ovn_deps() - Install dependencies for tests that require OVN
+function install_ovn_deps {
+ if ! $(yq --version &>/dev/null); then
+ sudo -E pip install yq
+ fi
+ if ! $(ovn-nbctl --version &>/dev/null); then
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ sudo apt-get install -y apt-transport-https
+ echo "deb https://packages.wand.net.nz $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/wand.list
+ sudo curl https://packages.wand.net.nz/keyring.gpg -o /etc/apt/trusted.gpg.d/wand.gpg
+ sudo apt-get update
+ sudo apt install -y ovn-common
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ fi
+}
+
+# init_network() - This function creates the OVN resources required by the test
+function init_network {
+ local fname=$1
+ local router_name="ovn4nfv-master"
+
+ name=$(cat $fname | yq '.spec.name' | xargs)
+ subnet=$(cat $fname | yq '.spec.subnet' | xargs)
+ gateway=$(cat $fname | yq '.spec.gateway' | xargs)
+ ovn_central_address=$(_get_ovn_central_address)
+
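+    # Create the logical switch, attach it to the ovn4nfv-master router through a router port, and add the matching switch port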
+ router_mac=$(printf '00:00:00:%02X:%02X:%02X' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))
+ ovn-nbctl --may-exist --db tcp:$ovn_central_address ls-add $name -- set logical_switch $name other-config:subnet=$subnet external-ids:gateway_ip=$gateway
+ ovn-nbctl --may-exist --db tcp:$ovn_central_address lrp-add $router_name rtos-$name $router_mac $gateway
+ ovn-nbctl --may-exist --db tcp:$ovn_central_address lsp-add $name stor-$name -- set logical_switch_port stor-$name type=router options:router-port=rtos-$name addresses=\"$router_mac\"
+}
+
+# cleanup_network() - This function removes the OVN resources created for the test
+function cleanup_network {
+ local fname=$1
+
+ name=$(cat $fname | yq '.spec.name' | xargs)
+ ovn_central_address=$(_get_ovn_central_address)
+
+ for cmd in "ls-del $name" "lrp-del rtos-$name" "lsp-del stor-$name"; do
+ ovn-nbctl --if-exist --db tcp:$ovn_central_address $cmd
+ done
+}
+
+function _checks_args {
+ if [[ -z $1 ]]; then
+ echo "Missing CSAR ID argument"
+ exit 1
+ fi
+ if [[ -z $CSAR_DIR ]]; then
+ echo "CSAR_DIR global environment value is empty"
+ exit 1
+ fi
+ mkdir -p ${CSAR_DIR}/${1}
+}
+
+# destroy_deployment() - This function ensures that a specific deployment is
+# destroyed in Kubernetes
+function destroy_deployment {
+ local deployment_name=$1
+
+ echo "$(date +%H:%M:%S) - $deployment_name : Destroying deployment"
+ kubectl delete deployment $deployment_name --ignore-not-found=true --now
+ while kubectl get deployment $deployment_name &>/dev/null; do
+ echo "$(date +%H:%M:%S) - $deployment_name : Destroying deployment"
+ done
+}
+
+# recreate_deployment() - This function destroys an existing deployment and
+# creates a new one based on its YAML file
+function recreate_deployment {
+ local deployment_name=$1
+
+ destroy_deployment $deployment_name
+ kubectl create -f $deployment_name.yaml
+}
+
+# wait_deployment() - Waits for the deployment's pods to reach the Running status
+function wait_deployment {
+ local deployment_name=$1
+
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+}
+
+# setup() - Base testing setup shared among functional tests
+function setup {
+ for deployment_name in $@; do
+ recreate_deployment $deployment_name
+ done
+ sleep 5
+ for deployment_name in $@; do
+ wait_deployment $deployment_name
+ done
+}
+
+# teardown() - Base testing teardown function
+function teardown {
+ for deployment_name in $@; do
+ destroy_deployment $deployment_name
+ done
+}
+
+if ! $(kubectl version &>/dev/null); then
+    echo "This functional test requires the kubectl client"
+ exit 1
+fi
+test_folder=$(pwd)
diff --git a/kud/tests/cFW/README.md b/kud/tests/cFW/README.md
new file mode 100644
index 00000000..c6ac9e20
--- /dev/null
+++ b/kud/tests/cFW/README.md
@@ -0,0 +1,10 @@
+# Cloud-Native Firewall Virtual Network Function
+
+[CNF][1] version of the ONAP vFirewall use case.
+
+## License
+
+Apache-2.0
+
+[1]: https://github.com/ligato/cn-infra/blob/master/docs/readmes/cn_virtual_function.md
+[2]: https://github.com/electrocucaracha/vFW-demo
diff --git a/kud/tests/cFW/Vagrantfile b/kud/tests/cFW/Vagrantfile
new file mode 100644
index 00000000..d02e7d01
--- /dev/null
+++ b/kud/tests/cFW/Vagrantfile
@@ -0,0 +1,33 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure("2") do |config|
+ config.vm.box = "elastic/ubuntu-16.04-x86_64"
+ config.vm.hostname = "demo"
+ config.vm.provision 'shell', path: 'postinstall.sh'
+ config.vm.network :private_network, :ip => "192.168.10.5", :type => :static # unprotected_private_net_cidr
+ config.vm.network :private_network, :ip => "192.168.20.5", :type => :static # protected_private_net_cidr
+ config.vm.network :private_network, :ip => "10.10.12.5", :type => :static, :netmask => "16" # onap_private_net_cidr
+
+ if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil
+ if not Vagrant.has_plugin?('vagrant-proxyconf')
+ system 'vagrant plugin install vagrant-proxyconf'
+ raise 'vagrant-proxyconf was installed but it requires to execute again'
+ end
+ config.proxy.http = ENV['http_proxy'] || ENV['HTTP_PROXY'] || ""
+ config.proxy.https = ENV['https_proxy'] || ENV['HTTPS_PROXY'] || ""
+ config.proxy.no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+ config.proxy.enabled = { docker: false }
+ end
+
+ config.vm.provider 'virtualbox' do |v|
+ v.customize ["modifyvm", :id, "--memory", 8192]
+ v.customize ["modifyvm", :id, "--cpus", 2]
+ end
+ config.vm.provider 'libvirt' do |v|
+ v.memory = 8192
+ v.cpus = 2
+ v.nested = true
+ v.cpu_mode = 'host-passthrough'
+ end
+end
diff --git a/kud/tests/cFW/darkstat/Dockerfile b/kud/tests/cFW/darkstat/Dockerfile
new file mode 100644
index 00000000..d3a46b9c
--- /dev/null
+++ b/kud/tests/cFW/darkstat/Dockerfile
@@ -0,0 +1,14 @@
+FROM ubuntu:16.04
+MAINTAINER Victor Morales <electrocucaracha@gmail.com>
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+
+RUN apt-get update && apt-get install -y -qq darkstat
+
+EXPOSE 667
+
+CMD ["/usr/sbin/darkstat", "-i", "eth1", "--no-daemon"]
diff --git a/kud/tests/cFW/docker-compose.yml b/kud/tests/cFW/docker-compose.yml
new file mode 100644
index 00000000..6d883fbd
--- /dev/null
+++ b/kud/tests/cFW/docker-compose.yml
@@ -0,0 +1,38 @@
+version: '3'
+
+services:
+ packetgen:
+ privileged: true
+ network_mode: "host"
+ image: electrocucaracha/packetgen
+ build:
+ context: ./packetgen
+ args:
+ HTTP_PROXY: $HTTP_PROXY
+ HTTPS_PROXY: $HTTPS_PROXY
+ firewall:
+ privileged: true
+ network_mode: "host"
+ image: electrocucaracha/firewall
+ build:
+ context: ./firewall
+ args:
+ HTTP_PROXY: $HTTP_PROXY
+ HTTPS_PROXY: $HTTPS_PROXY
+ sink:
+ privileged: true
+ network_mode: "host"
+ image: electrocucaracha/sink
+ build:
+ context: ./sink
+ args:
+ HTTP_PROXY: $HTTP_PROXY
+ HTTPS_PROXY: $HTTPS_PROXY
+ darkstat:
+ network_mode: "host"
+ image: electrocucaracha/darkstat
+ build:
+ context: ./darkstat
+ args:
+ HTTP_PROXY: $HTTP_PROXY
+ HTTPS_PROXY: $HTTPS_PROXY
diff --git a/kud/tests/cFW/firewall/Dockerfile b/kud/tests/cFW/firewall/Dockerfile
new file mode 100644
index 00000000..7d3e6ede
--- /dev/null
+++ b/kud/tests/cFW/firewall/Dockerfile
@@ -0,0 +1,49 @@
+FROM electrocucaracha/vpp
+MAINTAINER Victor Morales <electrocucaracha@gmail.com>
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+ENV repo_url "https://nexus.onap.org/content/repositories/staging/org/onap/demo/vnf"
+
+ENV protected_net_cidr "192.168.20.0/24"
+ENV fw_ipaddr "192.168.10.100"
+ENV sink_ipaddr "192.168.20.250"
+ENV demo_artifacts_version "1.3.0"
+
+RUN apt-get install -y -qq wget openjdk-8-jre bridge-utils net-tools \
+ bsdmainutils make gcc libcurl4-gnutls-dev
+
+WORKDIR /opt
+
+RUN wget "https://git.onap.org/demo/plain/vnfs/vFW/scripts/v_firewall_init.sh" \
+ && chmod +x v_firewall_init.sh \
+ && sed -i 's|start vpp|/usr/bin/vpp -c /etc/vpp/startup.conf|g' v_firewall_init.sh
+
+RUN wget "${repo_url}/sample-distribution/${demo_artifacts_version}/sample-distribution-${demo_artifacts_version}-hc.tar.gz" \
+ && tar -zmxf sample-distribution-${demo_artifacts_version}-hc.tar.gz \
+ && rm sample-distribution-${demo_artifacts_version}-hc.tar.gz \
+ && mv sample-distribution-${demo_artifacts_version} honeycomb \
+ && sed -i 's/"restconf-binding-address": "127.0.0.1",/"restconf-binding-address": "0.0.0.0",/g' /opt/honeycomb/config/honeycomb.json
+
+RUN wget "${repo_url}/ves5/ves/${demo_artifacts_version}/ves-${demo_artifacts_version}-demo.tar.gz" \
+ && tar -zmxf ves-${demo_artifacts_version}-demo.tar.gz \
+ && rm ves-${demo_artifacts_version}-demo.tar.gz \
+ && mv ves-${demo_artifacts_version} VES
+
+RUN wget "${repo_url}/ves5/ves_vfw_reporting/${demo_artifacts_version}/ves_vfw_reporting-${demo_artifacts_version}-demo.tar.gz" \
+ && tar -zmxf ves_vfw_reporting-${demo_artifacts_version}-demo.tar.gz \
+ && rm ves_vfw_reporting-${demo_artifacts_version}-demo.tar.gz \
+ && mv ves_vfw_reporting-${demo_artifacts_version} VES/evel/evel-library/code/VESreporting \
+ && chmod +x VES/evel/evel-library/code/VESreporting/go-client.sh \
+ && cd VES/evel/evel-library/bldjobs/ && make clean && make && cd -
+
+RUN mkdir -p /opt/config/ \
+ && echo $protected_net_cidr > /opt/config/protected_net_cidr.txt \
+ && echo $fw_ipaddr > /opt/config/fw_ipaddr.txt \
+ && echo $sink_ipaddr > /opt/config/sink_ipaddr.txt \
+ && echo $demo_artifacts_version > /opt/config/demo_artifacts_version.txt
+
+CMD ["./v_firewall_init.sh"]
diff --git a/kud/tests/cFW/packetgen/Dockerfile b/kud/tests/cFW/packetgen/Dockerfile
new file mode 100644
index 00000000..cb1da555
--- /dev/null
+++ b/kud/tests/cFW/packetgen/Dockerfile
@@ -0,0 +1,44 @@
+FROM electrocucaracha/vpp
+MAINTAINER Victor Morales <electrocucaracha@gmail.com>
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+ENV repo_url "https://nexus.onap.org/content/repositories/staging/org/onap/demo/vnf"
+
+ENV protected_net_cidr "192.168.20.0/24"
+ENV fw_ipaddr "192.168.10.100"
+ENV sink_ipaddr "192.168.20.250"
+ENV demo_artifacts_version "1.3.0"
+
+RUN apt-get install -y -qq wget openjdk-8-jre bridge-utils net-tools \
+ bsdmainutils
+
+WORKDIR /opt
+EXPOSE 8183
+
+RUN wget "https://git.onap.org/demo/plain/vnfs/vFW/scripts/v_packetgen_init.sh" \
+ && wget "https://git.onap.org/demo/plain/vnfs/vFW/scripts/run_traffic_fw_demo.sh" \
+ && chmod +x *.sh \
+ && sed -i 's|start vpp|/usr/bin/vpp -c /etc/vpp/startup.conf|g;s|/opt/honeycomb/sample-distribution-\$VERSION/honeycomb|/opt/honeycomb/honeycomb|g' v_packetgen_init.sh
+
+RUN wget "${repo_url}/sample-distribution/${demo_artifacts_version}/sample-distribution-${demo_artifacts_version}-hc.tar.gz" \
+ && tar -zmxf sample-distribution-${demo_artifacts_version}-hc.tar.gz \
+ && rm sample-distribution-${demo_artifacts_version}-hc.tar.gz \
+ && mv sample-distribution-${demo_artifacts_version} honeycomb \
+ && sed -i 's/"restconf-binding-address": "127.0.0.1",/"restconf-binding-address": "0.0.0.0",/g' /opt/honeycomb/config/honeycomb.json
+
+RUN wget "${repo_url}/vfw/vfw_pg_streams/${demo_artifacts_version}/vfw_pg_streams-${demo_artifacts_version}-demo.tar.gz" \
+ && tar -zmxf vfw_pg_streams-${demo_artifacts_version}-demo.tar.gz \
+ && rm vfw_pg_streams-${demo_artifacts_version}-demo.tar.gz \
+ && mv vfw_pg_streams-${demo_artifacts_version} pg_streams
+
+RUN mkdir -p /opt/config/ \
+ && echo $protected_net_cidr > /opt/config/protected_net_cidr.txt \
+ && echo $fw_ipaddr > /opt/config/fw_ipaddr.txt \
+ && echo $sink_ipaddr > /opt/config/sink_ipaddr.txt \
+ && echo $demo_artifacts_version > /opt/config/demo_artifacts_version.txt
+
+CMD ["./v_packetgen_init.sh"]
diff --git a/kud/tests/cFW/postinstall.sh b/kud/tests/cFW/postinstall.sh
new file mode 100755
index 00000000..5a1d5043
--- /dev/null
+++ b/kud/tests/cFW/postinstall.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+set -o xtrace
+
+# install_docker() - Download and install docker-engine
+function install_docker {
+ local max_concurrent_downloads=${1:-3}
+
+    if docker version &>/dev/null; then
+ return
+ fi
+ apt-get install -y software-properties-common linux-image-extra-$(uname -r) linux-image-extra-virtual apt-transport-https ca-certificates curl
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ apt-get update
+ apt-get install -y docker-ce
+
+ mkdir -p /etc/systemd/system/docker.service.d
+    if [ "${http_proxy:-}" ]; then
+ cat <<EOL > /etc/systemd/system/docker.service.d/http-proxy.conf
+[Service]
+Environment="HTTP_PROXY=$http_proxy"
+EOL
+ fi
+    if [ "${https_proxy:-}" ]; then
+ cat <<EOL > /etc/systemd/system/docker.service.d/https-proxy.conf
+[Service]
+Environment="HTTPS_PROXY=$https_proxy"
+EOL
+ fi
+    if [ "${no_proxy:-}" ]; then
+ cat <<EOL > /etc/systemd/system/docker.service.d/no-proxy.conf
+[Service]
+Environment="NO_PROXY=$no_proxy"
+EOL
+ fi
+ systemctl daemon-reload
+ echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" >> /etc/default/docker
+ usermod -aG docker $USER
+
+ systemctl restart docker
+ sleep 10
+}
+
+# install_docker_compose() - Installs docker compose python module
+function install_docker_compose {
+ if ! which pip; then
+ curl -sL https://bootstrap.pypa.io/get-pip.py | python
+ fi
+ pip install --upgrade pip
+ pip install docker-compose
+}
+
+echo 'vm.nr_hugepages = 1024' >> /etc/sysctl.conf
+sysctl -p
+
+install_docker
+install_docker_compose
+
+cd /vagrant
+# build vpp docker image
+BUILD_ARGS="--no-cache"
+if [ "${HTTP_PROXY:-}" ]; then
+ BUILD_ARGS+=" --build-arg HTTP_PROXY=${HTTP_PROXY}"
+fi
+if [ "${HTTPS_PROXY:-}" ]; then
+ BUILD_ARGS+=" --build-arg HTTPS_PROXY=${HTTPS_PROXY}"
+fi
+pushd vpp
+docker build ${BUILD_ARGS} -t electrocucaracha/vpp:latest .
+popd
+
+docker-compose up -d
diff --git a/kud/tests/cFW/sink/Dockerfile b/kud/tests/cFW/sink/Dockerfile
new file mode 100644
index 00000000..6b43ba61
--- /dev/null
+++ b/kud/tests/cFW/sink/Dockerfile
@@ -0,0 +1,34 @@
+FROM ubuntu:16.04
+MAINTAINER Victor Morales <electrocucaracha@gmail.com>
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+
+ENV protected_net_cidr "192.168.20.0/24"
+ENV fw_ipaddr "192.168.10.100"
+ENV sink_ipaddr "192.168.20.250"
+ENV demo_artifacts_version "1.3.0"
+ENV protected_net_gw "192.168.20.100"
+ENV unprotected_net "192.168.10.0/24"
+
+RUN apt-get update && apt-get install -y -qq wget net-tools
+
+WORKDIR /opt
+
+RUN wget "https://git.onap.org/demo/plain/vnfs/vFW/scripts/v_sink_init.sh" \
+ && chmod +x v_sink_init.sh
+
+RUN mkdir -p config/ \
+ && echo $protected_net_cidr > config/protected_net_cidr.txt \
+ && echo $fw_ipaddr > config/fw_ipaddr.txt \
+ && echo $sink_ipaddr > config/sink_ipaddr.txt \
+ && echo $demo_artifacts_version > config/demo_artifacts_version.txt \
+ && echo $protected_net_gw > config/protected_net_gw.txt \
+ && echo $unprotected_net > config/unprotected_net.txt
+
+# NOTE: this script executes "route add -net 192.168.10.0 netmask 255.255.255.0 gw 192.168.20.100",
+# which fails with "SIOCADDRT: File exists" when the container doesn't have all the required NICs
+CMD ["./v_sink_init.sh"]
diff --git a/kud/tests/cFW/vpp/80-vpp.conf b/kud/tests/cFW/vpp/80-vpp.conf
new file mode 100644
index 00000000..8fdf184c
--- /dev/null
+++ b/kud/tests/cFW/vpp/80-vpp.conf
@@ -0,0 +1,15 @@
+# Number of 2MB hugepages desired
+vm.nr_hugepages=1024
+
+# Must be greater than or equal to (2 * vm.nr_hugepages).
+vm.max_map_count=3096
+
+# All groups allowed to access hugepages
+vm.hugetlb_shm_group=0
+
+# Shared Memory Max must be greater than or equal to the total size of hugepages.
+# For 2MB pages, TotalHugepageSize = vm.nr_hugepages * 2 * 1024 * 1024
+# If the existing kernel.shmmax setting (cat /sys/proc/kernel/shmmax)
+# is greater than the calculated TotalHugepageSize then set this parameter
+# to current shmmax value.
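+# For example, with vm.nr_hugepages=1024: 1024 * 2 * 1024 * 1024 = 2147483648,
+# which is the value used below.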
+kernel.shmmax=2147483648
diff --git a/kud/tests/cFW/vpp/Dockerfile b/kud/tests/cFW/vpp/Dockerfile
new file mode 100644
index 00000000..63b08b01
--- /dev/null
+++ b/kud/tests/cFW/vpp/Dockerfile
@@ -0,0 +1,17 @@
+FROM ubuntu:16.04
+MAINTAINER Victor Morales <electrocucaracha@gmail.com>
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+
+RUN apt-get update && apt-get install -y -qq apt-transport-https \
+ && echo "deb [trusted=yes] https://nexus.fd.io/content/repositories/fd.io.stable.1609.ubuntu.xenial.main/ ./" | tee -a /etc/apt/sources.list.d/99fd.io.list \
+ && apt-get update \
+ && apt-get install -y -qq vpp vpp-lib vpp-plugins
+
+COPY 80-vpp.conf /etc/sysctl.d/80-vpp.conf
+
+CMD ["/usr/bin/vpp", "-c", "/etc/vpp/startup.conf"]
diff --git a/kud/tests/generic_simulator/Dockerfile b/kud/tests/generic_simulator/Dockerfile
new file mode 100644
index 00000000..202cafc6
--- /dev/null
+++ b/kud/tests/generic_simulator/Dockerfile
@@ -0,0 +1,27 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+FROM python:2.7
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+
+EXPOSE 8080
+
+RUN mkdir -p /tmp/generic_sim /etc/generic_sim
+
+WORKDIR /opt/generic_sim/
+
+COPY . .
+RUN pip install --no-cache-dir -r requirements.txt
+
+CMD [ "python", "generic_sim.py" ]
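+
+# Example usage (a sketch; kud/tests/plugin.sh mounts the aai/ responses the same way):
+#   docker build -t generic_sim .
+#   docker run -d -v $(pwd)/aai:/etc/generic_sim/ -p 8443:8080 generic_sim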
diff --git a/kud/tests/generic_simulator/aai/responses.yml b/kud/tests/generic_simulator/aai/responses.yml
new file mode 100644
index 00000000..041e5207
--- /dev/null
+++ b/kud/tests/generic_simulator/aai/responses.yml
@@ -0,0 +1,18 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne:
+ GET:
+ body: '{"cloud-owner":"CloudOwner","cloud-region-id":"RegionOne","cloud-type":"openstack","owner-defined-type":"t1","cloud-region-version":"RegionOne","identity-url":"http://keystone:8080/v3","cloud-zone":"z1","complex-name":"clli1","sriov-automation":false,"cloud-extra-info":"","resource-version":"1524845154715"}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
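+
+# Additional endpoints can be mocked by appending entries with the same layout.
+# Hypothetical example (commented out):
+#
+# aai/v13/cloud-infrastructure/complexes/complex/clli1:
+#   GET:
+#     body: '{"physical-location-id":"clli1"}'
+#     content_type: application/json
+#     status_code: 200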
diff --git a/kud/tests/generic_simulator/generic_sim.py b/kud/tests/generic_simulator/generic_sim.py
new file mode 100644
index 00000000..4392b652
--- /dev/null
+++ b/kud/tests/generic_simulator/generic_sim.py
@@ -0,0 +1,109 @@
+# Copyright 2018 Intel Corporation, Inc
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+
+import web
+from web import webapi
+import yaml
+
+urls = (
+ '/(.*)','MockController'
+)
+
+def setup_logger(name, log_file, level=logging.DEBUG):
+ print("Configuring the logger...")
+ handler = logging.FileHandler(log_file)
+ formatter = logging.Formatter('%(message)s')
+ handler.setFormatter(formatter)
+
+ logger = logging.getLogger(name)
+ logger.setLevel(level)
+ logger.addHandler(handler)
+
+ return logger
+
+
+class MockResponse:
+ def __init__(self, http_verb, status_code,
+ content_type="application/json", body="{}",
+ headers={}):
+ self.http_verb = http_verb.lower()
+ self.status_code = status_code
+ self.content_type = content_type
+ self.body = body
+ self.headers = headers
+
+def _parse_responses(parsed_responses):
+ result = {}
+ for path, responses in parsed_responses.iteritems():
+ new_path = path
+ if path.startswith("/"):
+ new_path = path[1:]
+
+ result[new_path] = []
+ for http_verb, response in responses.iteritems():
+ result[new_path].append(MockResponse(http_verb, **response))
+ return result
+
+def load_responses(filename):
+ print("Loading responses from configuration file..")
+ with open(filename) as yaml_file:
+ responses_file = yaml.safe_load(yaml_file)
+ responses_map = _parse_responses(responses_file)
+ return responses_map
+
+
+class MockController:
+
+ def _do_action(self, action):
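+        # Log the raw request payload, then replay the canned response whose
+        # path and HTTP verb match the incoming request (falls back to a 404).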
+ logger.info('{}'.format(web.ctx.env.get('wsgi.input').read()))
+ action = action.lower()
+ url = web.ctx['fullpath']
+ try:
+ if url.startswith("/"):
+ url = url[1:]
+ response = [ r for r in responses_map[url] if r.http_verb == action][0]
+ for header, value in response.headers.iteritems():
+ web.header(header, value)
+ web.header('Content-Type', response.content_type)
+ print(response.body)
+ return response.body
+ except:
+ webapi.NotFound()
+
+ def DELETE(self, url):
+ return self._do_action("delete")
+
+ def HEAD(self, url):
+ return self._do_action("head")
+
+ def PUT(self, url):
+ return self._do_action("put")
+
+ def GET(self, url):
+ return self._do_action("get")
+
+ def POST(self, url):
+ return self._do_action("post")
+
+ def PATCH(self, url):
+ return self._do_action("patch")
+
+
+logger = setup_logger('mock_controller', '/tmp/generic_sim/output.log')
+responses_map = load_responses('/etc/generic_sim/responses.yml')
+app = web.application(urls, globals())
+if __name__ == "__main__":
+ app.run()
diff --git a/kud/tests/generic_simulator/requirements.txt b/kud/tests/generic_simulator/requirements.txt
new file mode 100644
index 00000000..a0b6aae2
--- /dev/null
+++ b/kud/tests/generic_simulator/requirements.txt
@@ -0,0 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+PyYAML
+web.py
diff --git a/kud/tests/integration_cFW.sh b/kud/tests/integration_cFW.sh
new file mode 100755
index 00000000..92c280b9
--- /dev/null
+++ b/kud/tests/integration_cFW.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+csar_id=4f726e2a-b74a-11e8-ad7c-525400feed2
+
+# Setup
+populate_CSAR_containers_vFW $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+for resource in $unprotected_private_net $protected_private_net $onap_private_net; do
+ kubectl apply -f $resource.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+
+# Test
+popd
+
+# Teardown
+teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
diff --git a/kud/tests/integration_vFW.sh b/kud/tests/integration_vFW.sh
new file mode 100755
index 00000000..78a6b10c
--- /dev/null
+++ b/kud/tests/integration_vFW.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+csar_id=66fea6f0-b74d-11e8-95a0-525400feed26
+
+# Setup
+if [[ ! -f $HOME/.ssh/id_rsa.pub ]]; then
+ echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
+fi
+populate_CSAR_vms_vFW $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+for resource in $unprotected_private_net $protected_private_net $onap_private_net; do
+ kubectl apply -f $resource.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+
+# Test
+for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ vm=$(kubectl virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
+ echo "Pod name: $pod_name Virsh domain: $vm"
+    echo "ssh -i ~/.ssh/id_rsa admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+ echo "kubectl attach -it $pod_name"
+ echo "=== Virtlet details ===="
+ echo "$(kubectl virt virsh dumpxml $vm | grep VIRTLET_)\n"
+done
+popd
+
+# Teardown
+teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
diff --git a/kud/tests/integration_vcFW.sh b/kud/tests/integration_vcFW.sh
new file mode 100755
index 00000000..ccda1190
--- /dev/null
+++ b/kud/tests/integration_vcFW.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+csar_id=aa443e7e-c8ba-11e8-8877-525400b164ff
+
+# Setup
+install_ovn_deps
+if [[ ! -f $HOME/.ssh/id_rsa.pub ]]; then
+ echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
+fi
+populate_CSAR_vms_containers_vFW $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+for net in $unprotected_private_net $protected_private_net $onap_private_net; do
+ cleanup_network $net.yaml
+ echo "Create OVN Network $net network"
+ init_network $net.yaml
+done
+for resource in onap-ovn4nfvk8s-network sink-service; do
+ kubectl apply -f $resource.yaml
+done
+setup $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+#kubectl port-forward deployment/$sink_deployment_name 667:667
+
+# Test
+for deployment_name in $packetgen_deployment_name $firewall_deployment_name; do
+ pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ vm=$(kubectl virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
+ echo "Pod name: $pod_name Virsh domain: $vm"
+    echo "ssh -i ~/.ssh/id_rsa admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+ echo "kubectl attach -it $pod_name"
+ echo "=== Virtlet details ===="
+ echo "$(kubectl virt virsh dumpxml $vm | grep VIRTLET_)\n"
+done
+
+# Teardown
+#teardown $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name
+#for net in $unprotected_private_net $protected_private_net $onap_private_net; do
+# cleanup_network $net.yaml
+#done
+popd
diff --git a/kud/tests/istio.sh b/kud/tests/istio.sh
new file mode 100755
index 00000000..c525bb8e
--- /dev/null
+++ b/kud/tests/istio.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _functions.sh
+
+csar_id=ac39959e-e82c-11e8-9133-525400912638
+
+base_dest=$(grep "base_dest:" $test_folder/../deployment_infra/playbooks/kud-vars.yml | awk -F ': ' '{print $2}')
+istio_dest=$(grep "istio_dest:" $test_folder/../deployment_infra/playbooks/kud-vars.yml | awk -F ': ' '{print $2}' | sed "s|{{ base_dest }}|$base_dest|g;s|\"||g")
+istio_version=$(grep "istio_version:" $test_folder/../deployment_infra/playbooks/kud-vars.yml | awk -F ': ' '{print $2}')
+
+if ! istioctl version &>/dev/null; then
+    echo "This functional test requires the istioctl client"
+ exit 1
+fi
+
+_checks_args $csar_id
+pushd ${CSAR_DIR}/${csar_id}
+istioctl kube-inject -f $istio_dest/istio-$istio_version/samples/bookinfo/platform/kube/bookinfo.yaml > bookinfo-inject.yml
+kubectl apply -f bookinfo-inject.yml
+kubectl apply -f $istio_dest/istio-$istio_version/samples/bookinfo/networking/bookinfo-gateway.yaml
+
+for deployment in details-v1 productpage-v1 ratings-v1 reviews-v1 reviews-v2 reviews-v3; do
+ wait_deployment $deployment
+done
+INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
+INGRESS_HOST=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}')
+curl -o /dev/null -s -w "%{http_code}\n" http://$INGRESS_HOST:$INGRESS_PORT/productpage
+popd
diff --git a/kud/tests/multus.sh b/kud/tests/multus.sh
new file mode 100755
index 00000000..859fa3bb
--- /dev/null
+++ b/kud/tests/multus.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+csar_id=49408ca6-b75b-11e8-8076-525400feed26
+
+# Setup
+populate_CSAR_multus $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+kubectl apply -f bridge-network.yaml
+
+setup $multus_deployment_name
+
+# Test
+deployment_pod=$(kubectl get pods | grep $multus_deployment_name | awk '{print $1}')
+echo "===== $deployment_pod details ====="
+kubectl exec -it $deployment_pod -- ip a
+multus_nic=$(kubectl exec -it $deployment_pod -- ifconfig | grep "eth1")
+if [ -z "$multus_nic" ]; then
+ echo "The $deployment_pod pod doesn't contain the eth1 nic"
+ exit 1
+fi
+popd
+
+# Teardown
+teardown $multus_deployment_name
diff --git a/kud/tests/nfd.sh b/kud/tests/nfd.sh
new file mode 100755
index 00000000..17548206
--- /dev/null
+++ b/kud/tests/nfd.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+pod_name=nfd-pod
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $pod_name
+ labels:
+ env: test
+spec:
+ containers:
+ - name: nginx
+ image: nginx
+  nodeSelector:
+    node.alpha.kubernetes-incubator.io/nfd-network-SRIOV: "true"
+POD
+
+if kubectl version &>/dev/null; then
+ labels=$(kubectl get nodes -o json | jq .items[].metadata.labels)
+
+ echo $labels
+ if [[ $labels != *"node.alpha.kubernetes-incubator.io"* ]]; then
+ exit 1
+ fi
+
+ kubectl delete pod $pod_name --ignore-not-found=true --now
+ while kubectl get pod $pod_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$pod_name.yaml --validate=false
+
+ for pod in $pod_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+fi
diff --git a/kud/tests/ovn-kubernetes.sh b/kud/tests/ovn-kubernetes.sh
new file mode 100755
index 00000000..95d216bf
--- /dev/null
+++ b/kud/tests/ovn-kubernetes.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+apache_pod_name=apachetwin
+nginx_pod_name=nginxtwin
+
+cat << APACHEPOD > $HOME/apache-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $apache_pod_name
+ labels:
+ name: webserver
+spec:
+ containers:
+ - name: apachetwin
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+APACHEPOD
+
+cat << NGINXPOD > $HOME/nginx-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $nginx_pod_name
+ labels:
+ name: webserver
+spec:
+ containers:
+ - name: nginxtwin
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+NGINXPOD
+
+cat << APACHEEW > $HOME/apache-e-w.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: apacheservice
+ role: service
+ name: apacheservice
+spec:
+ ports:
+ - port: 8800
+ targetPort: 80
+ protocol: TCP
+ name: tcp
+ selector:
+ name: webserver
+APACHEEW
+
+cat << APACHENS > $HOME/apache-n-s.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: apacheexternal
+ role: service
+ name: apacheexternal
+spec:
+ ports:
+ - port: 8800
+ targetPort: 80
+ protocol: TCP
+ name: tcp
+ selector:
+ name: webserver
+ type: NodePort
+APACHENS
+
+if kubectl version &>/dev/null; then
+ kubectl apply -f $HOME/apache-e-w.yaml
+ kubectl apply -f $HOME/apache-n-s.yaml
+
+ kubectl delete pod $apache_pod_name --ignore-not-found=true --now
+ kubectl delete pod $nginx_pod_name --ignore-not-found=true --now
+ while kubectl get pod $apache_pod_name &>/dev/null; do
+ sleep 5
+ done
+ while kubectl get pod $nginx_pod_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/apache-pod.yaml
+ kubectl create -f $HOME/nginx-pod.yaml
+
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $apache_pod_name | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $nginx_pod_name | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ apache_ovn=$(kubectl get pod $apache_pod_name -o jsonpath="{.metadata.annotations.ovn}")
+ nginx_ovn=$(kubectl get pod $nginx_pod_name -o jsonpath="{.metadata.annotations.ovn}")
+
+ echo $apache_ovn
+ if [[ $apache_ovn != *"\"ip_address\":\"11.11."* ]]; then
+ exit 1
+ fi
+
+ echo $nginx_ovn
+ if [[ $nginx_ovn != *"\"ip_address\":\"11.11."* ]]; then
+ exit 1
+ fi
+fi
diff --git a/kud/tests/ovn4nfv.sh b/kud/tests/ovn4nfv.sh
new file mode 100755
index 00000000..37fddfd8
--- /dev/null
+++ b/kud/tests/ovn4nfv.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+csar_id=a1c5b53e-d7ab-11e8-85b7-525400e8c29a
+
+# Setup
+install_ovn_deps
+populate_CSAR_ovn4nfv $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+for net in ovn-priv-net ovn-port-net; do
+ cleanup_network $net.yaml
+ echo "Create OVN Network $net network"
+ init_network $net.yaml
+done
+kubectl apply -f onap-ovn4nfvk8s-network.yaml
+setup $ovn4nfv_deployment_name
+
+# Test
+deployment_pod=$(kubectl get pods | grep $ovn4nfv_deployment_name | awk '{print $1}')
+echo "===== $deployment_pod details ====="
+kubectl exec -it $deployment_pod -- ip a
+multus_nic=$(kubectl exec -it $deployment_pod -- ifconfig | grep "net1")
+if [ -z "$multus_nic" ]; then
+ echo "The $deployment_pod pod doesn't contain the net1 nic"
+ exit 1
+fi
+
+# Teardown
+teardown $ovn4nfv_deployment_name
+cleanup_network ovn-priv-net.yaml
+cleanup_network ovn-port-net.yaml
+popd
diff --git a/kud/tests/plugin.sh b/kud/tests/plugin.sh
new file mode 100755
index 00000000..6cf93cef
--- /dev/null
+++ b/kud/tests/plugin.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+#set -o xtrace
+
+source _common.sh
+source _functions.sh
+
+base_url="http://localhost:8081"
+cloud_region_id="kud"
+namespace="default"
+csar_id="94e414f6-9ca4-11e8-bb6a-52540067263b"
+rbd_csar_id="7eb09e38-4363-9942-1234-3beb2e95fd85"
+definition_id="9d117af8-30b8-11e9-af94-525400277b3d"
+profile_id="ebe353d2-30b7-11e9-9515-525400277b3d"
+
+# _build_generic_sim() - Creates a generic simulator image in case that doesn't exist
+function _build_generic_sim {
+ if [[ -n $(docker images -q generic_sim) ]]; then
+ return
+ fi
+ BUILD_ARGS="--no-cache"
+    if [ "${HTTP_PROXY:-}" ]; then
+ BUILD_ARGS+=" --build-arg HTTP_PROXY=${HTTP_PROXY}"
+ fi
+    if [ "${HTTPS_PROXY:-}" ]; then
+ BUILD_ARGS+=" --build-arg HTTPS_PROXY=${HTTPS_PROXY}"
+ fi
+
+ pushd generic_simulator
+ echo "Building generic simulator image..."
+ docker build ${BUILD_ARGS} -t generic_sim:latest .
+ popd
+}
+
+# start_aai_service() - Starts a simulator for AAI service
+function start_aai_service {
+ _build_generic_sim
+ if [[ $(docker ps -q --all --filter "name=aai") ]]; then
+ docker rm aai -f
+ fi
+ echo "Start AAI simulator.."
+ docker run --name aai -v $(mktemp):/tmp/generic_sim/ -v $(pwd)/generic_simulator/aai/:/etc/generic_sim/ -p 8443:8080 -d generic_sim
+}
+
+# Setup
+destroy_deployment $plugin_deployment_name
+
+#start_aai_service
+populate_CSAR_plugin $csar_id
+populate_CSAR_rbdefinition $rbd_csar_id
+
+# Test
+print_msg "Create Resource Bundle Definition Metadata"
+payload_raw="
+{
+ \"name\": \"test-rbdef\",
+ \"chart-name\": \"vault-consul-dev\",
+ \"description\": \"testing resource bundle definition api\",
+ \"uuid\": \"$definition_id\",
+ \"service-type\": \"firewall\"
+}
+"
+payload=$(echo $payload_raw | tr '\n' ' ')
+rbd_id=$(curl -s -d "$payload" -X POST "${base_url}/v1/rb/definition" | jq -r '.uuid')
+
+print_msg "Upload Resource Bundle Definition Content"
+curl -s --data-binary @${CSAR_DIR}/${rbd_csar_id}/${rbd_content_tarball}.gz -X POST "${base_url}/v1/rb/definition/$rbd_id/content"
+
+print_msg "Listing Resource Bundle Definitions"
+rbd_id_list=$(curl -s -X GET "${base_url}/v1/rb/definition")
+if [[ "$rbd_id_list" != *"${rbd_id}"* ]]; then
+ echo $rbd_id_list
+ echo "Resource Bundle Definition not stored"
+ exit 1
+fi
+
+print_msg "Create Resource Bundle Profile Metadata"
+kubeversion=$(kubectl version | grep 'Server Version' | awk -F '"' '{print $6}')
+payload_raw="
+{
+ \"name\": \"test-rbprofile\",
+ \"namespace\": \"$namespace\",
+ \"rbdid\": \"$definition_id\",
+ \"uuid\": \"$profile_id\",
+ \"kubernetesversion\": \"$kubeversion\"
+}
+"
+payload=$(echo $payload_raw | tr '\n' ' ')
+rbp_id=$(curl -s -d "$payload" -X POST "${base_url}/v1/rb/profile" | jq -r '.uuid')
+
+print_msg "Upload Resource Bundle Profile Content"
+curl -s --data-binary @${CSAR_DIR}/${rbd_csar_id}/${rbp_content_tarball}.gz -X POST "${base_url}/v1/rb/profile/$rbp_id/content"
+
+print_msg "Listing Resource Bundle Profiles"
+rbp_id_list=$(curl -s -X GET "${base_url}/v1/rb/profile")
+if [[ "$rbp_id_list" != *"${rbp_id}"* ]]; then
+    echo $rbp_id_list
+ echo "Resource Bundle Profile not stored"
+ exit 1
+fi
+
+print_msg "Instantiate Profile"
+payload_raw="
+{
+ \"cloud_region_id\": \"$cloud_region_id\",
+ \"rb_profile_id\":\"$profile_id\",
+ \"csar_id\": \"$csar_id\"
+}
+"
+payload=$(echo $payload_raw | tr '\n' ' ')
+vnf_id=$(curl -s -d "$payload" "${base_url}/v1/vnf_instances/" | jq -r '.vnf_id')
+
+print_msg "Validating Kubernetes"
+kubectl get --no-headers=true --namespace=${namespace} deployment ${cloud_region_id}-${namespace}-${vnf_id}-test-rbprofile-vault-consul-dev
+kubectl get --no-headers=true --namespace=${namespace} service ${cloud_region_id}-${namespace}-${vnf_id}-override-vault-consul
+echo "VNF Instance created succesfully with id: $vnf_id"
+
+print_msg "Listing VNF Instances"
+vnf_id_list=$(curl -s -X GET "${base_url}/v1/vnf_instances/${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list')
+if [[ "$vnf_id_list" != *"${vnf_id}"* ]]; then
+ echo $vnf_id_list
+ echo "VNF Instance not stored"
+ exit 1
+fi
+
+print_msg "Getting $vnf_id VNF Instance information"
+vnf_details=$(curl -s -X GET "${base_url}/v1/vnf_instances/${cloud_region_id}/${namespace}/${vnf_id}")
+if [[ -z "$vnf_details" ]]; then
+    echo "Cannot retrieve VNF Instance details"
+ exit 1
+fi
+echo "VNF details $vnf_details"
+
+print_msg "Deleting $rbd_id Resource Bundle Definition"
+curl -X DELETE "${base_url}/v1/rb/definition/$rbd_id"
+if [[ 500 -ne $(curl -o /dev/null -w %{http_code} -s -X GET "${base_url}/v1/rb/definition/$rbd_id") ]]; then
+ echo "Resource Bundle Definition not deleted"
+# TODO: Change the HTTP code for 404 when the resource is not found in the API
+ exit 1
+fi
+
+print_msg "Deleting $vnf_id VNF Instance"
+curl -X DELETE "${base_url}/v1/vnf_instances/${cloud_region_id}/${namespace}/${vnf_id}"
+if [[ 404 -ne $(curl -o /dev/null -w %{http_code} -s -X GET "${base_url}/v1/vnf_instances/${cloud_region_id}/${namespace}/${vnf_id}") ]]; then
+ echo "VNF Instance not deleted"
+ exit 1
+fi
+
+# Teardown
+teardown $plugin_deployment_name
diff --git a/kud/tests/plugin_edgex.sh b/kud/tests/plugin_edgex.sh
new file mode 100755
index 00000000..3165efbf
--- /dev/null
+++ b/kud/tests/plugin_edgex.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+#set -o xtrace
+
+source _functions.sh
+
+base_url="http://localhost:8081/v1/vnf_instances/"
+cloud_region_id="kud"
+namespace="default"
+csar_id=cb009bfe-bbee-11e8-9766-525400435678
+
+# Setup
+_checks_args ${csar_id}
+cp -R ./edgex/* ${CSAR_DIR}/${csar_id}/
+
+# Test
+payload_raw="
+{
+ \"cloud_region_id\": \"$cloud_region_id\",
+ \"namespace\": \"$namespace\",
+ \"csar_id\": \"$csar_id\"
+}
+"
+payload=$(echo $payload_raw | tr '\n' ' ')
+
+echo "Creating EdgeX VNF Instance"
+
+vnf_id=$(curl -s -d "$payload" "${base_url}" | jq -r '.vnf_id')
+
+echo "=== Validating Kubernetes ==="
+kubectl get --no-headers=true --namespace=${namespace} deployment ${cloud_region_id}-${namespace}-${vnf_id}-edgex-core-command
+kubectl get --no-headers=true --namespace=${namespace} service ${cloud_region_id}-${namespace}-${vnf_id}-edgex-core-command
+echo "VNF Instance created succesfully with id: $vnf_id"
+
+# TODO: Add health checks to verify EdgeX services
+
+vnf_id_list=$(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list')
+if [[ "$vnf_id_list" != *"${vnf_id}"* ]]; then
+ echo $vnf_id_list
+ echo "VNF Instance not stored"
+ exit 1
+fi
+
+vnf_details=$(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}/${vnf_id}")
+if [[ -z "$vnf_details" ]]; then
+    echo "Cannot retrieve VNF Instance details"
+ exit 1
+fi
+echo "VNF details $vnf_details"
+
+echo "Deleting $vnf_id VNF Instance"
+curl -X DELETE "${base_url}${cloud_region_id}/${namespace}/${vnf_id}"
+if [[ -n $(curl -s -X GET "${base_url}${cloud_region_id}/${namespace}/${vnf_id}") ]]; then
+ echo "VNF Instance not deleted"
+ exit 1
+fi
+
+# Teardown
diff --git a/kud/tests/vFW/README.md b/kud/tests/vFW/README.md
new file mode 100644
index 00000000..f54a555f
--- /dev/null
+++ b/kud/tests/vFW/README.md
@@ -0,0 +1,50 @@
+# vFirewall ONAP Use Case
+
+This use case is composed of three virtual functions (VFs) running in
+separate Ubuntu Virtual Machines:
+
+ * [Packet generator][1]: Sends packets to the packet sink through the
+firewall. This includes a script that periodically generates different
+volumes of traffic.
+ * [Firewall][2]: Reports the volume of traffic passing through to the
+ONAP DCAE collector.
+ * [Traffic sink][3]: Displays the traffic volume that lands at the sink
+VM. Open http://192.168.20.250:667 in your browser and enable automatic
+page refresh by clicking the "Off" button to see the traffic volume in
+the charts.
+
+![Diagram](diagram.png)
+
+## Adjust packet generator:
+
+The packet generator contains 10 streams: fw\_udp1, fw\_udp2,
+fw\_udp3, . . . , fw\_udp10. Each stream generates 100 packets every
+10 seconds. The */opt/run\_traffic\_fw\_demo.sh* script on the packet
+generator VM starts automatically and alternates high traffic (i.e.
+10 active streams at the same time) and low traffic (1 active stream)
+every 5 minutes.
+
+To enable a stream, include `{"id":"fw_udp1", "is-enabled":"true"}`
+in the *pg-stream* bracket.
+
+To adjust the traffic volume produced by the packet generator, run the
+following command in a shell:
+
+```
+ curl -X PUT \
+ -H "Authorization: Basic YWRtaW46YWRtaW4=" \
+ -H "Content-Type: application/json" \
+ -H "Cache-Control: no-cache" \
+ -d '{"pg-streams":{"pg-stream": [{"id":"fw_udp1", "is-enabled":"true"},{"id":"fw_udp2", "is-enabled":"true"},{"id":"fw_udp3", "is-enabled":"true"},{"id":"fw_udp4", "is-enabled":"true"},{"id":"fw_udp5", "is-enabled":"true"}]}}' \
+ "http://192.168.10.200:8183/restconf/config/sample-plugin:sample-plugin/pg-streams"
+```
+
+The command above enables 5 streams.
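+
+Similarly, enabling a single stream reproduces the low-traffic profile. This is
+a sketch against the same RESTCONF endpoint shown above; adjust the address if
+your packet generator is reachable elsewhere:
+
+```
+ curl -X PUT \
+   -H "Authorization: Basic YWRtaW46YWRtaW4=" \
+   -H "Content-Type: application/json" \
+   -H "Cache-Control: no-cache" \
+   -d '{"pg-streams":{"pg-stream": [{"id":"fw_udp1", "is-enabled":"true"}]}}' \
+   "http://192.168.10.200:8183/restconf/config/sample-plugin:sample-plugin/pg-streams"
+```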
+
+## License
+
+Apache-2.0
+
+[1]: packetgen
+[2]: firewall
+[3]: sink
diff --git a/kud/tests/vFW/Vagrantfile b/kud/tests/vFW/Vagrantfile
new file mode 100644
index 00000000..cabe6504
--- /dev/null
+++ b/kud/tests/vFW/Vagrantfile
@@ -0,0 +1,66 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+vars = {
+ "demo_artifacts_version" => "1.3.0",
+ 'vfw_private_ip_0' => '192.168.10.100',
+ 'vfw_private_ip_1' => '192.168.20.100',
+ 'vfw_private_ip_2' => '10.10.100.2',
+ 'vpg_private_ip_0' => '192.168.10.200',
+ 'vpg_private_ip_1' => '10.0.100.3',
+ 'vsn_private_ip_0' => '192.168.20.250',
+ 'vsn_private_ip_1' => '10.10.100.4',
+ 'dcae_collector_ip' => '10.0.4.1',
+ 'dcae_collector_port' => '8081',
+ 'protected_net_gw' => '192.168.20.100',
+ 'protected_net_cidr' => '192.168.20.0/24',
+ 'protected_private_net_cidr' => '192.168.10.0/24',
+ 'onap_private_net_cidr' => '10.10.0.0/16'
+}
+
+if ENV['no_proxy'] != nil or ENV['NO_PROXY']
+ $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+ $subnet = "192.168.121"
+  # NOTE: This range is based on the vagrant-libvirt network definition
+ (1..27).each do |i|
+ $no_proxy += ",#{$subnet}.#{i}"
+ end
+end
+
+Vagrant.configure("2") do |config|
+ config.vm.box = "elastic/ubuntu-16.04-x86_64"
+
+ if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil
+ if not Vagrant.has_plugin?('vagrant-proxyconf')
+ system 'vagrant plugin install vagrant-proxyconf'
+      raise 'vagrant-proxyconf was installed; please run the vagrant command again'
+ end
+ config.proxy.http = ENV['http_proxy'] || ENV['HTTP_PROXY'] || ""
+ config.proxy.https = ENV['https_proxy'] || ENV['HTTPS_PROXY'] || ""
+ config.proxy.no_proxy = $no_proxy
+ end
+
+ config.vm.provider 'libvirt' do |v|
+ v.cpu_mode = 'host-passthrough' # DPDK requires Supplemental Streaming SIMD Extensions 3 (SSSE3)
+ end
+
+ config.vm.define :packetgen do |packetgen|
+ packetgen.vm.hostname = "packetgen"
+ packetgen.vm.provision 'shell', path: 'packetgen', env: vars
+ packetgen.vm.network :private_network, :ip => vars['vpg_private_ip_0'], :type => :static, :netmask => "255.255.255.0" # unprotected_private_net_cidr
+ packetgen.vm.network :private_network, :ip => vars['vpg_private_ip_1'], :type => :static, :netmask => "255.255.0.0" # onap_private_net_cidr
+ end
+ config.vm.define :firewall do |firewall|
+ firewall.vm.hostname = "firewall"
+ firewall.vm.provision 'shell', path: 'firewall', env: vars
+ firewall.vm.network :private_network, :ip => vars['vfw_private_ip_0'], :type => :static, :netmask => "255.255.255.0" # unprotected_private_net_cidr
+ firewall.vm.network :private_network, :ip => vars['vfw_private_ip_1'], :type => :static, :netmask => "255.255.255.0" # protected_private_net_cidr
+ firewall.vm.network :private_network, :ip => vars['vfw_private_ip_2'], :type => :static, :netmask => "255.255.0.0" # onap_private_net_cidr
+ end
+ config.vm.define :sink do |sink|
+ sink.vm.hostname = "sink"
+ sink.vm.provision 'shell', path: 'sink', env: vars
+ sink.vm.network :private_network, :ip => vars['vsn_private_ip_0'], :type => :static, :netmask => "255.255.255.0" # protected_private_net_cidr
+ sink.vm.network :private_network, :ip => vars['vsn_private_ip_1'], :type => :static, :netmask => "255.255.0.0" # onap_private_net_cidr
+ end
+end
diff --git a/kud/tests/vFW/diagram.png b/kud/tests/vFW/diagram.png
new file mode 100644
index 00000000..4cf95f2f
--- /dev/null
+++ b/kud/tests/vFW/diagram.png
Binary files differ
diff --git a/kud/tests/vFW/firewall b/kud/tests/vFW/firewall
new file mode 100755
index 00000000..93d4f2a3
--- /dev/null
+++ b/kud/tests/vFW/firewall
@@ -0,0 +1,96 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+set -o xtrace
+set -o errexit
+
+# install_dependencies() - Install required dependencies
+function install_dependencies {
+ apt-get update
+ apt-get install -y -qq wget openjdk-8-jre bridge-utils net-tools bsdmainutils make gcc libcurl4-gnutls-dev
+}
+
+# install_vpp() - Install VPP
+function install_vpp {
+ local RELEASE=".stable.1609"
+
+ apt-get update
+ apt-get install -y -qq apt-transport-https
+ echo "deb [trusted=yes] https://nexus.fd.io/content/repositories/fd.io$RELEASE.ubuntu.$(lsb_release -c -s).main/ ./" | tee -a /etc/apt/sources.list.d/99fd.io.list
+ apt-get update
+ apt-get install -y -qq vpp vpp-lib vpp-plugins vpp-dpdk-dkms
+}
+
+function _untar_url {
+ local repo_url="https://nexus.onap.org/content/repositories/staging/org/onap/demo/vnf/"
+ local file_subpath=$1
+
+ wget -q -O tmp_file.tar.gz "${repo_url}/${file_subpath}"
+ sha1=$(wget ${repo_url}/${file_subpath}.sha1 -q -O -)
+ if [[ $(sha1sum tmp_file.tar.gz | awk '{print $1}') != "$sha1" ]]; then
+ echo "The downloaded file is corrupted"
+ exit 1
+ fi
+ tar -zmxf tmp_file.tar.gz
+ rm tmp_file.tar.gz
+}
+
+# install_vfw_scripts() -
+function install_vfw_scripts {
+ local version=$(cat /opt/config/demo_artifacts_version.txt)
+ local ves_path=VES
+ local ves_reporting_path="${ves_path}/evel/evel-library"
+
+ pushd /opt
+ wget -q https://git.onap.org/demo/plain/vnfs/vFW/scripts/{v_firewall_init,vfirewall}.sh
+ chmod +x *.sh
+
+ _untar_url "sample-distribution/${version}/sample-distribution-${version}-hc.tar.gz"
+ mkdir -p honeycomb
+ mv sample-distribution-$version honeycomb
+
+ _untar_url "ves5/ves/${version}/ves-${version}-demo.tar.gz"
+ mv ves-$version $ves_path
+
+ _untar_url "ves5/ves_vfw_reporting/${version}/ves_vfw_reporting-${version}-demo.tar.gz"
+ mkdir -p $ves_reporting_path/code
+ mv ves_vfw_reporting-$version $ves_reporting_path/code/VESreporting
+
+ chmod +x $ves_reporting_path/code/VESreporting/go-client.sh
+ pushd $ves_reporting_path/bldjobs/
+ make clean
+ make
+ sleep 1
+ popd
+
+ # TODO(electrocucaracha) Fix it in upstream
+ sed -i 's/start vpp/systemctl start vpp/g' v_firewall_init.sh
+ mv vfirewall.sh /etc/init.d
+ update-rc.d vfirewall.sh defaults
+ systemctl start firewall
+ popd
+}
+
+mkdir -p /opt/config/
+echo "$protected_net_cidr" > /opt/config/protected_net_cidr.txt
+echo "$vfw_private_ip_0" > /opt/config/fw_ipaddr.txt
+echo "$vsn_private_ip_0" > /opt/config/sink_ipaddr.txt
+echo "$demo_artifacts_version" > /opt/config/demo_artifacts_version.txt
+echo "$dcae_collector_ip" > /opt/config/dcae_collector_ip.txt
+echo "$dcae_collector_port" > /opt/config/dcae_collector_port.txt
+
+echo 'vm.nr_hugepages = 1024' >> /etc/sysctl.conf
+sysctl -p
+
+install_dependencies
+install_vpp
+install_vfw_scripts
diff --git a/kud/tests/vFW/packetgen b/kud/tests/vFW/packetgen
new file mode 100755
index 00000000..51d5c676
--- /dev/null
+++ b/kud/tests/vFW/packetgen
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+set -o xtrace
+set -o errexit
+
+# install_dependencies() - Install required dependencies
+function install_dependencies {
+ apt-get update
+ apt-get install -y -qq wget openjdk-8-jre bridge-utils net-tools bsdmainutils
+}
+
+# install_vpp() - Install VPP
+function install_vpp {
+ local RELEASE=".stable.1609"
+
+ apt-get update
+ apt-get install -y -qq apt-transport-https
+ echo "deb [trusted=yes] https://nexus.fd.io/content/repositories/fd.io$RELEASE.ubuntu.$(lsb_release -c -s).main/ ./" | tee -a /etc/apt/sources.list.d/99fd.io.list
+ apt-get update
+ apt-get install -y -qq vpp vpp-lib vpp-plugins vpp-dpdk-dkms
+}
+
+function _untar_url {
+ local repo_url="https://nexus.onap.org/content/repositories/staging/org/onap/demo/vnf/"
+ local file_subpath=$1
+
+ wget -q -O tmp_file.tar.gz "${repo_url}/${file_subpath}"
+ sha1=$(wget ${repo_url}/${file_subpath}.sha1 -q -O -)
+ if [[ $(sha1sum tmp_file.tar.gz | awk '{print $1}') != "$sha1" ]]; then
+ echo "The downloaded file is corrupted"
+ exit 1
+ fi
+ tar -zmxf tmp_file.tar.gz
+ rm tmp_file.tar.gz
+}
+
+# install_vfw_scripts() -
+function install_vfw_scripts {
+ local version=$(cat /opt/config/demo_artifacts_version.txt)
+
+ pushd /opt
+ wget -q https://git.onap.org/demo/plain/vnfs/vFW/scripts/{v_packetgen_init,vpacketgen,run_traffic_fw_demo}.sh
+ chmod +x *.sh
+
+ _untar_url "sample-distribution/${version}/sample-distribution-${version}-hc.tar.gz"
+ mv sample-distribution-$version honeycomb
+
+ _untar_url "vfw/vfw_pg_streams/$version/vfw_pg_streams-$version-demo.tar.gz"
+ mv vfw_pg_streams-$version pg_streams
+
+ sed -i 's/"restconf-binding-address": "127.0.0.1",/"restconf-binding-address": "0.0.0.0",/g' /opt/honeycomb/config/honeycomb.json
+
+ # TODO(electrocucaracha) Fix it in upstream
+ sed -i 's/start vpp/systemctl start vpp/g' v_packetgen_init.sh
+ sed -i 's|/opt/honeycomb/sample-distribution-\$VERSION/honeycomb|/opt/honeycomb/honeycomb|g' v_packetgen_init.sh
+ mv vpacketgen.sh /etc/init.d/
+ update-rc.d vpacketgen.sh defaults
+ systemctl start packetgen
+ popd
+}
+
+mkdir -p /opt/config/
+echo "$protected_net_cidr" > /opt/config/protected_net_cidr.txt
+echo "$vfw_private_ip_0" > /opt/config/fw_ipaddr.txt
+echo "$vsn_private_ip_0" > /opt/config/sink_ipaddr.txt
+echo "$demo_artifacts_version" > /opt/config/demo_artifacts_version.txt
+
+echo 'vm.nr_hugepages = 1024' >> /etc/sysctl.conf
+sysctl -p
+
+install_dependencies
+install_vpp
+install_vfw_scripts
diff --git a/kud/tests/vFW/sink b/kud/tests/vFW/sink
new file mode 100755
index 00000000..5604198f
--- /dev/null
+++ b/kud/tests/vFW/sink
@@ -0,0 +1,48 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+set -o xtrace
+set -o errexit
+
+# install_dependencies() - Install required dependencies
+function install_dependencies {
+ apt-get update
+ apt install -y wget darkstat net-tools
+
+ # Configure and run Darkstat
+ sed -i "s/START_DARKSTAT=.*/START_DARKSTAT=yes/g;s/INTERFACE=.*/INTERFACE=\"-i eth1\"/g" /etc/darkstat/init.cfg
+
+ systemctl restart darkstat
+}
+
+# install_vfw_scripts() -
+function install_vfw_scripts {
+ pushd /opt
+ wget -q https://git.onap.org/demo/plain/vnfs/vFW/scripts/{v_sink_init,vsink}.sh
+ chmod +x *.sh
+
+ mv vsink.sh /etc/init.d
+ update-rc.d vsink.sh defaults
+ systemctl start sink
+ popd
+}
+
+mkdir -p /opt/config/
+echo "$protected_net_cidr" > /opt/config/protected_net_cidr.txt
+echo "$vfw_private_ip_0" > /opt/config/fw_ipaddr.txt
+echo "$vsn_private_ip_0" > /opt/config/sink_ipaddr.txt
+echo "$demo_artifacts_version" > /opt/config/demo_artifacts_version.txt
+echo "$protected_net_gw" > /opt/config/protected_net_gw.txt
+echo "$protected_private_net_cidr" > /opt/config/unprotected_net.txt
+
+install_dependencies
+install_vfw_scripts
diff --git a/kud/tests/virtlet.sh b/kud/tests/virtlet.sh
new file mode 100755
index 00000000..04c1276d
--- /dev/null
+++ b/kud/tests/virtlet.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+csar_id=6b54a728-b76a-11e8-a1ba-52540053ccc8
+
+# Setup
+populate_CSAR_virtlet $csar_id
+
+pushd ${CSAR_DIR}/${csar_id}
+
+setup $virtlet_deployment_name
+
+# Test
+deployment_pod=$(kubectl get pods | grep $virtlet_deployment_name | awk '{print $1}')
+vm_name=$(kubectl virt virsh list | grep "virtlet-.*-$virtlet_deployment_name" | awk '{print $2}')
+vm_status=$(kubectl virt virsh list | grep "virtlet-.*-$virtlet_deployment_name" | awk '{print $3}')
+if [[ "$vm_status" != "running" ]]; then
+    echo "There is no Virtual Machine running in the $deployment_pod pod"
+ exit 1
+fi
+echo "Pod name: $deployment_pod Virsh domain: $vm_name"
+echo "ssh testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}")"
+echo "kubectl attach -it $deployment_pod"
+echo "=== Virtlet details ===="
+echo "$(kubectl virt virsh dumpxml $vm_name | grep VIRTLET_)\n"
+popd
+
+# Teardown
+teardown $virtlet_deployment_name
diff --git a/kud/tests/vnfs/edgex/helm/edgex/Chart.yaml b/kud/tests/vnfs/edgex/helm/edgex/Chart.yaml
new file mode 100644
index 00000000..d52b1925
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: EdgeX foundry Helm Charts for testing purposes
+name: edgex
+version: 1.0.0 \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/consul/Chart.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/Chart.yaml
new file mode 100644
index 00000000..74fa86ca
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: EdgeX foundry Consul Helm Charts for testing purposes
+name: edgex-consul
+version: 1.0.0
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-deployment.yaml
new file mode 100644
index 00000000..5db9b855
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-deployment.yaml
@@ -0,0 +1,50 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: edgex-core-consul
+ labels:
+ app: edgex-core-consul
+ release: {{ .Release.Name }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-core-consul
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-core-consul
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-consul:latest
+ name: edgex-core-consul
+ ports:
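+        # one containerPort rendered per entry in .Values.service.ports (8400, 8500 and 8600 by default)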
+ {{- range $k, $v := .Values.service.ports }}
+ - containerPort: {{ $v.port }}
+ {{- end }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
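+      # The PVCs below are created by the parent edgex chart (templates/pvc.yaml)
+      # and render to <release>-edgex-data, <release>-edgex-logs, etc.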
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-service.yaml
new file mode 100644
index 00000000..783edef8
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/templates/consul-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.name }}
+spec:
+ ports:
+ {{- range $k, $v := .Values.service.ports }}
+ - name: {{ $v.portName }}
+ port: {{ $v.port }}
+ targetPort: {{ $v.port }}
+ {{- end }}
+ selector:
+ app: edgex-core-consul
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/consul/values.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/values.yaml
new file mode 100644
index 00000000..bf7732df
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/consul/values.yaml
@@ -0,0 +1,76 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ persistence: {}
+#################################################################
+# Application configuration defaults.
+#################################################################
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+service:
+ name: edgex-core-consul
+ ports:
+ - portName: consul-port-1
+ port: 8400
+ - portName: consul-port-2
+ port: 8500
+ - portName: consul-port-3
+ port: 8600
+
+ingress:
+ enabled: false
+
+# Configure resource requests and limits
+flavor: small
+resources:
+ small:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+ large:
+ limits:
+ cpu: 400m
+ memory: 1Gi
+ requests:
+ cpu: 10m
+ memory: 100Mi
+ unlimited: {} \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/Chart.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/Chart.yaml
new file mode 100644
index 00000000..19c4a324
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: EdgeX Foundry Mongo Helm chart for testing purposes
+name: edgex-mongo
+version: 1.0.0
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-deployment.yaml
new file mode 100644
index 00000000..3251309e
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-mongo
+ release: {{ .Release.Name }}
+ name: edgex-mongo
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-mongo
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-mongo
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-edgex-mongo:0.2
+ name: edgex-mongo
+ ports:
+ - containerPort: {{ .Values.service.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ .Release.Name }}-edgex-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-service.yaml
new file mode 100644
index 00000000..5b30a45f
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/templates/mongo-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.portName }}
+ port: {{ .Values.service.port }}
+ targetPort: {{ .Values.service.port }}
+ selector:
+ app: edgex-mongo
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/values.yaml b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/values.yaml
new file mode 100644
index 00000000..e22b846a
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/charts/mongo/values.yaml
@@ -0,0 +1,71 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ persistence: {}
+#################################################################
+# Application configuration defaults.
+#################################################################
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+service:
+ name: edgex-mongo
+ portName: mongo
+ port: 27017
+
+ingress:
+ enabled: false
+
+# Configure resource requests and limits
+flavor: small
+resources:
+ small:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+ large:
+ limits:
+ cpu: 400m
+ memory: 1Gi
+ requests:
+ cpu: 10m
+ memory: 100Mi
+ unlimited: {} \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/command-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/command-deployment.yaml
new file mode 100644
index 00000000..1b43128c
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/command-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-core-command
+ release: {{ .Release.Name }}
+ name: edgex-core-command
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-core-command
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-core-command
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-command:0.2.1
+ name: edgex-core-command
+ ports:
+ - containerPort: {{ .Values.service.command.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/command-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/command-service.yaml
new file mode 100644
index 00000000..7ffc7cce
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/command-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.command.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.command.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.command.portName }}
+ port: {{ .Values.service.command.port }}
+ targetPort: {{ .Values.service.command.port }}
+ selector:
+ app: edgex-core-command
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/data-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/data-deployment.yaml
new file mode 100644
index 00000000..81a1e3b9
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/data-deployment.yaml
@@ -0,0 +1,50 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-core-data
+ release: {{ .Release.Name }}
+ name: edgex-core-data
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-core-data
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-core-data
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-data:0.2.1
+ name: edgex-core-data
+ ports:
+ {{- range $key, $val := .Values.service.data.ports }}
+ - containerPort: {{ $val.port }}
+ {{- end }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/data-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/data-service.yaml
new file mode 100644
index 00000000..c7f68798
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/data-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.data.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.data.name }}
+spec:
+ ports:
+ {{- range $key, $val := .Values.service.data.ports }}
+ - name: {{ $val.portName }}
+ port: {{ $val.port }}
+ targetPort: {{ $val.port }}
+ {{- end }}
+ selector:
+ app: edgex-core-data
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-deployment.yaml
new file mode 100644
index 00000000..52ab77e8
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-device-bluetooth
+ release: {{ .Release.Name }}
+ name: edgex-device-bluetooth
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-device-bluetooth
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-device-bluetooth
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-device-bluetooth:0.2.1
+ name: edgex-device-bluetooth
+ ports:
+ - containerPort: {{ .Values.service.deviceBluetooth.port }}
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-service.yaml
new file mode 100644
index 00000000..dbbb1e39
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/device-bluetooth-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.deviceBluetooth.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.deviceBluetooth.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.deviceBluetooth.portName }}
+ port: {{ .Values.service.deviceBluetooth.port }}
+ targetPort: {{ .Values.service.deviceBluetooth.port }}
+ selector:
+ app: edgex-device-bluetooth
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/export-client-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/export-client-deployment.yaml
new file mode 100644
index 00000000..0d60b419
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/export-client-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-export-client
+ release: {{ .Release.Name }}
+ name: edgex-export-client
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-export-client
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-export-client
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-export-client:0.2.1
+ name: edgex-export-client
+ ports:
+ - containerPort: {{ .Values.service.exportClient.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/export-client-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/export-client-service.yaml
new file mode 100644
index 00000000..ec44fd51
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/export-client-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.exportClient.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.exportClient.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.exportClient.portName }}
+ port: {{ .Values.service.exportClient.port }}
+ targetPort: {{ .Values.service.exportClient.port }}
+ selector:
+ app: edgex-export-client
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-deployment.yaml
new file mode 100644
index 00000000..4cf17dfe
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-deployment.yaml
@@ -0,0 +1,50 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-export-distro
+ release: {{ .Release.Name }}
+ name: edgex-export-distro
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-export-distro
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-export-distro
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-export-distro:0.2.1
+ name: edgex-export-distro
+ ports:
+ {{- range $key, $val := .Values.service.exportDistro.ports }}
+ - containerPort: {{ $val.port }}
+ {{- end }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-service.yaml
new file mode 100644
index 00000000..70624abf
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/export-distro-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.exportDistro.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.exportDistro.name }}
+spec:
+ ports:
+ {{- range $key, $val := .Values.service.exportDistro.ports }}
+ - name: {{ $val.portName }}
+ port: {{ $val.port }}
+ targetPort: {{ $val.port }}
+ {{- end }}
+ selector:
+ app: edgex-export-distro
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/logging-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/logging-deployment.yaml
new file mode 100644
index 00000000..a60e26f1
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/logging-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-support-logging
+ release: {{ .Release.Name }}
+ name: edgex-support-logging
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-support-logging
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-support-logging
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-logging:0.2.1
+ name: edgex-support-logging
+ ports:
+ - containerPort: {{ .Values.service.logging.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/logging-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/logging-service.yaml
new file mode 100644
index 00000000..32c85908
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/logging-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.logging.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.logging.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.logging.portName }}
+ port: {{ .Values.service.logging.port }}
+ targetPort: {{ .Values.service.logging.port }}
+ selector:
+ app: edgex-support-logging
+ release: {{ .Release.Name }} \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/metadata-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/metadata-deployment.yaml
new file mode 100644
index 00000000..6ce913fa
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/metadata-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-core-metadata
+ release: {{ .Release.Name }}
+ name: edgex-core-metadata
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-core-metadata
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-core-metadata
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-metadata:0.2.1
+ name: edgex-core-metadata
+ ports:
+ - containerPort: {{ .Values.service.metadata.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/metadata-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/metadata-service.yaml
new file mode 100644
index 00000000..8d03118f
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/metadata-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.metadata.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.metadata.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.metadata.portName }}
+ port: {{ .Values.service.metadata.port }}
+ targetPort: {{ .Values.service.metadata.port }}
+ selector:
+ app: edgex-core-metadata
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/notifications-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/notifications-deployment.yaml
new file mode 100644
index 00000000..d736d14a
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/notifications-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-support-notifications
+ release: {{ .Release.Name }}
+ name: edgex-support-notifications
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-support-notifications
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-support-notifications
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-notifications:0.2
+ name: edgex-support-notifications
+ ports:
+ - containerPort: {{ .Values.service.notifications.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/notifications-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/notifications-service.yaml
new file mode 100644
index 00000000..d9afefc5
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/notifications-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.notifications.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.notifications.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.notifications.portName }}
+ port: {{ .Values.service.notifications.port }}
+ targetPort: {{ .Values.service.notifications.port }}
+ selector:
+ app: edgex-support-notifications
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/pv.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/pv.yaml
new file mode 100644
index 00000000..aa07549d
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/pv.yaml
@@ -0,0 +1,99 @@
+{{/*
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
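+{{/* Four hostPath-backed PersistentVolumes per release (data, logs, consul
+     config and consul data); rendered only when persistence is enabled. */}}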
+{{- if .Values.persistence.enabled -}}
+
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ capacity:
+ storage: {{ .Values.persistence.size }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPathData }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ capacity:
+ storage: {{ .Values.persistence.size }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPathLogs }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ capacity:
+ storage: {{ .Values.persistence.size }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPathConsulConf }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ capacity:
+ storage: {{ .Values.persistence.size }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPathConsulData }}
+
+{{- end -}} \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/pvc.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/pvc.yaml
new file mode 100644
index 00000000..7c31733e
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/pvc.yaml
@@ -0,0 +1,143 @@
+{{/*
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
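+{{/* PersistentVolumeClaims bound (via label selectors) to the hostPath volumes
+     in pv.yaml; their names are what the deployment templates mount. */}}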
+{{- if .Values.persistence.enabled -}}
+
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Chart.Name }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+
+{{- end -}} \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-deployment.yaml
new file mode 100644
index 00000000..0f3e1384
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-support-rulesengine
+ release: {{ .Release.Name }}
+ name: edgex-support-rulesengine
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-support-rulesengine
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-support-rulesengine
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-rulesengine:0.2.1
+ name: edgex-support-rulesengine
+ ports:
+ - containerPort: {{ .Values.service.rulesengine.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-service.yaml
new file mode 100644
index 00000000..756ad423
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/rulesengine-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.rulesengine.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.rulesengine.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.rulesengine.portName }}
+ port: {{ .Values.service.rulesengine.port }}
+ targetPort: {{ .Values.service.rulesengine.port }}
+ selector:
+ app: edgex-support-rulesengine
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-deployment.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-deployment.yaml
new file mode 100644
index 00000000..1bff0521
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: edgex-support-scheduler
+ release: {{ .Release.Name }}
+ name: edgex-support-scheduler
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: edgex-support-scheduler
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: edgex-support-scheduler
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-scheduler:0.2.1
+ name: edgex-support-scheduler
+ ports:
+ - containerPort: {{ .Values.service.scheduler.port }}
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-data
+ - name: edgex-logs
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-logs
+ - name: consul-config
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-config
+ - name: consul-data
+ persistentVolumeClaim:
+ claimName: {{ printf "%s-%s" .Release.Name .Chart.Name }}-consul-data
diff --git a/kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-service.yaml b/kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-service.yaml
new file mode 100644
index 00000000..a5311829
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/templates/scheduler-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: {{ .Values.service.scheduler.name }}
+ release: {{ .Release.Name }}
+ name: {{ .Values.service.scheduler.name }}
+spec:
+ ports:
+ - name: {{ .Values.service.scheduler.portName }}
+ port: {{ .Values.service.scheduler.port }}
+ targetPort: {{ .Values.service.scheduler.port }}
+ selector:
+ app: edgex-support-scheduler
+ release: {{ .Release.Name }}
diff --git a/kud/tests/vnfs/edgex/helm/edgex/values.yaml b/kud/tests/vnfs/edgex/helm/edgex/values.yaml
new file mode 100644
index 00000000..90a0068d
--- /dev/null
+++ b/kud/tests/vnfs/edgex/helm/edgex/values.yaml
@@ -0,0 +1,125 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ persistence: {}
+#################################################################
+# Application configuration defaults.
+#################################################################
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+service:
+ command:
+ name: edgex-core-command
+ portName: core-command
+ port: 48082
+ data:
+ name: edgex-core-data
+ ports:
+ - portName: data-port-1
+ port: 48080
+ - portName: data-port-2
+ port: 5563
+ deviceBluetooth:
+ name: edgex-device-bluetooth
+ portName: device-bluetooth
+ port: 49988
+ exportClient:
+ name: edgex-export-client
+ portName: export-client
+ port: 48071
+ exportDistro:
+ name: edgex-export-distro
+ ports:
+ - portName: export-distro-port1
+ port: 48070
+ - portName: export-distro-port2
+ port: 5566
+ logging:
+ name: edgex-support-logging
+ portName: logging
+ port: 48061
+ metadata:
+ name: edgex-core-metadata
+ portName: metadata
+ port: 48081
+ notifications:
+ name: edgex-support-notifications
+ portName: notifications
+ port: 48060
+ rulesengine:
+ name: edgex-support-rulesengine
+ portName: rulesengine
+ port: 48075
+ scheduler:
+ name: edgex-support-scheduler
+ portName: scheduler
+ port: 48085
+
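+# hostPath-backed persistence shared by all EdgeX services
+# (see templates/pv.yaml and templates/pvc.yaml)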
+persistence:
+ enabled: true
+ volumeReclaimPolicy: Retain
+ accessMode: ReadWriteOnce
+ size: 1Gi
+ mountPath: /dockerdata-nfs
+ mountSubPathData: vnfs/edgex/data/db
+ mountSubPathLogs: vnfs/edgex/logs
+ mountSubPathConsulConf: vnfs/edgex/consul/config
+ mountSubPathConsulData: vnfs/edgex/consul/data
+
+ingress:
+ enabled: false
+
+# Configure resource requests and limits
+flavor: small
+resources:
+ small:
+ limits:
+ cpu: 200m
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+ large:
+ limits:
+ cpu: 400m
+ memory: 1Gi
+ requests:
+ cpu: 10m
+ memory: 100Mi
+ unlimited: {} \ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/command-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/command-deployment.yaml
new file mode 100644
index 00000000..ba4e3c12
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/command-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-core-command
+ name: edgex-core-command
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-core-command
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-core-command
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-command:0.2.1
+ name: edgex-core-command
+ ports:
+ - containerPort: 48082
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/consul-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/consul-deployment.yaml
new file mode 100644
index 00000000..157a2130
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/consul-deployment.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-core-consul
+ name: edgex-core-consul
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-core-consul
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-core-consul
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-consul:latest
+ name: edgex-core-consul
+ ports:
+ - containerPort: 8400
+ - containerPort: 8500
+ - containerPort: 8600
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/data-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/data-deployment.yaml
new file mode 100644
index 00000000..64d28a4c
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/data-deployment.yaml
@@ -0,0 +1,47 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-core-data
+ name: edgex-core-data
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-core-data
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-core-data
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-data:0.2.1
+ name: edgex-core-data
+ ports:
+ - containerPort: 48080
+ - containerPort: 5563
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/device-bluetooth-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/device-bluetooth-deployment.yaml
new file mode 100644
index 00000000..9dc96785
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/device-bluetooth-deployment.yaml
@@ -0,0 +1,49 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-device-bluetooth
+ name: edgex-device-bluetooth
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-device-bluetooth
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-device-bluetooth
+ spec:
+ containers:
+ - image: edgexfoundry/docker-device-bluetooth:0.2.1
+ name: edgex-device-bluetooth
+ ports:
+ - containerPort: 49988
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/export-client-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/export-client-deployment.yaml
new file mode 100644
index 00000000..191abc4f
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/export-client-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-export-client
+ name: edgex-export-client
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-export-client
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-export-client
+ spec:
+ containers:
+ - image: edgexfoundry/docker-export-client:0.2.1
+ name: edgex-export-client
+ ports:
+ - containerPort: 48071
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/export-distro-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/export-distro-deployment.yaml
new file mode 100644
index 00000000..ff0d880e
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/export-distro-deployment.yaml
@@ -0,0 +1,47 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-export-distro
+ name: edgex-export-distro
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-export-distro
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-export-distro
+ spec:
+ containers:
+ - image: edgexfoundry/docker-export-distro:0.2.1
+ name: edgex-export-distro
+ ports:
+ - containerPort: 48070
+ - containerPort: 5566
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/logging-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/logging-deployment.yaml
new file mode 100644
index 00000000..a52085bc
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/logging-deployment.yaml
@@ -0,0 +1,47 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-support-logging
+ name: edgex-support-logging
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-support-logging
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-support-logging
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-logging:0.2.1
+ name: edgex-support-logging
+ ports:
+ - containerPort: 48061
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
+
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/metadata-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/metadata-deployment.yaml
new file mode 100644
index 00000000..44eb8114
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/metadata-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-core-metadata
+ name: edgex-core-metadata
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-core-metadata
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-core-metadata
+ spec:
+ containers:
+ - image: edgexfoundry/docker-core-metadata:0.2.1
+ name: edgex-core-metadata
+ ports:
+ - containerPort: 48081
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/mongo-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/mongo-deployment.yaml
new file mode 100644
index 00000000..26df5f02
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/mongo-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-mongo
+ name: edgex-mongo
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-mongo
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-mongo
+ spec:
+ containers:
+ - image: edgexfoundry/docker-edgex-mongo:0.2
+ name: edgex-mongo
+ ports:
+ - containerPort: 27017
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/notifications-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/notifications-deployment.yaml
new file mode 100644
index 00000000..447789e3
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/notifications-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-support-notifications
+ name: edgex-support-notifications
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-support-notifications
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-support-notifications
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-notifications:0.2
+ name: edgex-support-notifications
+ ports:
+ - containerPort: 48060
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/rulesengine-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/rulesengine-deployment.yaml
new file mode 100644
index 00000000..1d49b67a
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/rulesengine-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-support-rulesengine
+  name: edgex-support-rulesengine
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-support-rulesengine
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-support-rulesengine
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-rulesengine:0.2.1
+ name: edgex-support-rulesengine
+ ports:
+ - containerPort: 48075
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/deployments/scheduler-deployment.yaml b/kud/tests/vnfs/edgex/kubernetes/deployments/scheduler-deployment.yaml
new file mode 100644
index 00000000..94e4226b
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/deployments/scheduler-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ io.kompose.service: edgex-support-scheduler
+ name: edgex-support-scheduler
+spec:
+ selector:
+ matchLabels:
+ io.kompose.service: edgex-support-scheduler
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ io.kompose.service: edgex-support-scheduler
+ spec:
+ containers:
+ - image: edgexfoundry/docker-support-scheduler:0.2.1
+ name: edgex-support-scheduler
+ ports:
+ - containerPort: 48085
+ resources: {}
+ volumeMounts:
+ - mountPath: /data/db
+ name: data-db
+ - mountPath: /edgex/logs
+ name: edgex-logs
+ - mountPath: /consul/config
+ name: consul-config
+ - mountPath: /consul/data
+ name: consul-data
+ restartPolicy: Always
+ volumes:
+ - name: data-db
+ hostPath:
+ path: /data/db
+ - name: edgex-logs
+ hostPath:
+ path: /edgex/logs
+ - name: consul-config
+ hostPath:
+ path: /consul/config
+ - name: consul-data
+ hostPath:
+ path: /consul/data
+status: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/metadata.yaml b/kud/tests/vnfs/edgex/kubernetes/metadata.yaml
new file mode 100644
index 00000000..63f784dc
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/metadata.yaml
@@ -0,0 +1,27 @@
+resources:
+ deployment:
+ - deployments/command-deployment.yaml
+ - deployments/consul-deployment.yaml
+ - deployments/data-deployment.yaml
+ - deployments/device-bluetooth-deployment.yaml
+ - deployments/export-client-deployment.yaml
+ - deployments/export-distro-deployment.yaml
+ - deployments/logging-deployment.yaml
+ - deployments/metadata-deployment.yaml
+ - deployments/mongo-deployment.yaml
+ - deployments/notifications-deployment.yaml
+ - deployments/rulesengine-deployment.yaml
+ - deployments/scheduler-deployment.yaml
+ service:
+ - services/command-service.yaml
+ - services/consul-service.yaml
+ - services/data-service.yaml
+ - services/device-bluetooth-service.yaml
+ - services/export-client-service.yaml
+ - services/export-distro-service.yaml
+ - services/logging-service.yaml
+ - services/metadata-service.yaml
+ - services/mongo-service.yaml
+ - services/notifications-service.yaml
+ - services/rulesengine-service.yaml
+ - services/scheduler-service.yaml
\ No newline at end of file
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/command-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/command-service.yaml
new file mode 100644
index 00000000..f8d7d745
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/command-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-core-command
+ name: edgex-core-command
+spec:
+ ports:
+ - name: "48082"
+ port: 48082
+ targetPort: 48082
+ selector:
+ io.kompose.service: edgex-core-command
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/consul-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/consul-service.yaml
new file mode 100644
index 00000000..7b5c6ddc
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/consul-service.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-core-consul
+ name: edgex-core-consul
+spec:
+ ports:
+ - name: "8400"
+ port: 8400
+ targetPort: 8400
+ - name: "8500"
+ port: 8500
+ targetPort: 8500
+ - name: "8600"
+ port: 8600
+ targetPort: 8600
+ selector:
+ io.kompose.service: edgex-core-consul
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/data-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/data-service.yaml
new file mode 100644
index 00000000..fc6e5566
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/data-service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-core-data
+ name: edgex-core-data
+spec:
+ ports:
+ - name: "48080"
+ port: 48080
+ targetPort: 48080
+ - name: "5563"
+ port: 5563
+ targetPort: 5563
+ selector:
+ io.kompose.service: edgex-core-data
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/device-bluetooth-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/device-bluetooth-service.yaml
new file mode 100644
index 00000000..48ffc9ef
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/device-bluetooth-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-device-bluetooth
+ name: edgex-device-bluetooth
+spec:
+ ports:
+ - name: "49988"
+ port: 49988
+ targetPort: 49988
+ selector:
+ io.kompose.service: edgex-device-bluetooth
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/export-client-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/export-client-service.yaml
new file mode 100644
index 00000000..709953cd
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/export-client-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-export-client
+ name: edgex-export-client
+spec:
+ ports:
+ - name: "48071"
+ port: 48071
+ targetPort: 48071
+ selector:
+ io.kompose.service: edgex-export-client
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/export-distro-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/export-distro-service.yaml
new file mode 100644
index 00000000..19d6bf5e
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/export-distro-service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-export-distro
+ name: edgex-export-distro
+spec:
+ ports:
+ - name: "48070"
+ port: 48070
+ targetPort: 48070
+ - name: "5566"
+ port: 5566
+ targetPort: 5566
+ selector:
+ io.kompose.service: edgex-export-distro
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/logging-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/logging-service.yaml
new file mode 100644
index 00000000..a25d9a9e
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/logging-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-support-logging
+ name: edgex-support-logging
+spec:
+ ports:
+ - name: "48061"
+ port: 48061
+ targetPort: 48061
+ selector:
+ io.kompose.service: edgex-support-logging
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/metadata-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/metadata-service.yaml
new file mode 100644
index 00000000..12f76836
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/metadata-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-core-metadata
+ name: edgex-core-metadata
+spec:
+ ports:
+ - name: "48081"
+ port: 48081
+ targetPort: 48081
+ selector:
+ io.kompose.service: edgex-core-metadata
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/mongo-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/mongo-service.yaml
new file mode 100644
index 00000000..0c919b79
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/mongo-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-mongo
+ name: edgex-mongo
+spec:
+ ports:
+ - name: "27017"
+ port: 27017
+ targetPort: 27017
+ selector:
+ io.kompose.service: edgex-mongo
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/notifications-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/notifications-service.yaml
new file mode 100644
index 00000000..3245282b
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/notifications-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-support-notifications
+ name: edgex-support-notifications
+spec:
+ ports:
+ - name: "48060"
+ port: 48060
+ targetPort: 48060
+ selector:
+ io.kompose.service: edgex-support-notifications
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/rulesengine-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/rulesengine-service.yaml
new file mode 100644
index 00000000..238c32ce
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/rulesengine-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-support-rulesengine
+ name: edgex-support-rulesengine
+spec:
+ ports:
+ - name: "48075"
+ port: 48075
+ targetPort: 48075
+ selector:
+ io.kompose.service: edgex-support-rulesengine
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/edgex/kubernetes/services/scheduler-service.yaml b/kud/tests/vnfs/edgex/kubernetes/services/scheduler-service.yaml
new file mode 100644
index 00000000..03ac0818
--- /dev/null
+++ b/kud/tests/vnfs/edgex/kubernetes/services/scheduler-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ io.kompose.service: edgex-support-scheduler
+ name: edgex-support-scheduler
+spec:
+ ports:
+ - name: "48085"
+ port: 48085
+ targetPort: 48085
+ selector:
+ io.kompose.service: edgex-support-scheduler
+status:
+ loadBalancer: {}
diff --git a/kud/tests/vnfs/testrb/helm/profile/manifest.yaml b/kud/tests/vnfs/testrb/helm/profile/manifest.yaml
new file mode 100644
index 00000000..ef260633
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/profile/manifest.yaml
@@ -0,0 +1,7 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
+ configresource:
+ - filepath: testfol/subdir/deployment.yaml
+ chartpath: vault-consul-dev/templates/deployment.yaml
diff --git a/kud/tests/vnfs/testrb/helm/profile/override_values.yaml b/kud/tests/vnfs/testrb/helm/profile/override_values.yaml
new file mode 100644
index 00000000..c9f29a31
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/profile/override_values.yaml
@@ -0,0 +1,7 @@
+service:
+ type: NodePort
+ name: override-vault-consul
+ portName: override-port-vault-consul
+ internalPort: 8222
+ nodePort: 44
+
diff --git a/kud/tests/vnfs/testrb/helm/profile/testfol/subdir/deployment.yaml b/kud/tests/vnfs/testrb/helm/profile/testfol/subdir/deployment.yaml
new file mode 100644
index 00000000..938e1843
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/profile/testfol/subdir/deployment.yaml
@@ -0,0 +1,51 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: "{{ .Values.image.vault }}"
+ name: {{ include "common.name" . }}
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ command: ["vault","server","-dev"]
+ ports:
+ - containerPort: {{ .Values.service.internalPort }}
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: localtime
+ readOnly: true
+
+ volumes:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/Chart.yaml b/kud/tests/vnfs/testrb/helm/vault-consul-dev/Chart.yaml
new file mode 100644
index 00000000..86643c9d
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/Chart.yaml
@@ -0,0 +1,19 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: Chart to launch Vault and Consul in dev mode
+name: vault-consul-dev
+appVersion: 0.9.5
+version: 2.0.0
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/Chart.yaml b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/Chart.yaml
new file mode 100644
index 00000000..7d58e53d
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: Common templates for inclusion in other charts
+name: common
+version: 2.0.0
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_name.tpl b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_name.tpl
new file mode 100644
index 00000000..42999846
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_name.tpl
@@ -0,0 +1,31 @@
+{{/*
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{/*
+ Expand the name of a chart.
+*/}}
+{{- define "common.name" -}}
+ {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+ Create a default fully qualified application name.
+ Truncated at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "common.fullname" -}}
+ {{- $name := default .Chart.Name .Values.nameOverride -}}
+ {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
\ No newline at end of file
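To make the two helpers above concrete: common.name falls back to the chart name unless .Values.nameOverride is set, and common.fullname prefixes the release name and truncates the result to 63 characters. A purely illustrative rendering for this chart, assuming a hypothetical release named r1 and no nameOverride:

```yaml
# Illustration only, not part of the diff: inputs and the strings the
# helpers above would produce.
inputs:
  Release.Name: r1                        # hypothetical release name
  Chart.Name: vault-consul-dev
  nameOverride: ""                        # unset, so the chart name is used
rendered:
  common.name: vault-consul-dev           # default .Chart.Name .Values.nameOverride
  common.fullname: r1-vault-consul-dev    # "<release>-<name>", truncated to 63 chars
```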
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_namespace.tpl b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_namespace.tpl
new file mode 100644
index 00000000..94c9ee72
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_namespace.tpl
@@ -0,0 +1,26 @@
+{{/*
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{/*
+ Resolve the namespace to apply to a chart. By default this is the
+ release namespace (.Release.Namespace). It can be overridden if
+ necessary (eg. for subcharts) using the following value:
+
+ - .Values.nsPrefix : override the resolved namespace
+*/}}
+{{- define "common.namespace" -}}
+ {{- default .Release.Namespace .Values.nsPrefix -}}
+{{- end -}}
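A small illustration of the namespace helper above: when nsPrefix is set it wins, otherwise the release namespace is used (the onap4k8s value is hypothetical):

```yaml
# Illustration only: what common.namespace resolves to in two cases.
with_nsPrefix:
  values: { nsPrefix: onap4k8s }    # hypothetical override
  rendered: onap4k8s
without_nsPrefix:
  Release.Namespace: default
  rendered: default                 # falls back to .Release.Namespace
```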
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_repository.tpl b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_repository.tpl
new file mode 100644
index 00000000..364ba7dc
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_repository.tpl
@@ -0,0 +1,48 @@
+{{/*
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{/*
+ Resolve the name of the common image repository.
+ The value for .Values.repository is used by default,
+ unless either override mechanism is used.
+
+ - .Values.global.repository : override default image repository for all images
+ - .Values.repositoryOverride : override global and default image repository on a per image basis
+*/}}
+{{- define "common.repository" -}}
+ {{if .Values.repositoryOverride }}
+ {{- printf "%s" .Values.repositoryOverride -}}
+ {{else}}
+ {{- default .Values.repository .Values.global.repository -}}
+ {{end}}
+{{- end -}}
+
+
+{{/*
+ Resolve the image repository secret token.
+ The value for .Values.global.repositoryCred is used:
+ repositoryCred:
+ user: user
+ password: password
+ mail: email (optional)
+*/}}
+{{- define "common.repository.secret" -}}
+ {{- $repo := include "common.repository" . }}
+ {{- $cred := .Values.global.repositoryCred }}
+ {{- $mail := default "@" $cred.mail }}
+ {{- $auth := printf "%s:%s" $cred.user $cred.password | b64enc }}
+ {{- printf "{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"%s\",\"auth\":\"%s\"}}" $repo $cred.user $cred.password $mail $auth | b64enc -}}
+{{- end -}}
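Both helpers above are driven purely by values: common.repository returns repositoryOverride when present, otherwise global.repository, otherwise the chart-local repository, and common.repository.secret base64-encodes a dockercfg-style entry built from global.repositoryCred. A hypothetical values fragment that exercises them (the registry names and credentials below are made up):

```yaml
# Illustration only: values a consuming chart might set for these helpers.
global:
  repository: nexus3.onap.org:10001           # shared default registry
  repositoryCred:                             # consumed by common.repository.secret
    user: docker                              # hypothetical credentials
    password: docker
    mail: docker@example.com                  # optional; "@" is used when omitted
repositoryOverride: registry.example.com:5000 # hypothetical per-chart override; wins over both
```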
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_service.tpl b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_service.tpl
new file mode 100644
index 00000000..77b77d05
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/templates/_service.tpl
@@ -0,0 +1,31 @@
+{{/*
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{/*
+ Resolve the name of a chart's service.
+
+ The default will be the chart name (or .Values.nameOverride if set).
+ If set, .Values.service.name overrides both.
+
+ - .Values.service.name : override default service (ie. chart) name
+*/}}
+{{/*
+ Expand the service name for a chart.
+*/}}
+{{- define "common.servicename" -}}
+ {{- $name := default .Chart.Name .Values.nameOverride -}}
+ {{- default $name .Values.service.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
\ No newline at end of file
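In other words, common.servicename prefers service.name when it is set. With the chart defaults later in this diff the Service is named vault-consul; under the profile's override_values.yaml shown earlier (service.name: override-vault-consul) it would become override-vault-consul. A sketch:

```yaml
# Illustration only: what common.servicename resolves to.
with_chart_defaults:
  service: { name: vault-consul }
  rendered: vault-consul
with_profile_override:
  service: { name: override-vault-consul }   # from override_values.yaml above
  rendered: override-vault-consul
```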
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/values.yaml b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/values.yaml
new file mode 100644
index 00000000..f7098ee8
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/charts/common/values.yaml
@@ -0,0 +1,44 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration default values that can be inherited by
+# all subcharts.
+#################################################################
+global:
+ # Change to an unused port prefix range to prevent port conflicts
+ # with other instances running within the same k8s cluster
+ nodePortPrefix: 302
+
+ # image repositories
+ repository: nexus3.onap.org:10001
+
+ # readiness check
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+
+ # logging agent
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+ # image pull policy
+ pullPolicy: Always
+
+ # default mount path root directory referenced
+ # by persistent volumes and log files
+ persistence:
+ mountPath: /dockerdata-nfs
+
+ # flag to enable debugging - application support required
+ debugEnabled: true
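These defaults live in the common subchart; a global block set at the top level of the parent chart, or passed at install time, is shared with every subchart, which is how the vault-consul-dev deployment template can read .Values.global.pullPolicy. A hypothetical top-level override:

```yaml
# Illustration only: globals supplied by the parent chart (or via -f/--set)
# are visible to the common subchart and to its siblings.
global:
  pullPolicy: IfNotPresent   # hypothetical; replaces the Always default above
  nodePortPrefix: 303        # hypothetical; shifts the exposed NodePort range
```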
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/deployment.yaml b/kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/deployment.yaml
new file mode 100644
index 00000000..66b2e747
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/deployment.yaml
@@ -0,0 +1,62 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: "{{ .Values.image.vault }}"
+ name: {{ include "common.name" . }}
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ command: ["vault","server","-dev"]
+ ports:
+ - containerPort: {{ .Values.service.internalPort }}
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: localtime
+ readOnly: true
+
+ - image: "{{ .Values.image.consul }}"
+ name: {{ include "common.name" . }}-backend
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ command: ["consul","agent","-dev"]
+ ports:
+ - name: http
+ containerPort: 8500
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: localtime
+ readOnly: true
+ volumes:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/service.yaml b/kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/service.yaml
new file mode 100644
index 00000000..04e9a5a9
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/templates/service.yaml
@@ -0,0 +1,39 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "common.servicename" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - name: {{ .Values.service.portName }}
+ {{if eq .Values.service.type "NodePort" -}}
+ port: {{ .Values.service.internalPort }}
+ nodePort: {{ .Values.global.nodePortPrefix | default "302" }}{{ .Values.service.nodePort }}
+ {{- else -}}
+ port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ {{- end}}
+ protocol: TCP
+ selector:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
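One detail worth calling out in the template above: for a NodePort service the node port is produced by string-concatenating global.nodePortPrefix with service.nodePort, so the chart defaults (302 and 44, from the values.yaml that follows) land on 30244. A sketch of the rendered ports section, not captured output:

```yaml
# Illustration only: approximate rendering with the chart defaults
# (service.type: NodePort, name/portName: vault-consul, internalPort: 8200,
# nodePort: 44, global.nodePortPrefix: 302).
spec:
  type: NodePort
  ports:
    - name: vault-consul
      port: 8200          # .Values.service.internalPort
      nodePort: 30244     # "302" + "44", concatenated as strings
      protocol: TCP
```

Under the profile's override_values.yaml the port name and port change (override-port-vault-consul, 8222), but the node port stays 30244 because nodePort itself is not overridden.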
diff --git a/kud/tests/vnfs/testrb/helm/vault-consul-dev/values.yaml b/kud/tests/vnfs/testrb/helm/vault-consul-dev/values.yaml
new file mode 100644
index 00000000..87c64026
--- /dev/null
+++ b/kud/tests/vnfs/testrb/helm/vault-consul-dev/values.yaml
@@ -0,0 +1,89 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 302
+ persistence: {}
+
+# application image
+image:
+ consul: consul:1.0.6
+ vault: vault:0.10.0
+pullPolicy: Always
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+config:
+ consul:
+ server: true
+ log_level: INFO
+ data_dir: '/consul/data'
+ ports:
+ http: 8500
+ https: -1
+
+ vault:
+ storage:
+ consul:
+ address: localhost:8500
+ path: vault
+ listener:
+ tcp:
+ address: '[::]:8200'
+ tls_disable: true
+ disable_mlock: true
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+persistence:
+ enabled: true
+ volumeReclaimPolicy: Retain
+ accessMode: ReadWriteOnce
+ size: 2Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: sms/consul/data
+
+service:
+ type: NodePort
+ name: vault-consul
+ portName: vault-consul
+ internalPort: 8200
+ nodePort: 44
+
+ingress:
+ enabled: false