author     Victor Morales <victor.morales@intel.com>  2018-08-28 15:09:02 -0700
committer  Victor Morales <victor.morales@intel.com>  2018-08-30 10:11:00 -0700
commit     574785c07010a494fbd1456d11e7c0449ad43c38 (patch)
tree       d0b8bc992752d5344a9de281e01558bd32b6071b
parent     88579fa6f563a3bea8c39aa98159eb54d13d44a5 (diff)
Add KRD source code
This change includes the source code created for the Kubernetes Reference Deployment (KRD), which provides an automated mechanism to install and configure the Kubernetes services required by the MultiCloud/K8s plugin.

Change-Id: Ica49566fcd531e25846ed3e5062de2f92ec56f6c
Signed-off-by: Victor Morales <victor.morales@intel.com>
Issue-ID: MULTICLOUD-301
-rw-r--r--  .gitignore  13
-rwxr-xr-x  deployments/build.sh  6
-rw-r--r--  doc/sampleCommands.rst  71
-rw-r--r--  docs/conf.py  40
-rw-r--r--  docs/create_vl.png (renamed from doc/create_vl.png)  bin 36106 -> 36106 bytes
-rw-r--r--  docs/create_vnf.png (renamed from doc/create_vnf.png)  bin 47090 -> 47090 bytes
-rw-r--r--  docs/img/default_pdf.png  bin 0 -> 347636 bytes
-rw-r--r--  docs/img/diagram.png  bin 0 -> 128882 bytes
-rw-r--r--  docs/krd_architecture.rst  162
-rw-r--r--  docs/sampleCommands.rst  84
-rw-r--r--  docs/swagger.yaml (renamed from doc/swagger.yaml)  0
-rw-r--r--  tox.ini  25
-rw-r--r--  vagrant/README.md  52
-rw-r--r--  vagrant/Vagrantfile  114
-rw-r--r--  vagrant/config/default.yml  53
-rw-r--r--  vagrant/config/samples/pdf.yml.aio  25
-rw-r--r--  vagrant/config/samples/pdf.yml.mini  33
-rw-r--r--  vagrant/galaxy-requirements.yml  15
-rwxr-xr-x  vagrant/installer.sh  269
-rw-r--r--  vagrant/inventory/group_vars/k8s-cluster.yml  156
-rwxr-xr-x  vagrant/node.sh  55
-rw-r--r--  vagrant/playbooks/Debian.yml  22
-rw-r--r--  vagrant/playbooks/RedHat.yml  19
-rw-r--r--  vagrant/playbooks/Suse.yml  20
-rw-r--r--  vagrant/playbooks/configure-krd.yml  16
-rw-r--r--  vagrant/playbooks/configure-multus.yml  110
-rw-r--r--  vagrant/playbooks/configure-nfd.yml  57
-rw-r--r--  vagrant/playbooks/configure-ovn-kubernetes.yml  131
-rw-r--r--  vagrant/playbooks/configure-ovn.yml  109
-rw-r--r--  vagrant/playbooks/configure-virtlet.yml  233
-rw-r--r--  vagrant/playbooks/krd-vars.yml  50
-rwxr-xr-x  vagrant/setup.sh  167
-rw-r--r--  vagrant/tests/generic_simulator/Dockerfile  27
-rw-r--r--  vagrant/tests/generic_simulator/aai/responses.yml  189
-rw-r--r--  vagrant/tests/generic_simulator/generic_sim.py  109
-rw-r--r--  vagrant/tests/generic_simulator/requirements.txt  11
-rwxr-xr-x  vagrant/tests/integration_cFW.sh  194
-rwxr-xr-x  vagrant/tests/integration_vFW.sh  295
-rwxr-xr-x  vagrant/tests/multus.sh  123
-rwxr-xr-x  vagrant/tests/nfd.sh  62
-rwxr-xr-x  vagrant/tests/ovn-kubernetes.sh  136
-rwxr-xr-x  vagrant/tests/plugin.sh  97
-rwxr-xr-x  vagrant/tests/virtlet.sh  145
43 files changed, 3421 insertions, 74 deletions
diff --git a/.gitignore b/.gitignore
index f0b583fe..b38d6e29 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,10 @@
-# IDE
+# Common
.DS_Store
.vscode
*-workspace
+.tox/
+.*.swp
+*.log
# Directories
pkg
@@ -21,3 +24,11 @@ src/k8splugin/plugins/**/*.so
# Tests
*.test
*.out
+
+# KRD
+.vagrant/
+vagrant/inventory/hosts.ini
+vagrant/inventory/group_vars/all.yml
+vagrant/config/pdf.yml
+*.retry
+*.vdi
diff --git a/deployments/build.sh b/deployments/build.sh
index 667be5f5..7c2d7379 100755
--- a/deployments/build.sh
+++ b/deployments/build.sh
@@ -13,10 +13,12 @@ set -o pipefail
set -o xtrace
function generate_binary {
- GOPATH=$(go env GOPATH)
+ export GOPATH="$(pwd)/../"
rm -f k8plugin
rm -f *.so
- $GOPATH/bin/dep ensure -v
+ pushd ../src/k8splugin/
+ dep ensure -v
+ popd
for plugin in deployment namespace service; do
CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -buildmode=plugin -a -tags netgo -o ./$plugin.so ../src/k8splugin/plugins/$plugin/plugin.go
done
diff --git a/doc/sampleCommands.rst b/doc/sampleCommands.rst
deleted file mode 100644
index e8b53cf3..00000000
--- a/doc/sampleCommands.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2018 Intel Corporation.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Sample Commands:
-
-* POST
- URL:`localhost:8081/v1/vnf_instances/cloudregion1/namespacetest`
- Request Body:
-
- ```
- {
- "cloud_region_id": "region1",
- "csar_id": "uuid",
- "namespace": "test",
- "oof_parameters": [{
- "key1": "value1",
- "key2": "value2",
- "key3": {}
- }],
- "network_parameters": {
- "oam_ip_address": {
- "connection_point": "string",
- "ip_address": "string",
- "workload_name": "string"
- }
- }
- }
- ```
-
- Expected Response:
- ```
- {
- "response": "Created Deployment:nginx-deployment"
- }
- ```
-
- The above POST request will download the following YAML file and run it on the Kubernetes cluster.
-
- ```
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: nginx-deployment
- labels:
- app: nginx
- spec:
- replicas: 3
- selector:
- matchLabels:
- app: nginx
- template:
- metadata:
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx:1.7.9
- ports:
- - containerPort: 80
- ```
-* GET
- URL: `localhost:8081/v1/vnf_instances`
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..2e30879a
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
+
+sys.path.insert(0, ROOT)
+sys.path.insert(0, BASE_DIR)
+
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ #'sphinx.ext.intersphinx'
+]
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'MultiCloud Kubernetes Plugin'
diff --git a/doc/create_vl.png b/docs/create_vl.png
index 803b6da8..803b6da8 100644
--- a/doc/create_vl.png
+++ b/docs/create_vl.png
Binary files differ
diff --git a/doc/create_vnf.png b/docs/create_vnf.png
index 27b50d79..27b50d79 100644
--- a/doc/create_vnf.png
+++ b/docs/create_vnf.png
Binary files differ
diff --git a/docs/img/default_pdf.png b/docs/img/default_pdf.png
new file mode 100644
index 00000000..a783cb3b
--- /dev/null
+++ b/docs/img/default_pdf.png
Binary files differ
diff --git a/docs/img/diagram.png b/docs/img/diagram.png
new file mode 100644
index 00000000..897801da
--- /dev/null
+++ b/docs/img/diagram.png
Binary files differ
diff --git a/docs/krd_architecture.rst b/docs/krd_architecture.rst
new file mode 100644
index 00000000..f188135e
--- /dev/null
+++ b/docs/krd_architecture.rst
@@ -0,0 +1,162 @@
+.. Copyright 2018 Intel Corporation.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+****************
+KRD Architecture
+****************
+
+This document explains the different components of the Kubernetes
+Reference Deployment project and how they can be configured to modify
+its default behaviour.
+
+Vagrantfile
+###########
+
+This file describes how the Virtual Machines are configured and which
+scripts and arguments are used during their provisioning process. It
+uses the *elastic/ubuntu-16.04-x86_64* Vagrant box for both the
+VirtualBox and Libvirt providers.
+
+config/
+#######
+
+This folder contains the POD Descriptor File (PDF) which is used
+by Vagrant during the provisioning process. The *samples* folder
+contains examples for some setups (All-in-One, Mini, NoHA, HA, etc.)
+that can be used.
+
+The following list contains the valid entries used by Vagrant to define
+the virtual resources created for each Virtual Machine; a minimal
+example follows the list:
+
+ * ip - The static IP address assigned to the VM. (String value)
+ * memory - The amount of RAM assigned to the VM. (MB - Integer value)
+ * cpus - Number of CPUs. (Integer value)
+ * volumes - List of volumes to be formatted and mounted to the VM.
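+
+As a minimal sketch, an abridged variant of the All-in-One sample shipped
+under *config/samples/* can be written from the *vagrant* folder as
+follows (the name, IP and sizes are illustrative and should be adjusted
+to the host capacity):
+
+.. code-block:: bash
+
+    # Create a one-node POD Descriptor File (config/pdf.yml)
+    cat << EOF > config/pdf.yml
+    - name: "kubernetes"
+      ip: "10.10.10.3"
+      memory: 8192
+      cpus: 2
+      roles:
+        - kube-master
+        - etcd
+        - ovn-central
+        - kube-node
+        - ovn-controller
+    EOF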
+
+config/default.yml
+******************
+
+If there is no *pdf.yml* file present in the *config* folder, Vagrant
+will use the information specified in **config/default.yml**. The
+following diagram displays how the services are distributed among the
+nodes when the default configuration is used.
+
+.. image:: ./img/default_pdf.png
+
+docs/
+#####
+
+This folder contains documentation files written in reStructuredText
+(RST) syntax. It's possible to generate documentation in *html*
+format using the `python tox module <https://tox.readthedocs.io/en/latest/>`_.
+Once tox is installed, the html files can be built with the following
+command:
+
+.. code-block:: bash
+
+ tox -e docs
+
+After its execution, the **docs/build** subfolder will contain
+subfolders and html files that can be opened from any web browser.
+
+galaxy-requirements.yml
+#######################
+
+This file declares the third-party Ansible roles required by KRD.
+Only those tasks which are not directly related to the main
+installation process have been delegated to these roles.
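+
+The **installer.sh** script resolves these requirements automatically;
+the equivalent manual command, executed from the *vagrant* folder, is:
+
+.. code-block:: bash
+
+    # Download the third-party roles declared in galaxy-requirements.yml
+    ansible-galaxy install -r galaxy-requirements.yml --ignore-errors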
+
+installer.sh
+############
+
+This is the main bash script; it installs dependencies and executes the
+Ansible playbooks that provision the KRD components on the target
+nodes. The script accepts several arguments that control the
+installation of additional components. For more information about its
+usage run:
+
+.. code-block:: bash
+
+ ./installer.sh -h
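+
+For example, a run (typically executed as root) that deploys only the
+Virtlet and Multus AddOns, installs the ONAP MultiCloud plugin and
+executes the health checks afterwards would look like this:
+
+.. code-block:: bash
+
+    # -a AddOns list, -p install the plugin, -t run healthchecks, -v verbose
+    ./installer.sh -a "virtlet multus" -p -t -v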
+
+inventory/
+##########
+
+This folder contains the Ansible host inventory file. The
+**inventory/hosts.ini** file, which is used during the execution of the
+Ansible playbooks, is created by Vagrant from the values specified
+in the *config/pdf.yml* file (or *config/default.yml*).
+
+inventory/group_vars/k8s-cluster.yml
+************************************
+
+A preferred practice in Ansible is to not store variables in the
+main inventory file. The configuration variables required for
+`Kubespray <https://github.com/kubernetes-incubator/kubespray>`_ are
+stored in this file.
+
+node.sh
+#######
+
+This bash script is executed on every node once it has been
+provisioned. The script makes it possible to partition and mount the
+external volumes declared in the PDF.
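+
+Its only optional argument is a comma-separated list of
+*volume=mount_point* pairs, for example:
+
+.. code-block:: bash
+
+    # Partition /dev/sda and /dev/sdb and mount them on the given directories
+    ./node.sh -v sda=/var/lib/docker/,sdb=/var/lib/libvirt/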
+
+playbooks/
+##########
+
+This folder contains a set of Ansible playbooks which perform the
+tasks required for configuring services like Multus, Virtlet and/or
+OVN.
+
+playbooks/configure-krd.yml
+***************************
+
+This Ansible playbook gathers the actions that are common to all the
+Kubernetes AddOns offered by the KRD.
+
+playbooks/krd-vars.yml
+************************
+
+This file centralizes the version numbers and source URLs used for
+different components offered by the KRD. Bumping a version requires
+extensive testing to ensure compatibility.
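+
+The installation scripts read these values straight from this file; for
+instance, **installer.sh** obtains the pinned Go version with a
+one-liner similar to the following:
+
+.. code-block:: bash
+
+    # Extract the value of the go_version variable
+    grep "go_version" playbooks/krd-vars.yml | awk -F ': ' '{print $2}'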
+
+setup.sh
+########
+
+This bash script installs and configures the dependencies required to
+use the KRD through Virtual Machines.
+Some of these dependencies are:
+
+ - `Vagrant <https://www.vagrantup.com/>`_,
+ - `Libvirt <https://libvirt.org/>`_ or `VirtualBox <https://www.virtualbox.org/>`_
+
+The *-p* argument determines the Virtualization provider to be used
+and installed in the host machine.
+
+.. code-block:: bash
+
+ ./setup.sh -p libvirt
+
+Vagrant uses VirtualBox as its default virtualization provider. It's
+possible to modify this behavior through the global environment
+variable named **VAGRANT_DEFAULT_PROVIDER**.
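+
+For example, assuming Libvirt was installed through *setup.sh*, the
+following commands select it explicitly and provision the cluster:
+
+.. code-block:: bash
+
+    # Use the Libvirt provider for this shell session and bring the cluster up
+    export VAGRANT_DEFAULT_PROVIDER=libvirt
+    vagrant up && vagrant up installer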
+
+.. note:: The execution of this script is recommended only during the initial setup.
+
+tests/
+######
+
+This folder contains the health check scripts that verify the proper
+installation and configuration of the Kubernetes AddOns. Their
+execution is disabled by default. To enable them, pass the *-t*
+argument to the **installer.sh** bash script, usually by changing the
+provisioning arguments in the *Vagrantfile*.
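+
+Each script can also be executed by hand against an already provisioned
+cluster, which is what **installer.sh** does internally when the *-t*
+argument is given:
+
+.. code-block:: bash
+
+    # Run the Multus health check from the tests folder
+    cd tests
+    bash multus.sh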
diff --git a/docs/sampleCommands.rst b/docs/sampleCommands.rst
new file mode 100644
index 00000000..2407b260
--- /dev/null
+++ b/docs/sampleCommands.rst
@@ -0,0 +1,84 @@
+.. Copyright 2018 Intel Corporation.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+====================
+Sample cURL commands
+====================
+
+****
+POST
+****
+
+URL: `localhost:8081/v1/vnf_instances/cloudregion1/namespacetest`
+
+Request Body
+------------
+
+.. code-block:: json
+
+ {
+ "cloud_region_id": "region1",
+ "csar_id": "uuid",
+ "namespace": "test",
+ "oof_parameters": [{
+ "key1": "value1",
+ "key2": "value2",
+ "key3": {}
+ }],
+ "network_parameters": {
+ "oam_ip_address": {
+ "connection_point": "string",
+ "ip_address": "string",
+ "workload_name": "string"
+ }
+ }
+ }
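+
+For instance, this request can be sent with cURL as follows (assuming
+the payload above has been saved as *request.json*):
+
+.. code-block:: bash
+
+    # POST the VNF instance creation request to the plugin
+    curl -X POST -H "Content-Type: application/json" \
+         -d @request.json \
+         http://localhost:8081/v1/vnf_instances/cloudregion1/namespacetest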
+
+Expected Response
+-----------------
+
+.. code-block:: json
+
+ {
+ "response": "Created Deployment:nginx-deployment"
+ }
+
+The above POST request will download the following YAML file and run it on the Kubernetes cluster.
+
+.. code-block:: yaml
+
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.7.9
+ ports:
+ - containerPort: 80
+
+***
+GET
+***
+
+URL: `localhost:8081/v1/vnf_instances`
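+
+This endpoint can be queried with cURL in the same way:
+
+.. code-block:: bash
+
+    # List the existing VNF instances
+    curl -X GET http://localhost:8081/v1/vnf_instances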
diff --git a/doc/swagger.yaml b/docs/swagger.yaml
index 3b7e36ba..3b7e36ba 100644
--- a/doc/swagger.yaml
+++ b/docs/swagger.yaml
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..1c3bd862
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,25 @@
+[tox]
+minversion = 1.6
+skipsdist = True
+envlist = bashate
+
+[testenv]
+passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+usedevelop = False
+install_command = pip install {opts} {packages}
+
+[testenv:bashate]
+deps =
+ {env:BASHATE_INSTALL_PATH:bashate}
+ rstcheck
+whitelist_externals = bash
+commands = bash -c "find {toxinidir} -not -path {toxinidir}/.tox/\* \
+ -name \*.sh -type f \
+# E006 check for lines longer than 79 columns
+ -print0 | xargs -0 bashate -v -iE006"
+ bash -c "find {toxinidir} -not -path {toxinidir}/.tox/\* \
+ -name \*.rst -type f -print0 | xargs -0 rstcheck"
+
+[testenv:docs]
+deps = sphinx
+commands = sphinx-build -W -b html docs/src docs/build/html
diff --git a/vagrant/README.md b/vagrant/README.md
new file mode 100644
index 00000000..c76b081e
--- /dev/null
+++ b/vagrant/README.md
@@ -0,0 +1,52 @@
+# Kubernetes Reference Deployment
+
+## Summary
+
+This project offers a reference for deploying a Kubernetes cluster
+that satisfies the requirements of the [ONAP multicloud/k8s plugin][1]. Its
+Ansible playbooks make it possible to provision a deployment on Bare-metal
+servers or Virtual Machines.
+
+![Diagram](../docs/img/diagram.png)
+
+## Components
+
+| Name | Description | Source | Status |
+|:--------------:|:----------------------------------------------|:----------------------------------|:------:|
+| Kubernetes | Base Kubernetes deployment | [kubespray][2] | Done |
+| ovn-kubernetes | Integrates Opensource Virtual Networking | [configure-ovn-kubernetes.yml][3] | Tested |
+| Virtlet        | Allows running VMs                             | [configure-virtlet.yml][4]        | Tested |
+| Multus | Provides Multiple Network support in a pod | [configure-multus.yml][5] | Tested |
+| NFD | Node feature discovery | [configure-nfd.yml][7] | Tested |
+
+## Deployment
+
+The [installer](installer.sh) bash script contains the minimal
+Ubuntu instructions required for running this project and can be
+executed directly for Bare-metal deployments.
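+
+A minimal sketch of such a Bare-metal run, assuming `inventory/hosts.ini`
+already describes the target nodes and the command is run as root from
+this folder (with *-p* installing the MultiCloud plugin and *-t* running
+the healthchecks afterwards), would be:
+
+    $ ./installer.sh -p -t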
+
+### Virtual Machines
+
+This project uses the [Vagrant tool][6] to provision Virtual Machines
+automatically. The [setup](setup.sh) bash script contains the
+Linux instructions that install the dependencies and plugins required
+for its usage. This script supports two virtualization technologies
+(Libvirt and VirtualBox).
+
+ $ ./setup.sh -p libvirt
+
+Once Vagrant is installed, it's possible to provision a cluster using
+the following instructions:
+
+ $ vagrant up && vagrant up installer
+
+## License
+
+Apache-2.0
+
+[1]: https://git.onap.org/multicloud/k8s
+[2]: https://github.com/kubernetes-incubator/kubespray
+[3]: playbooks/configure-ovn-kubernetes.yml
+[4]: playbooks/configure-virtlet.yml
+[5]: playbooks/configure-multus.yml
+[6]: https://www.vagrantup.com/
+[7]: playbooks/configure-nfd.yml
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
new file mode 100644
index 00000000..ba71ba7e
--- /dev/null
+++ b/vagrant/Vagrantfile
@@ -0,0 +1,114 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+box = {
+ :virtualbox => { :name => 'elastic/ubuntu-16.04-x86_64', :version => '20180708.0.0' },
+ :libvirt => { :name => 'elastic/ubuntu-16.04-x86_64', :version=> '20180210.0.0'}
+}
+
+require 'yaml'
+pdf = File.dirname(__FILE__) + '/config/default.yml'
+if File.exist?(File.dirname(__FILE__) + '/config/pdf.yml')
+ pdf = File.dirname(__FILE__) + '/config/pdf.yml'
+end
+nodes = YAML.load_file(pdf)
+
+# Inventory file creation
+File.open(File.dirname(__FILE__) + "/inventory/hosts.ini", "w") do |inventory_file|
+ inventory_file.puts("[all:vars]\nansible_connection=ssh\nansible_ssh_user=vagrant\nansible_ssh_pass=vagrant\n\n[all]")
+ nodes.each do |node|
+ inventory_file.puts("#{node['name']}\tansible_ssh_host=#{node['ip']} ansible_ssh_port=22")
+ end
+ ['kube-master', 'kube-node', 'etcd', 'ovn-central', 'ovn-controller', 'virtlet'].each do|group|
+ inventory_file.puts("\n[#{group}]")
+ nodes.each do |node|
+ if node['roles'].include?("#{group}")
+ inventory_file.puts(node['name'])
+ end
+ end
+ end
+ inventory_file.puts("\n[k8s-cluster:children]\nkube-node\nkube-master")
+end
+
+provider = (ENV['VAGRANT_DEFAULT_PROVIDER'] || :virtualbox).to_sym
+puts "[INFO] Provider: #{provider} "
+
+if ENV['no_proxy'] != nil or ENV['NO_PROXY']
+ $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+ nodes.each do |node|
+ $no_proxy += "," + node['ip']
+ end
+ $subnet = "192.168.121"
+ if provider == :virtualbox
+ $subnet = "10.0.2"
+ end
+ # NOTE: This range is based on vagrant-libvirt network definition CIDR 192.168.121.0/27
+ (1..31).each do |i|
+ $no_proxy += ",#{$subnet}.#{i}"
+ end
+end
+
+Vagrant.configure("2") do |config|
+ config.vm.box = box[provider][:name]
+ config.vm.box_version = box[provider][:version]
+
+ if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil
+ if Vagrant.has_plugin?('vagrant-proxyconf')
+ config.proxy.http = ENV['http_proxy'] || ENV['HTTP_PROXY'] || ""
+ config.proxy.https = ENV['https_proxy'] || ENV['HTTPS_PROXY'] || ""
+ config.proxy.no_proxy = $no_proxy
+ config.proxy.enabled = { docker: false }
+ end
+ end
+
+ nodes.each do |node|
+ config.vm.define node['name'] do |nodeconfig|
+ nodeconfig.vm.hostname = node['name']
+ nodeconfig.vm.network :private_network, :ip => node['ip'], :type => :static
+ nodeconfig.vm.provider 'virtualbox' do |v|
+ v.customize ["modifyvm", :id, "--memory", node['memory']]
+ v.customize ["modifyvm", :id, "--cpus", node['cpus']]
+ if node.has_key? "volumes"
+ node['volumes'].each do |volume|
+ $volume_file = "#{node['name']}-#{volume['name']}.vdi"
+ unless File.exist?($volume_file)
+ v.customize ['createmedium', 'disk', '--filename', $volume_file, '--size', volume['size']]
+ end
+ v.customize ['storageattach', :id, '--storagectl', 'IDE Controller', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', $volume_file]
+ end
+ end
+ end
+ nodeconfig.vm.provider 'libvirt' do |v|
+ v.memory = node['memory']
+ v.cpus = node['cpus']
+ v.nested = true
+ v.cpu_mode = 'host-passthrough'
+ v.management_network_address = "192.168.121.0/27"
+ nodeconfig.vm.provision 'shell' do |sh|
+ sh.path = "node.sh"
+ if node.has_key? "volumes"
+ $volume_mounts_dict = ''
+ node['volumes'].each do |volume|
+ $volume_mounts_dict += "#{volume['name']}=#{volume['mount']},"
+ $volume_file = "./#{node['name']}-#{volume['name']}.qcow2"
+ v.storage :file, :bus => 'sata', :device => volume['name'], :size => volume['size']
+ end
+ sh.args = ['-v', $volume_mounts_dict[0...-1]]
+ end
+ end
+ end
+ end
+ end
+ sync_type = "virtualbox"
+ if provider == :libvirt
+ sync_type = "nfs"
+ end
+ config.vm.define :installer, primary: true, autostart: false do |installer|
+ installer.vm.hostname = "multicloud"
+ installer.vm.network :private_network, :ip => "10.10.10.2", :type => :static
+ installer.vm.provision 'shell' do |sh|
+ sh.path = "installer.sh"
+ sh.args = ['-p', '-v', '-w', '/vagrant']
+ end
+ end
+end
diff --git a/vagrant/config/default.yml b/vagrant/config/default.yml
new file mode 100644
index 00000000..6f26d2d2
--- /dev/null
+++ b/vagrant/config/default.yml
@@ -0,0 +1,53 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "controller01"
+ ip: "10.10.10.3"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-central
+- name: "controller02"
+ ip: "10.10.10.4"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-controller
+- name: "controller03"
+ ip: "10.10.10.5"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-controller
+- name: "compute01"
+ ip: "10.10.10.6"
+ memory: 8192
+ cpus: 2
+ volumes:
+ - name: sda
+ size: 50
+ mount: /var/lib/docker/
+ roles:
+ - kube-node
+ - ovn-controller
+ - virtlet
+- name: "compute02"
+ ip: "10.10.10.7"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-node
+ - ovn-controller
diff --git a/vagrant/config/samples/pdf.yml.aio b/vagrant/config/samples/pdf.yml.aio
new file mode 100644
index 00000000..2ad95639
--- /dev/null
+++ b/vagrant/config/samples/pdf.yml.aio
@@ -0,0 +1,25 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "kubernetes"
+ ip: "10.10.10.3"
+ memory: 8192
+ cpus: 2
+ volumes:
+ - name: sda
+ size: 50
+ mount: /var/lib/docker/
+ roles:
+ - kube-master
+ - etcd
+ - ovn-central
+ - kube-node
+ - ovn-controller
+ - virtlet
diff --git a/vagrant/config/samples/pdf.yml.mini b/vagrant/config/samples/pdf.yml.mini
new file mode 100644
index 00000000..d53a4537
--- /dev/null
+++ b/vagrant/config/samples/pdf.yml.mini
@@ -0,0 +1,33 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "master"
+ ip: "10.10.10.3"
+ memory: 8192
+ cpus: 2
+ roles:
+ - kube-master
+ - etcd
+ - ovn-central
+- name: "minion01"
+ ip: "10.10.10.4"
+ memory: 65536
+ cpus: 16
+ roles:
+ - kube-node
+ - ovn-controller
+ - virtlet
+- name: "minion02"
+ ip: "10.10.10.5"
+ memory: 65536
+ cpus: 16
+ roles:
+ - kube-node
+ - ovn-controller
diff --git a/vagrant/galaxy-requirements.yml b/vagrant/galaxy-requirements.yml
new file mode 100644
index 00000000..42fca71b
--- /dev/null
+++ b/vagrant/galaxy-requirements.yml
@@ -0,0 +1,15 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- src: andrewrothstein.go
+ version: v2.1.7
+- src: andrewrothstein.kubectl
+ version: v1.1.12
+- src: geerlingguy.docker
+ version: 2.5.1
diff --git a/vagrant/installer.sh b/vagrant/installer.sh
new file mode 100755
index 00000000..29866a82
--- /dev/null
+++ b/vagrant/installer.sh
@@ -0,0 +1,269 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# usage() - Prints the usage of the program
+function usage {
+ cat <<EOF
+usage: $0 [-a addons] [-p] [-v] [-w dir ]
+Optional Argument:
+ -a List of Kubernetes AddOns to be installed ( e.g. "ovn-kubernetes virtlet multus")
+ -p Installation of ONAP MultiCloud Kubernetes plugin
+ -v Enable verbosity
+ -w Working directory
+ -t Running healthchecks
+EOF
+}
+
+# _install_go() - Install GoLang package
+function _install_go {
+ version=$(grep "go_version" ${krd_playbooks}/krd-vars.yml | awk -F ': ' '{print $2}')
+ local tarball=go$version.linux-amd64.tar.gz
+
+ if $(go version &>/dev/null); then
+ return
+ fi
+
+ wget https://dl.google.com/go/$tarball
+ tar -C /usr/local -xzf $tarball
+ rm $tarball
+
+ export PATH=$PATH:/usr/local/go/bin
+ sed -i "s|^PATH=.*|PATH=\"$PATH\"|" /etc/environment
+ export INSTALL_DIRECTORY=/usr/local/bin
+ curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+}
+
+# _install_pip() - Install Python Package Manager
+function _install_pip {
+ if $(pip --version &>/dev/null); then
+ return
+ fi
+ apt-get install -y python-dev
+ curl -sL https://bootstrap.pypa.io/get-pip.py | python
+ pip install --upgrade pip
+}
+
+# _install_ansible() - Install and Configure Ansible program
+function _install_ansible {
+ mkdir -p /etc/ansible/
+ cat <<EOL > /etc/ansible/ansible.cfg
+[defaults]
+host_key_checking = false
+EOL
+ if $(ansible --version &>/dev/null); then
+ return
+ fi
+ _install_pip
+ pip install ansible
+}
+
+# _install_docker() - Download and install docker-engine
+function _install_docker {
+ local max_concurrent_downloads=${1:-3}
+
+ if $(docker version &>/dev/null); then
+ return
+ fi
+ apt-get install -y software-properties-common linux-image-extra-$(uname -r) linux-image-extra-virtual apt-transport-https ca-certificates curl
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ apt-get update
+ apt-get install -y docker-ce
+
+ mkdir -p /etc/systemd/system/docker.service.d
+ if [[ -n "${http_proxy:-}" ]]; then
+ cat <<EOL > /etc/systemd/system/docker.service.d/http-proxy.conf
+[Service]
+Environment="HTTP_PROXY=$http_proxy"
+EOL
+ fi
+ if [[ -n "${https_proxy:-}" ]]; then
+ cat <<EOL > /etc/systemd/system/docker.service.d/https-proxy.conf
+[Service]
+Environment="HTTPS_PROXY=$https_proxy"
+EOL
+ fi
+ if [[ -n "${no_proxy:-}" ]]; then
+ cat <<EOL > /etc/systemd/system/docker.service.d/no-proxy.conf
+[Service]
+Environment="NO_PROXY=$no_proxy"
+EOL
+ fi
+ systemctl daemon-reload
+ echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" >> /etc/default/docker
+ usermod -aG docker $USER
+
+ systemctl restart docker
+ sleep 10
+}
+
+# install_k8s() - Install Kubernetes using kubespray tool
+function install_k8s {
+ echo "Deploying kubernetes"
+ local dest_folder=/opt
+ version=$(grep "kubespray_version" ${krd_playbooks}/krd-vars.yml | awk -F ': ' '{print $2}')
+ local tarball=v$version.tar.gz
+
+ apt-get install -y sshpass
+ _install_ansible
+ wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
+ tar -C $dest_folder -xzf $tarball
+ rm $tarball
+
+ pushd $dest_folder/kubespray-$version
+ pip install -r requirements.txt
+ rm -f $krd_inventory_folder/group_vars/all.yml
+ if [[ -n "${verbose+x}" ]]; then
+ echo "kube_log_level: 5" >> $krd_inventory_folder/group_vars/all.yml
+ else
+ echo "kube_log_level: 2" >> $krd_inventory_folder/group_vars/all.yml
+ fi
+ if [[ -n "${http_proxy+x}" ]]; then
+ echo "http_proxy: \"$http_proxy\"" >> $krd_inventory_folder/group_vars/all.yml
+ fi
+ if [[ -n "${https_proxy+x}" ]]; then
+ echo "https_proxy: \"$https_proxy\"" >> $krd_inventory_folder/group_vars/all.yml
+ fi
+ ansible-playbook $verbose -i $krd_inventory cluster.yml -b | tee $log_folder/setup-kubernetes.log
+ popd
+
+ # Configure environment
+ mkdir -p $HOME/.kube
+ mv $HOME/admin.conf $HOME/.kube/config
+}
+
+# install_addons() - Install Kubernetes AddOns
+function install_addons {
+ echo "Installing Kubernetes AddOns"
+ apt-get install -y sshpass
+ _install_ansible
+ ansible-galaxy install -r $krd_folder/galaxy-requirements.yml --ignore-errors
+
+ ansible-playbook $verbose -i $krd_inventory $krd_playbooks/configure-krd.yml | tee $log_folder/setup-krd.log
+ for addon in $addons; do
+ echo "Deploying $addon using configure-$addon.yml playbook.."
+ ansible-playbook $verbose -i $krd_inventory $krd_playbooks/configure-${addon}.yml | tee $log_folder/setup-${addon}.log
+ if [[ -n "${testing_enabled+x}" ]]; then
+ pushd $krd_tests
+ bash ${addon}.sh
+ popd
+ fi
+ done
+}
+
+# install_plugin() - Install ONAP Multicloud Kubernetes plugin
+function install_plugin {
+ echo "Installing multicloud/k8s plugin"
+ _install_go
+ _install_docker
+ pip install docker-compose
+
+ mkdir -p /opt/{csar,kubeconfig,consul/config}
+ cp $HOME/.kube/config /opt/kubeconfig/krd
+ export CSAR_DIR=/opt/csar
+ export KUBE_CONFIG_DIR=/opt/kubeconfig
+ echo "export CSAR_DIR=${CSAR_DIR}" >> /etc/environment
+ echo "export KUBE_CONFIG_DIR=${KUBE_CONFIG_DIR}" >> /etc/environment
+
+ GOPATH=$(go env GOPATH)
+ git clone https://git.onap.org/multicloud/k8s $GOPATH/src/k8-plugin-multicloud
+ pushd $GOPATH/src/k8-plugin-multicloud/deployments
+ ./build.sh
+ docker-compose up -d
+ popd
+
+ if [[ -n "${testing_enabled+x}" ]]; then
+ pushd $krd_tests
+ bash plugin.sh
+ popd
+ fi
+}
+
+# _install_crictl() - Install Container Runtime Interface (CRI) CLI
+function _install_crictl {
+ local version="v1.0.0-alpha.0" # More info: https://github.com/kubernetes-incubator/cri-tools#current-status
+
+ wget https://github.com/kubernetes-incubator/cri-tools/releases/download/$version/crictl-$version-linux-amd64.tar.gz
+ tar zxvf crictl-$version-linux-amd64.tar.gz -C /usr/local/bin
+ rm -f crictl-$version-linux-amd64.tar.gz
+
+ cat << EOL > /etc/crictl.yaml
+runtime-endpoint: unix:///run/criproxy.sock
+image-endpoint: unix:///run/criproxy.sock
+EOL
+}
+
+# _print_kubernetes_info() - Prints the login Kubernetes information
+function _print_kubernetes_info {
+ if ! $(kubectl version &>/dev/null); then
+ return
+ fi
+ # Expose Dashboard using NodePort
+ KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+
+ master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F ":" '{print $2}')
+ node_port=$(kubectl get service -n kube-system | grep kubernetes-dashboard | awk '{print $5}' |awk -F "[:/]" '{print $2}')
+
+ printf "Kubernetes Info\n===============\n" > $k8s_info_file
+ echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Admin user: kube" >> $k8s_info_file
+ echo "Admin password: secret" >> $k8s_info_file
+}
+
+# Configuration values
+addons="virtlet ovn-kubernetes multus nfd"
+krd_folder="$(dirname "$0")"
+verbose=""
+
+while getopts "a:pvw:t" opt; do
+ case $opt in
+ a)
+ addons="$OPTARG"
+ ;;
+ p)
+ plugin_enabled="true"
+ ;;
+ v)
+ set -o xtrace
+ verbose="-vvv"
+ ;;
+ w)
+ krd_folder="$OPTARG"
+ ;;
+ t)
+ testing_enabled="true"
+ ;;
+ ?)
+ usage
+ exit
+ ;;
+ esac
+done
+log_folder=/var/log/krd
+krd_inventory_folder=$krd_folder/inventory
+krd_inventory=$krd_inventory_folder/hosts.ini
+krd_playbooks=$krd_folder/playbooks
+krd_tests=$krd_folder/tests
+k8s_info_file=$krd_folder/k8s_info.log
+
+mkdir -p $log_folder
+
+# Install dependencies
+apt-get update
+install_k8s
+install_addons
+if [[ -n "${plugin_enabled+x}" ]]; then
+ install_plugin
+fi
+_print_kubernetes_info
diff --git a/vagrant/inventory/group_vars/k8s-cluster.yml b/vagrant/inventory/group_vars/k8s-cluster.yml
new file mode 100644
index 00000000..ab5bf7b9
--- /dev/null
+++ b/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -0,0 +1,156 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+#Directory where etcd data stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+kubelet_load_modules: true
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+docker_dns_servers_strict: false
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: true
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changable...
+kube_cert_group: kube-cert
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "secret"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+kube_token_auth: true
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: flannel
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: false
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# Enable MountPropagation gate feature
+local_volumes_enabled: true
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.11.2
diff --git a/vagrant/node.sh b/vagrant/node.sh
new file mode 100755
index 00000000..e6702457
--- /dev/null
+++ b/vagrant/node.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+set -o xtrace
+
+# usage() - Prints the usage of the program
+function usage {
+ cat <<EOF
+usage: $0 [-v volumes]
+Optional Argument:
+ -v List of key pair values for volumes and mount points ( e. g. sda=/var/lib/docker/,sdb=/var/lib/libvirt/ )
+EOF
+}
+
+# mount_external_partition() - Create partition and mount the external volume
+function mount_external_partition {
+ local dev_name="/dev/$1"
+ local mount_dir=$2
+
+ sfdisk $dev_name --no-reread << EOF
+;
+EOF
+ mkfs -t ext4 ${dev_name}1
+ mkdir -p $mount_dir
+ mount ${dev_name}1 $mount_dir
+ echo "${dev_name}1 $mount_dir ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
+}
+
+while getopts "h?v:" opt; do
+ case $opt in
+ v)
+ dict_volumes="$OPTARG"
+ ;;
+ h|\?)
+ usage
+ exit
+ ;;
+ esac
+done
+
+swapoff -a
+if [[ -n "${dict_volumes+x}" ]]; then
+ for kv in ${dict_volumes//,/ } ;do
+ mount_external_partition ${kv%=*} ${kv#*=}
+ done
+fi
diff --git a/vagrant/playbooks/Debian.yml b/vagrant/playbooks/Debian.yml
new file mode 100644
index 00000000..96357fe2
--- /dev/null
+++ b/vagrant/playbooks/Debian.yml
@@ -0,0 +1,22 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+openvswitch_service: openvswitch-switch
+openvswitch_pkgs:
+ - openvswitch-common
+ - openvswitch-switch
+ - libopenvswitch
+ - openvswitch-datapath-dkms
+ovn_central_service: ovn-central
+ovn_central_pkgs:
+ - ovn-central # <= 2.8.1-1
+ovn_controller_service: ovn-host
+ovn_pkgs:
+ - ovn-common # <= 2.8.1-1
+ - ovn-host
diff --git a/vagrant/playbooks/RedHat.yml b/vagrant/playbooks/RedHat.yml
new file mode 100644
index 00000000..fe839bbd
--- /dev/null
+++ b/vagrant/playbooks/RedHat.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+openvswitch_service:
+openvswitch_pkgs:
+ - openvswitch
+ovn_central_service: ovn-central
+ovn_central_pkgs:
+ - ovn-central # <= 2.8.1-1
+ovn_controller_service: ovn-host
+ovn_pkgs:
+ - ovn-common # <= 2.8.1-1
+ - ovn-host
diff --git a/vagrant/playbooks/Suse.yml b/vagrant/playbooks/Suse.yml
new file mode 100644
index 00000000..17d1147c
--- /dev/null
+++ b/vagrant/playbooks/Suse.yml
@@ -0,0 +1,20 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+openvswitch_service:
+openvswitch_pkgs:
+ - openvswitch
+ - openvswitch-switch
+ovn_central_service: ovn-central
+ovn_central_pkgs:
+ - ovn-central # <= 2.8.1-1
+ovn_controller_service: ovn-host
+ovn_pkgs:
+ - ovn-common # <= 2.8.1-1
+ - ovn-host
diff --git a/vagrant/playbooks/configure-krd.yml b/vagrant/playbooks/configure-krd.yml
new file mode 100644
index 00000000..c8146ed8
--- /dev/null
+++ b/vagrant/playbooks/configure-krd.yml
@@ -0,0 +1,16 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: kube-node
+ become: yes
+ tasks:
+ - name: copy admin.conf file to kube-nodes
+ copy:
+ src: "{{ ansible_env.HOME}}/.kube/config"
+ dest: "/etc/kubernetes/admin.conf"
diff --git a/vagrant/playbooks/configure-multus.yml b/vagrant/playbooks/configure-multus.yml
new file mode 100644
index 00000000..58eda4bd
--- /dev/null
+++ b/vagrant/playbooks/configure-multus.yml
@@ -0,0 +1,110 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: kube-node
+ become: yes
+ pre_tasks:
+ - name: Load krd variables
+ include_vars:
+ file: krd-vars.yml
+ roles:
+ - { role: andrewrothstein.go, when: multus_source_type == "source" }
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
+ tasks:
+ - name: create multus binary folder
+ file:
+ state: directory
+ path: "{{ item }}"
+ with_items:
+ - /opt/cni/bin
+ - "{{ multus_dest }}"
+ - name: getting source code
+ block:
+ - name: clone Multus repo
+ git:
+ repo: "{{ multus_url }}"
+ dest: "{{ multus_dest }}"
+ version: "{{ multus_version }}"
+ force: yes
+ - name: build multus source code
+ command: ./build
+ args:
+ chdir: "{{ multus_dest }}"
+ - name: copy multus binary to opt folder
+ command: "mv {{ multus_dest }}/bin/multus /opt/cni/bin/multus"
+ when: multus_source_type == "source"
+ - name: getting binary
+ block:
+ - name: download Multus tarball
+ get_url:
+ url: "{{ multus_url }}"
+ dest: "/tmp/multus.tar.gz"
+ - name: extract multus source code
+ unarchive:
+ src: "/tmp/multus.tar.gz"
+ dest: "{{ multus_dest }}"
+ remote_src: yes
+ - name: copy multus binary to opt folder
+ command: "mv {{ multus_dest }}/multus-cni_v{{ multus_version }}_linux_amd64/multus-cni /opt/cni/bin/multus"
+ when: multus_source_type == "tarball"
+ - name: create multus configuration file
+ blockinfile:
+ marker: ""
+ path: /etc/cni/net.d/00-multus.conf
+ create: yes
+ block: |
+ {
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/admin.conf",
+ "delegates": [
+ {
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+ ]
+ }
+
+- hosts: localhost
+ roles:
+ - andrewrothstein.kubectl
+ tasks:
+ - name: define a CRD network object specification
+ blockinfile:
+ path: /tmp/crdnetwork.yml
+ create: yes
+ block: |
+ apiVersion: apiextensions.k8s.io/v1beta1
+ kind: CustomResourceDefinition
+ metadata:
+ # name must match the spec fields below, and be in the form: <plural>.<group>
+ name: networks.kubernetes.cni.cncf.io
+ spec:
+ # group name to use for REST API: /apis/<group>/<version>
+ group: kubernetes.cni.cncf.io
+ # version name to use for REST API: /apis/<group>/<version>
+ version: v1
+ # either Namespaced or Cluster
+ scope: Namespaced
+ names:
+ # plural name to be used in the URL: /apis/<group>/<version>/<plural>
+ plural: networks
+ # singular name to be used as an alias on the CLI and for display
+ singular: network
+ # kind is normally the CamelCased singular type. Your resource manifests use this.
+ kind: Network
+ # shortNames allow shorter string to match your resource on the CLI
+ shortNames:
+ - net
+ - name: create network objects
+ shell: "/usr/local/bin/kubectl apply -f /tmp/crdnetwork.yml"
+ ignore_errors: True
diff --git a/vagrant/playbooks/configure-nfd.yml b/vagrant/playbooks/configure-nfd.yml
new file mode 100644
index 00000000..90bad671
--- /dev/null
+++ b/vagrant/playbooks/configure-nfd.yml
@@ -0,0 +1,57 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- hosts: kube-node
+ tasks:
+ - name: Load krd variables
+ include_vars:
+ file: krd-vars.yml
+ - name: clone NFD repo
+ git:
+ repo: "{{ nfd_url }}"
+ dest: "{{ nfd_dest }}"
+ version: "{{ nfd_version }}"
+ force: yes
+ when: nfd_source_type == "source"
+ - name: build NFD image
+ become: yes
+ make:
+ chdir: "{{ nfd_dest }}"
+ - name: get NFD image name
+ become: yes
+ shell: "docker images | grep kubernetes_incubator | awk '{printf(\"%s:%s\\n\", $1,$2)}'"
+ register: nfd_image
+ - name: replace NFD image name
+ lineinfile:
+ path: "{{ nfd_dest }}/node-feature-discovery-{{ item }}.json.template"
+ regexp: "\"image\": \"quay.io/kubernetes_incubator.*i"
+ line: "\"image\": \"{{ nfd_image.stdout }}\","
+ with_items:
+ - daemonset
+ - job
+ - name: copying rbac and daemonset files
+ fetch:
+ src: "{{ nfd_dest }}/{{ item }}"
+ dest: "/tmp/"
+ flat: yes
+ with_items:
+ - rbac.yaml
+ - node-feature-discovery-daemonset.json.template
+
+- hosts: localhost
+ become: yes
+ roles:
+ - andrewrothstein.kubectl
+ tasks:
+ - name: create service accounts
+ command: "/usr/local/bin/kubectl apply -f /tmp/{{ item }}"
+ with_items:
+ - rbac.yaml
+ - node-feature-discovery-daemonset.json.template
diff --git a/vagrant/playbooks/configure-ovn-kubernetes.yml b/vagrant/playbooks/configure-ovn-kubernetes.yml
new file mode 100644
index 00000000..cea102f2
--- /dev/null
+++ b/vagrant/playbooks/configure-ovn-kubernetes.yml
@@ -0,0 +1,131 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- import_playbook: configure-ovn.yml
+
+- hosts: ovn-central:ovn-controller
+ vars:
+ central_node_ip: "{{ hostvars[groups['ovn-central'][0]]['ansible_ssh_host'] }}"
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
+ roles:
+ - role: andrewrothstein.go
+ tasks:
+ - name: Load krd variables
+ include_vars:
+ file: krd-vars.yml
+ - name: clone ovn-kubernetes repo
+ git:
+ repo: "{{ ovn_kubernetes_url }}"
+ dest: "{{ ovn_kubernetes_dest }}"
+ version: "{{ ovn_kubernetes_version }}"
+ force: yes
+ when: ovn_kubernetes_source_type == "source"
+ - name: getting binaries
+ block:
+ - name: download ovn-kubernetes tarball
+ get_url:
+ url: "{{ ovn_kubernetes_url }}"
+ dest: /tmp/ovn-kubernetes.tar.gz
+ - name: extract ovn-kubernetes source code
+ unarchive:
+ src: /tmp/ovn-kubernetes.tar.gz
+ dest: /tmp/
+ remote_src: yes
+ - name: rename extracted folder
+ command: "mv /tmp/ovn-kubernetes-{{ ovn_kubernetes_version }}/ {{ ovn_kubernetes_dest }}/"
+ when: ovn_kubernetes_source_type == "tarball"
+ - name: make ovnkube files
+ make:
+ chdir: "{{ ovn_kubernetes_dest }}/go-controller"
+ - name: install ovnkube files
+ make:
+ chdir: "{{ ovn_kubernetes_dest }}/go-controller"
+ target: install
+ become: yes
+ - name: create OVN Kubernetes config file
+ become: yes
+ blockinfile:
+ path: /etc/openvswitch/ovn_k8s.conf
+ create: yes
+ block: |
+ [logging]
+ loglevel=5
+ logfile=/var/log/openvswitch/ovnkube.log
+
+ [cni]
+ conf-dir=/etc/cni/net.d
+ plugin=ovn-k8s-cni-overlay
+ - name: create ovnkube logging directory
+ file:
+ path: /var/log/openvswitch
+ state: directory
+
+- hosts: ovn-central
+ become: yes
+ vars:
+ central_node_ip: "{{ hostvars[groups['ovn-central'][0]]['ansible_ssh_host'] }}"
+ tasks:
+ - name: create ovnkube central systemd service
+ blockinfile:
+ path: /etc/systemd/system/ovn-k8s-central.service
+ create: yes
+ block: |
+ [Unit]
+ Description=OVN Central Daemon
+
+ [Service]
+ ExecStart=/usr/bin/ovnkube \
+ -net-controller \
+ -init-master="{{ ansible_hostname }}" \
+ -init-node="{{ ansible_hostname }}" \
+ -nodeport \
+ -k8s-kubeconfig=/etc/kubernetes/admin.conf \
+ -k8s-token="test" \
+ -nb-address="tcp://{{ central_node_ip }}:6641" \
+ -sb-address="tcp://{{ central_node_ip }}:6642"
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: start ovnkube central systemd service
+ service:
+ name: ovn-k8s-central
+ state: started
+ enabled: yes
+
+- hosts: ovn-controller
+ become: yes
+ vars:
+ central_node_ip: "{{ hostvars[groups['ovn-central'][0]]['ansible_ssh_host'] }}"
+ tasks:
+ - name: create ovnkube controller systemd service
+ blockinfile:
+ path: /etc/systemd/system/ovn-k8s-host.service
+ create: yes
+ block: |
+ [Unit]
+ Description=OVN Controller Daemon
+
+ [Service]
+ ExecStart=/usr/bin/ovnkube \
+ -init-gateways \
+ -init-node="{{ ansible_hostname }}" \
+ -nodeport \
+ -k8s-kubeconfig=/etc/kubernetes/admin.conf \
+ -k8s-token="test" \
+ -nb-address="tcp://{{ central_node_ip }}:6641" \
+ -sb-address="tcp://{{ central_node_ip }}:6642"
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: start ovnkube controller systemd service
+ service:
+ name: ovn-k8s-host
+ state: started
+ enabled: yes
diff --git a/vagrant/playbooks/configure-ovn.yml b/vagrant/playbooks/configure-ovn.yml
new file mode 100644
index 00000000..3fd2c765
--- /dev/null
+++ b/vagrant/playbooks/configure-ovn.yml
@@ -0,0 +1,109 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: ovn-central:ovn-controller
+ become: yes
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ ansible_os_family }}.yml"
+ - name: get WAND GPG key file
+ get_url:
+ url: https://packages.wand.net.nz/keyring.gpg
+ dest: /etc/apt/trusted.gpg.d/wand.gpg
+ - name: add WAND Debian Repo
+ apt_repository:
+ repo: "deb https://packages.wand.net.nz {{ ansible_lsb.codename }} main"
+ state: present
+ - name: install OpenVSwitch packages
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ openvswitch_pkgs }}"
+ - name: install Open Virtual Network components
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ ovn_pkgs }}"
+ - name: start OpenVSwitch services
+ service:
+ name: "{{ openvswitch_service }}"
+ state: started
+
+- hosts: ovn-central
+ become: yes
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ ansible_os_family }}.yml"
+ - name: install Open Virtual Network central components
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ ovn_central_pkgs }}"
+ - name: enable remote connections to southbound and northbound dbs
+ lineinfile:
+ path: /etc/default/ovn-central
+ line: "OVN_CTL_OPTS=\" --db-sb-create-insecure-remote=yes --db-nb-create-insecure-remote=yes\""
+ state: present
+ when: ansible_os_family == "Debian"
+ - name: start OVN northbound database services
+ service:
+ name: "{{ ovn_central_service }}"
+ state: restarted
+
+- hosts: ovn-controller
+ become: yes
+ vars:
+ ovn_central_ips: "{{ groups['ovn-central'] | map('extract', hostvars, ['ansible_ssh_host']) | join(',') }}"
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ ansible_os_family }}.yml"
+ - name: stop the ovn-controller service
+ service:
+ name: "{{ ovn_controller_service }}"
+ state: stopped
+ - name: configure OpenVSwitch databases
+ openvswitch_db:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: ovn-remote
+ value: \""tcp:{{ item }}:6642"\"
+ with_items: "{{ ovn_central_ips }}"
+ - name: enable overlay network protocols
+ openvswitch_db:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: ovn-encap-type
+ value: geneve
+ - name: configure the overlay network local endpoint IP address.
+ openvswitch_db:
+ table: Open_vSwitch
+ record: .
+ col: external_ids
+ key: ovn-encap-ip
+ value: "{{ ansible_default_ipv4.address }}"
+ - name: start the ovn-controller service
+ service:
+ name: "{{ ovn_controller_service }}"
+ state: started
+    - name: ensure the br-int bridge exists
+ openvswitch_bridge:
+ bridge: br-int
+ state: present
+ fail_mode: secure
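+# With fail_mode=secure, br-int drops traffic until ovn-controller installs
+# its flows; a rough manual check once the service is up is "ovs-vsctl show"
+# followed by "ovs-ofctl dump-flows br-int".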
diff --git a/vagrant/playbooks/configure-virtlet.yml b/vagrant/playbooks/configure-virtlet.yml
new file mode 100644
index 00000000..fcc33716
--- /dev/null
+++ b/vagrant/playbooks/configure-virtlet.yml
@@ -0,0 +1,233 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ become: yes
+ vars:
+ images_file: /tmp/images.yaml
+ pre_tasks:
+ - name: Load krd variables
+ include_vars:
+ file: krd-vars.yml
+ roles:
+ - andrewrothstein.kubectl
+ - { role: geerlingguy.docker, when: virtlet_source_type == "source" }
+ tasks:
+ - name: create Virtlet binary folder
+ file:
+ state: directory
+ path: "{{ virtlet_dest }}"
+ - name: apply virtlet extraRuntime label
+ command: "/usr/local/bin/kubectl label node {{ item }} extraRuntime=virtlet --overwrite"
+ with_inventory_hostnames: virtlet
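+    # The extraRuntime=virtlet label is what the Virtlet DaemonSet node
+    # affinity selects on, so only hosts in the "virtlet" inventory group
+    # end up running the Virtlet runtime.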
+    - name: create image translations configmap file
+ blockinfile:
+ path: "{{ images_file }}"
+ create: yes
+ block: |
+ translations:
+ - name: ubuntu/16.04
+ url: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ - regexp: 'centos/(\d+)-(\d+)'
+ url: 'https://cloud.centos.org/centos/$1/images/CentOS-$1-x86_64-GenericCloud-$2.qcow2'
+ - name: fedora
+ url: https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
+ {% if lookup('env','http_proxy') != "" %}
+ transports:
+ "":
+ proxy: "{{ lookup('env','http_proxy') }}"
+ {% endif %}
+ - name: install image translations configmap
+ shell: "/usr/local/bin/kubectl create configmap -n kube-system virtlet-image-translations --from-file {{ images_file }} --dry-run -o yaml | /usr/local/bin/kubectl apply -f -"
+ ignore_errors: True
+ - name: create Virtlet folder
+ file:
+ state: directory
+ path: "{{ virtlet_dest }}"
+ - name: getting source code
+ block:
+ - name: clone Virtlet repo
+ git:
+ repo: "{{ virtlet_url }}"
+ dest: "{{ virtlet_dest }}"
+ version: "{{ virtlet_version }}"
+ force: yes
+ - name: configure proxy values for docker service
+ block:
+ - name: create docker config folder
+ file:
+ state: directory
+ path: "/etc/systemd/system/docker.service.d"
+ - name: Configure docker service to use http_proxy env value
+ blockinfile:
+ dest: "/etc/systemd/system/docker.service.d/http-proxy.conf"
+ create: yes
+ block: |
+ [Service]
+ Environment="HTTP_PROXY={{ lookup('env','http_proxy') }}"
+ when:
+ - lookup('env','http_proxy') != "fooproxy"
+ - name: Configure docker service to use https_proxy env value
+ blockinfile:
+ dest: "/etc/systemd/system/docker.service.d/https-proxy.conf"
+ create: yes
+ block: |
+ [Service]
+ Environment="HTTPS_PROXY={{ lookup('env','https_proxy') }}"
+ when:
+ - lookup('env','https_proxy') != "fooproxy"
+ - name: Configure docker service to use no_proxy env value
+ blockinfile:
+ dest: "/etc/systemd/system/docker.service.d/no-proxy.conf"
+ create: yes
+ block: |
+ [Service]
+ Environment="NO_PROXY={{ lookup('env','no_proxy') }}"
+ when:
+ - lookup('env','no_proxy') != "fooproxy"
+ - name: reload systemd
+ command: systemctl daemon-reload
+ - name: restart docker service
+ service:
+ name: docker
+ state: restarted
+ when: lookup('env','http_proxy') != "fooproxy" or lookup('env','https_proxy') != "fooproxy" or lookup('env','no_proxy') != "fooproxy"
+ - name: build virtlet source code
+ command: ./cmd.sh build
+ args:
+ chdir: "{{ virtlet_dest }}/build"
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ when: virtlet_source_type == "source"
+ - name: download virtletctl
+ get_url:
+ url: "{{ virtlet_url }}"
+ dest: "{{ virtlet_dest }}/virtletctl"
+ when: virtlet_source_type == "binary"
+ - name: set virtletctl execution permissions
+ file:
+ path: "{{ virtlet_dest }}/virtletctl"
+ mode: "+x"
+ - name: install virtletctl as kubectl plugin
+ command: "{{ virtlet_dest }}/virtletctl install"
+ - name: create Virtlet k8s objects
+ shell: "/usr/local/bin/kubectl plugin virt gen | /usr/local/bin/kubectl apply -f -"
+ ignore_errors: True
+ - name: wait for Virtlet daemonset
+ shell: "/usr/local/bin/kubectl get ds virtlet -n=kube-system -o=jsonpath --template={.status.numberReady}"
+ register: daemonset
+ until:
+        - daemonset.stdout|int >= 1
+ retries: 6
+ delay: 10
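+      # Polls the DaemonSet's numberReady field every 10 seconds, up to six
+      # times, before giving up on the Virtlet rollout.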
+
+- hosts: virtlet
+ become: yes
+ tasks:
+ - name: Load krd variables
+ include_vars:
+ file: krd-vars.yml
+ - name: create CRIProxy binary folder
+ file:
+ state: directory
+ path: "{{ criproxy_dest }}"
+    - name: disable AppArmor on all nodes
+ service:
+ name: apparmor
+ state: stopped
+ enabled: no
+ when: ansible_os_family == "Debian"
+ - name: modify args for kubelet service
+ lineinfile:
+ dest: /etc/systemd/system/kubelet.service
+ line: " --container-runtime=remote --container-runtime-endpoint=unix:///run/criproxy.sock --image-service-endpoint=unix:///run/criproxy.sock --enable-controller-attach-detach=false \\"
+ insertafter: '^ExecStart=/usr/local/bin/kubelet *'
+ state: present
+ - name: create dockershim service
+ blockinfile:
+ path: /etc/systemd/system/dockershim.service
+ create: yes
+ block: |
+ [Unit]
+ Description=dockershim for criproxy
+
+ [Service]
+ EnvironmentFile=-/etc/kubernetes/kubelet.env
+ ExecStartPre=-/bin/mkdir -p /var/lib/kubelet/volume-plugins
+ ExecStart=/usr/local/bin/kubelet --experimental-dockershim --port 11250 \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBELET_API_SERVER \
+ $KUBELET_ADDRESS \
+ $KUBELET_PORT \
+ $KUBELET_HOSTNAME \
+ $KUBE_ALLOW_PRIV \
+ $KUBELET_ARGS \
+ $DOCKER_SOCKET \
+ $KUBELET_NETWORK_PLUGIN \
+ $KUBELET_VOLUME_PLUGIN \
+ $KUBELET_CLOUDPROVIDER
+ Restart=always
+ StartLimitInterval=0
+ RestartSec=10
+
+ [Install]
+ RequiredBy=criproxy.service
+ - name: getting source code
+ block:
+ - name: clone CRIProxy repo
+ git:
+ repo: "{{ criproxy_url }}"
+ dest: "{{ criproxy_dest }}"
+ version: "{{ criproxy_version }}"
+ force: yes
+ - name: build criproxy source code
+ command: ./build-package.sh
+ args:
+ chdir: "{{ criproxy_dest }}"
+ when: criproxy_source_type == "source"
+ - name: download CRIproxy package
+ get_url:
+ url: "{{ criproxy_url }}"
+ dest: "{{ criproxy_dest }}/criproxy"
+ when: criproxy_source_type == "binary"
+ - name: set criproxy execution permissions
+ file:
+ path: "{{ criproxy_dest }}/criproxy"
+ mode: "+x"
+ - name: create criproxy service
+ blockinfile:
+ path: /etc/systemd/system/criproxy.service
+ create: yes
+ block: |
+ [Unit]
+ Description=CRI Proxy
+
+ [Service]
+ ExecStart={{ criproxy_dest }}/criproxy -v 3 -logtostderr -connect /var/run/dockershim.sock,virtlet.cloud:/run/virtlet.sock -listen /run/criproxy.sock
+ Restart=always
+ StartLimitInterval=0
+ RestartSec=10
+
+ [Install]
+ WantedBy=kubelet.service
+ - name: start criproxy and dockershim services
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - dockershim
+ - criproxy
+ - name: restart kubelet services
+ service:
+ name: kubelet
+ state: restarted
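+# After the kubelet restart, CRI requests flow from kubelet through criproxy,
+# which dispatches images prefixed with virtlet.cloud/ to Virtlet and
+# everything else to the dockershim service defined above; a quick sanity
+# check is "kubectl plugin virt virsh list" once the Virtlet pods are ready.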
diff --git a/vagrant/playbooks/krd-vars.yml b/vagrant/playbooks/krd-vars.yml
new file mode 100644
index 00000000..7aacb8db
--- /dev/null
+++ b/vagrant/playbooks/krd-vars.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+base_dest: /tmp
+
+multus_dest: "{{ base_dest }}/multus-cni"
+multus_source_type: "tarball"
+multus_version: 2.0
+multus_url: "https://github.com/intel/multus-cni/releases/download/v{{ multus_version }}/multus-cni_v{{ multus_version }}_linux_amd64.tar.gz"
+#multus_source_type: "source"
+#multus_version: def72938cd2fb272eb3a6f64a8162b1049404357
+#multus_url: "https://github.com/intel/multus-cni"
+
+ovn_kubernetes_dest: "{{ base_dest }}/ovn-kubernetes"
+ovn_kubernetes_source_type: "tarball"
+ovn_kubernetes_version: 0.3.0
+ovn_kubernetes_url: "https://github.com/openvswitch/ovn-kubernetes/archive/v{{ ovn_kubernetes_version }}.tar.gz"
+#ovn_kubernetes_source_type: "source"
+#ovn_kubernetes_version: 456a0857956988f968bb08644c650ba826592ec1
+#ovn_kubernetes_url: "https://github.com/openvswitch/ovn-kubernetes"
+
+criproxy_dest: "{{ base_dest }}/criproxy"
+criproxy_source_type: "binary"
+criproxy_version: 0.12.0
+criproxy_url: "https://github.com/Mirantis/criproxy/releases/download/v{{ criproxy_version }}/criproxy"
+#criproxy_source_type: "source"
+#criproxy_version: b5ca5a6cec278e2054dface4f7a3e111fb9ab84b
+#criproxy_url: "https://github.com/Mirantis/criproxy"
+virtlet_dest: "{{ base_dest }}/virtlet"
+virtlet_source_type: "binary"
+virtlet_version: 1.1.2
+virtlet_url: "https://github.com/Mirantis/virtlet/releases/download/v{{ virtlet_version }}/virtletctl"
+#virtlet_source_type: "source"
+#virtlet_version: 68e11b8f1db2c78b063126899f0e60910700975d
+#virtlet_url: "https://github.com/Mirantis/virtlet"
+
+nfd_dest: "{{ base_dest }}/nfd"
+nfd_source_type: "source"
+nfd_version: 175305b1ad73be7301ac94add475cec6fef797a9
+nfd_url: "https://github.com/kubernetes-incubator/node-feature-discovery"
+
+go_version: 1.10.3
+kubespray_version: 2.6.0
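+
+# Most components above carry two alternative definitions: the active
+# *_source_type/*_version/*_url trio consumes a released artifact, while the
+# commented block next to it builds the same component from source. Switch
+# modes by commenting one set and uncommenting the other.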
diff --git a/vagrant/setup.sh b/vagrant/setup.sh
new file mode 100755
index 00000000..d4927dad
--- /dev/null
+++ b/vagrant/setup.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o nounset
+set -o pipefail
+
+vagrant_version=2.1.2
+if ! $(vagrant version &>/dev/null); then
+ enable_vagrant_install=true
+else
+ if [[ "$vagrant_version" != "$(vagrant version | awk 'NR==1{print $3}')" ]]; then
+ enable_vagrant_install=true
+ fi
+fi
+
+function usage {
+ cat <<EOF
+usage: $0 -p <PROVIDER>
+Installs Vagrant and its dependencies on a Linux host
+
+Argument:
+ -p Vagrant provider
+EOF
+}
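+# Example invocation: ./setup.sh -p libvirt  (or -p virtualbox)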
+
+while getopts ":p:" OPTION; do
+ case $OPTION in
+ p)
+ provider=$OPTARG
+ ;;
+ \?)
+ usage
+ exit 1
+ ;;
+ esac
+done
+if [[ -z "${provider+x}" ]]; then
+ usage
+ exit 1
+fi
+
+case $provider in
+ "virtualbox" | "libvirt" )
+ export VAGRANT_DEFAULT_PROVIDER=${provider}
+ ;;
+ * )
+ usage
+ exit 1
+esac
+source /etc/os-release || source /usr/lib/os-release
+
+libvirt_group="libvirt"
+packages=()
+case ${ID,,} in
+ *suse)
+ INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
+
+ # Vagrant installation
+ if [[ "${enable_vagrant_install+x}" ]]; then
+ vagrant_pgp="pgp_keys.asc"
+ wget -q https://keybase.io/hashicorp/$vagrant_pgp
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.rpm
+ gpg --quiet --with-fingerprint $vagrant_pgp
+ sudo rpm --import $vagrant_pgp
+ sudo rpm --checksig vagrant_${vagrant_version}_x86_64.rpm
+ sudo rpm --install vagrant_${vagrant_version}_x86_64.rpm
+ rm vagrant_${vagrant_version}_x86_64.rpm
+ rm $vagrant_pgp
+ fi
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+ wget -q http://download.virtualbox.org/virtualbox/rpm/opensuse/$VERSION/virtualbox.repo -P /etc/zypp/repos.d/
+                $INSTALLER_CMD dkms
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | rpm --import -
+ packages+=(VirtualBox-5.1)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm zlib-devel libxml2-devel libxslt-devel make)
+ # NFS
+ packages+=(nfs-kernel-server)
+ ;;
+ esac
+ sudo zypper -n ref
+ ;;
+
+ ubuntu|debian)
+ libvirt_group="libvirtd"
+ INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
+
+ # Vagrant installation
+ if [[ "${enable_vagrant_install+x}" ]]; then
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.deb
+ sudo dpkg -i vagrant_${vagrant_version}_x86_64.deb
+ rm vagrant_${vagrant_version}_x86_64.deb
+ fi
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+                echo "deb http://download.virtualbox.org/virtualbox/debian trusty contrib" | sudo tee --append /etc/apt/sources.list
+ wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
+ packages+=(virtualbox-5.1 dkms)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt-bin ebtables dnsmasq libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev)
+ # NFS
+ packages+=(nfs-kernel-server)
+ ;;
+ esac
+ sudo apt-get update
+ ;;
+
+ rhel|centos|fedora)
+ PKG_MANAGER=$(which dnf || which yum)
+ sudo $PKG_MANAGER updateinfo
+ INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
+
+ # Vagrant installation
+ if [[ "${enable_vagrant_install+x}" ]]; then
+ wget -q https://releases.hashicorp.com/vagrant/$vagrant_version/vagrant_${vagrant_version}_x86_64.rpm
+ $INSTALLER_CMD vagrant_${vagrant_version}_x86_64.rpm
+ rm vagrant_${vagrant_version}_x86_64.rpm
+ fi
+
+ case $VAGRANT_DEFAULT_PROVIDER in
+ virtualbox)
+ wget -q http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo -P /etc/yum.repos.d
+ $INSTALLER_CMD --enablerepo=epel dkms
+ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | rpm --import -
+ packages+=(VirtualBox-5.1)
+ ;;
+ libvirt)
+ # vagrant-libvirt dependencies
+ packages+=(qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm)
+ # NFS
+ packages+=(nfs-utils nfs-utils-lib)
+ ;;
+ esac
+ ;;
+
+esac
+
+if ! which pip; then
+ curl -sL https://bootstrap.pypa.io/get-pip.py | sudo python
+fi
+sudo pip install --upgrade pip
+sudo pip install tox
+
+${INSTALLER_CMD} ${packages[@]}
+if [[ ${http_proxy+x} ]]; then
+ vagrant plugin install vagrant-proxyconf
+fi
+if [ $VAGRANT_DEFAULT_PROVIDER == libvirt ]; then
+ vagrant plugin install vagrant-libvirt
+    sudo usermod -a -G $libvirt_group $USER # This may require logging out and back in to reload the user's group assignments
+ sudo systemctl restart libvirtd
+fi
diff --git a/vagrant/tests/generic_simulator/Dockerfile b/vagrant/tests/generic_simulator/Dockerfile
new file mode 100644
index 00000000..202cafc6
--- /dev/null
+++ b/vagrant/tests/generic_simulator/Dockerfile
@@ -0,0 +1,27 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+FROM python:2.7
+
+ARG HTTP_PROXY=${HTTP_PROXY}
+ARG HTTPS_PROXY=${HTTPS_PROXY}
+
+ENV http_proxy $HTTP_PROXY
+ENV https_proxy $HTTPS_PROXY
+
+EXPOSE 8080
+
+RUN mkdir -p /tmp/generic_sim /etc/generic_sim
+
+WORKDIR /opt/generic_sim/
+
+COPY . .
+RUN pip install --no-cache-dir -r requirements.txt
+
+CMD [ "python", "generic_sim.py" ]
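+
+# Typical local usage, mirroring vagrant/tests/plugin.sh:
+#   docker build -t generic_sim:latest .
+#   docker run --name aai -v $(pwd)/aai:/etc/generic_sim -p 8443:8080 -d generic_sim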
diff --git a/vagrant/tests/generic_simulator/aai/responses.yml b/vagrant/tests/generic_simulator/aai/responses.yml
new file mode 100644
index 00000000..f6d5fcd0
--- /dev/null
+++ b/vagrant/tests/generic_simulator/aai/responses.yml
@@ -0,0 +1,189 @@
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne:
+ GET:
+ body: '{"cloud-owner":"CloudOwner","cloud-region-id":"RegionOne","cloud-type":"openstack","owner-defined-type":"t1","cloud-region-version":"RegionOne","identity-url":"http://keystone:8080/v3","cloud-zone":"z1","complex-name":"clli1","sriov-automation":false,"cloud-extra-info":"","resource-version":"1524845154715"}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/availability-zones/availability-zone/internal:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/availability-zones/availability-zone/internal","Node
+ Not Found:No Node of type availability-zone found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/availability-zones/availability-zone/internal","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/availability-zones/availability-zone/nova:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/availability-zones/availability-zone/nova","Node
+ Not Found:No Node of type availability-zone found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/availability-zones/availability-zone/nova","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/100:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/100","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/100","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/110:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/110","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/110","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/111:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/111","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/111","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/112:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/112","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/112","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/113:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/113","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/113","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/114:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/114","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/114","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/115:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/115","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/115","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/116:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/116","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/116","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/117:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/117","Node
+ Not Found:No Node of type flavor found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/flavors/flavor/117","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/images/image/660709df-e90b-471f-ac57-d8c2555e573d:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/images/image/660709df-e90b-471f-ac57-d8c2555e573d","Node
+ Not Found:No Node of type image found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/images/image/660709df-e90b-471f-ac57-d8c2555e573d","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/3543226ffed44daf90a2f71f36c00b8d:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/3543226ffed44daf90a2f71f36c00b8d","Node
+ Not Found:No Node of type tenant found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/3543226ffed44daf90a2f71f36c00b8d","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/b8f5d85bbcd84af28d7caa62d39f05c7:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/b8f5d85bbcd84af28d7caa62d39f05c7","Node
+ Not Found:No Node of type tenant found at: cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/tenants/tenant/b8f5d85bbcd84af28d7caa62d39f05c7","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne:
+ GET:
+ body: '{"cloud-owner":"CloudOwner","cloud-region-id":"RegionOne","cloud-type":"openstack","owner-defined-type":"t1","cloud-region-version":"RegionOne","identity-url":"http://multicloud-ocata:80/api/multicloud-titanium_cloud/v0/CloudOwner_RegionOne/identity/v2.0","cloud-zone":"z1","complex-name":"clli1","sriov-automation":false,"cloud-extra-info":"","resource-version":"1524845276291"}'
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne/esr-system-info-list:
+ GET:
+ body: '{"esr-system-info":[{"esr-system-info-id":"4ce895ad-82f7-4476-b5eb-d19d19585da2","service-url":"http://keystone:8080/v3","user-name":"admin","password":"secret","system-type":"VIM","ssl-insecure":true,"cloud-domain":"Default","default-tenant":"admin","resource-version":"1524845155617"}]}'
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/pservers/pserver/compute-0:
+ GET:
+ body: '{"requestError":{"serviceException":{"messageId":"SVC3001","text":"Resource
+ not found for %1 using id %2 (msg=%3) (ec=%4)","variables":["GET","cloud-infrastructure/pservers/pserver/compute-0","Node
+ Not Found:No Node of type pserver found at: cloud-infrastructure/pservers/pserver/compute-0","ERR.5.4.6114"]}}}'
+ content_type: application/json
+ status_code: 200
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
+aai/v13/cloud-infrastructure/pservers/pserver/compute-0/relationship-list/relationship:
+ PUT:
+ body: ''
+ content_type: application/json
+ status_code: 200
diff --git a/vagrant/tests/generic_simulator/generic_sim.py b/vagrant/tests/generic_simulator/generic_sim.py
new file mode 100644
index 00000000..4392b652
--- /dev/null
+++ b/vagrant/tests/generic_simulator/generic_sim.py
@@ -0,0 +1,109 @@
+# Copyright 2018 Intel Corporation, Inc
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+
+import web
+from web import webapi
+import yaml
+
+urls = (
+ '/(.*)','MockController'
+)
+
+def setup_logger(name, log_file, level=logging.DEBUG):
+ print("Configuring the logger...")
+ handler = logging.FileHandler(log_file)
+ formatter = logging.Formatter('%(message)s')
+ handler.setFormatter(formatter)
+
+ logger = logging.getLogger(name)
+ logger.setLevel(level)
+ logger.addHandler(handler)
+
+ return logger
+
+
+class MockResponse:
+ def __init__(self, http_verb, status_code,
+ content_type="application/json", body="{}",
+ headers={}):
+ self.http_verb = http_verb.lower()
+ self.status_code = status_code
+ self.content_type = content_type
+ self.body = body
+ self.headers = headers
+
+def _parse_responses(parsed_responses):
+ result = {}
+ for path, responses in parsed_responses.iteritems():
+ new_path = path
+ if path.startswith("/"):
+ new_path = path[1:]
+
+ result[new_path] = []
+ for http_verb, response in responses.iteritems():
+ result[new_path].append(MockResponse(http_verb, **response))
+ return result
+
+def load_responses(filename):
+ print("Loading responses from configuration file..")
+ with open(filename) as yaml_file:
+ responses_file = yaml.safe_load(yaml_file)
+ responses_map = _parse_responses(responses_file)
+ return responses_map
+
+
+class MockController:
+
+ def _do_action(self, action):
+ logger.info('{}'.format(web.ctx.env.get('wsgi.input').read()))
+ action = action.lower()
+ url = web.ctx['fullpath']
+ try:
+ if url.startswith("/"):
+ url = url[1:]
+ response = [ r for r in responses_map[url] if r.http_verb == action][0]
+ for header, value in response.headers.iteritems():
+ web.header(header, value)
+ web.header('Content-Type', response.content_type)
+ print(response.body)
+ return response.body
+ except:
+ webapi.NotFound()
+
+ def DELETE(self, url):
+ return self._do_action("delete")
+
+ def HEAD(self, url):
+ return self._do_action("head")
+
+ def PUT(self, url):
+ return self._do_action("put")
+
+ def GET(self, url):
+ return self._do_action("get")
+
+ def POST(self, url):
+ return self._do_action("post")
+
+ def PATCH(self, url):
+ return self._do_action("patch")
+
+
+logger = setup_logger('mock_controller', '/tmp/generic_sim/output.log')
+responses_map = load_responses('/etc/generic_sim/responses.yml')
+app = web.application(urls, globals())
+if __name__ == "__main__":
+ app.run()
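+
+# The responses file consumed by load_responses() maps a request path to
+# per-verb canned replies, e.g.:
+#
+#   some/endpoint/path:
+#     GET:
+#       body: '{"status": "ok"}'
+#       content_type: application/json
+#       status_code: 200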
diff --git a/vagrant/tests/generic_simulator/requirements.txt b/vagrant/tests/generic_simulator/requirements.txt
new file mode 100644
index 00000000..a0b6aae2
--- /dev/null
+++ b/vagrant/tests/generic_simulator/requirements.txt
@@ -0,0 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+PyYAML
+web.py
diff --git a/vagrant/tests/integration_cFW.sh b/vagrant/tests/integration_cFW.sh
new file mode 100755
index 00000000..e4b305f4
--- /dev/null
+++ b/vagrant/tests/integration_cFW.sh
@@ -0,0 +1,194 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+
+cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: unprotected-private-net-cidr
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.10.0/24"
+ }
+}'
+NET
+
+cat << NET > $HOME/protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: protected-private-net-cidr
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.20.0/24"
+ }
+}'
+NET
+
+cat << NET > $HOME/onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: onap-private-net-cidr
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
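+
+# The Network objects above rely on the kubernetes.cni.cncf.io/v1 CRD that
+# the Multus setup provides; the deployments below attach their extra
+# interfaces to these networks through the
+# kubernetes.v1.cni.cncf.io/networks annotation.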
+
+cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $packetgen_deployment_name
+ image: electrocucaracha/packetgen
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+ ]'
+ spec:
+ containers:
+ - name: $firewall_deployment_name
+ image: electrocucaracha/firewall
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $sink_deployment_name
+ image: electrocucaracha/sink
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+if $(kubectl version &>/dev/null); then
+ kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
+ kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
+ kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
+
+ for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ kubectl delete deployment $deployment_name --ignore-not-found=true --now
+ while kubectl get deployment $deployment_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$deployment_name.yaml
+ done
+
+ for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+fi
diff --git a/vagrant/tests/integration_vFW.sh b/vagrant/tests/integration_vFW.sh
new file mode 100755
index 00000000..fa48d7c5
--- /dev/null
+++ b/vagrant/tests/integration_vFW.sh
@@ -0,0 +1,295 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+packetgen_deployment_name=packetgen
+sink_deployment_name=sink
+firewall_deployment_name=firewall
+image_name=virtlet.cloud/ubuntu/16.04
+
+if [[ ! -f $HOME/.ssh/id_rsa.pub ]]; then
+ echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
+fi
+ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
+
+cat << NET > $HOME/unprotected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: unprotected-private-net-cidr
+spec:
+ config: '{
+ "name": "unprotected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.10.0/24"
+ }
+}'
+NET
+
+cat << NET > $HOME/protected-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: protected-private-net-cidr
+spec:
+ config: '{
+ "name": "protected",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.20.0/24"
+ }
+}'
+NET
+
+cat << NET > $HOME/onap-private-net-cidr-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: onap-private-net-cidr
+spec:
+ config: '{
+ "name": "onap",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+proxy="#!/bin/bash"
+if [[ -n "${http_proxy+x}" ]]; then
+ proxy+="
+ export http_proxy=$http_proxy
+ echo \"Acquire::http::Proxy \\\"$http_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
+"
+fi
+if [[ -n "${https_proxy+x}" ]]; then
+ proxy+="
+ export https_proxy=$https_proxy
+ echo \"Acquire::https::Proxy \\\"$https_proxy\\\";\" | sudo tee --append /etc/apt/apt.conf.d/01proxy
+"
+fi
+if [[ -n "${no_proxy+x}" ]]; then
+ proxy+="
+ export no_proxy=$no_proxy"
+fi
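+
+# The script fragment assembled above is injected into each Virtlet VM via
+# the VirtletCloudInitUserDataScript annotation so that apt and the vFW-demo
+# bootstrap script keep working behind a proxy.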
+
+cat << DEPLOYMENT > $HOME/$packetgen_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $packetgen_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$packetgen_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $packetgen_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 256Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$firewall_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $firewall_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$firewall_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "unprotected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth2" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth3" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $firewall_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+cat << DEPLOYMENT > $HOME/$sink_deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $sink_deployment_name
+ labels:
+ app: vFirewall
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: vFirewall
+ template:
+ metadata:
+ labels:
+ app: vFirewall
+ annotations:
+ VirtletCloudInitUserData: |
+ users:
+ - default
+ - name: admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ plain_text_passwd: secret
+ groups: sudo
+ ssh_authorized_keys:
+ - $ssh_key
+ VirtletCloudInitUserDataScript: |
+ $proxy
+
+ wget -O - https://raw.githubusercontent.com/electrocucaracha/vFW-demo/master/$sink_deployment_name | sudo -E bash
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "protected-private-net-cidr", "interfaceRequest": "eth1" },
+ { "name": "onap-private-net-cidr", "interfaceRequest": "eth2" }
+ ]'
+ kubernetes.io/target-runtime: virtlet.cloud
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $sink_deployment_name
+ image: $image_name
+ imagePullPolicy: IfNotPresent
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ memory: 160Mi
+DEPLOYMENT
+
+if $(kubectl version &>/dev/null); then
+ kubectl apply -f $HOME/unprotected-private-net-cidr-network.yaml
+ kubectl apply -f $HOME/protected-private-net-cidr-network.yaml
+ kubectl apply -f $HOME/onap-private-net-cidr-network.yaml
+
+ for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ kubectl delete deployment $deployment_name --ignore-not-found=true --now
+ while kubectl get deployment $deployment_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$deployment_name.yaml
+ done
+
+ for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods | grep $deployment_name | awk '{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $deployment_name : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+ for deployment_name in $packetgen_deployment_name $firewall_deployment_name $sink_deployment_name; do
+ pod_name=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ vm=$(kubectl plugin virt virsh list | grep ".*$deployment_name" | awk '{print $2}')
+ echo "Pod name: $pod_name Virsh domain: $vm"
+        echo "ssh -i ~/.ssh/id_rsa admin@$(kubectl get pods $pod_name -o jsonpath="{.status.podIP}")"
+ echo "=== Virtlet details ===="
+ echo "$(kubectl plugin virt virsh dumpxml $vm | grep VIRTLET_)\n"
+ done
+fi
diff --git a/vagrant/tests/multus.sh b/vagrant/tests/multus.sh
new file mode 100755
index 00000000..c5f7fc71
--- /dev/null
+++ b/vagrant/tests/multus.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+pod_name=multus-pod
+deployment_name=multus-deployment
+
+cat << NET > $HOME/bridge-network.yaml
+apiVersion: "kubernetes.cni.cncf.io/v1"
+kind: Network
+metadata:
+ name: bridge-conf
+spec:
+ config: '{
+ "name": "mynet",
+ "type": "bridge",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.10.0.0/16"
+ }
+}'
+NET
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $pod_name
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "bridge-conf", "interfaceRequest": "eth1" },
+ { "name": "bridge-conf", "interfaceRequest": "eth2" }
+ ]'
+spec: # specification of the pod's contents
+ containers:
+ - name: $pod_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+POD
+
+cat << DEPLOYMENT > $HOME/$deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $deployment_name
+ labels:
+ app: multus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: multus
+ template:
+ metadata:
+ labels:
+ app: multus
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "bridge-conf", "interfaceRequest": "eth1" },
+ { "name": "bridge-conf", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: $deployment_name
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+
+if $(kubectl version &>/dev/null); then
+ kubectl apply -f $HOME/bridge-network.yaml
+
+ kubectl delete pod $pod_name --ignore-not-found=true --now
+ kubectl delete deployment $deployment_name --ignore-not-found=true --now
+ while kubectl get pod $pod_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$pod_name.yaml
+ while kubectl get deployment $deployment_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$deployment_name.yaml
+ sleep 5
+
+ deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ for pod in $pod_name $deployment_pod; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+
+ for pod in $pod_name $deployment_pod; do
+ echo "===== $pod details ====="
+ kubectl exec -it $pod -- ip a
+ multus_nic=$(kubectl exec -it $pod -- ifconfig | grep "eth1")
+ if [ -z "$multus_nic" ]; then
+ exit 1
+ fi
+ done
+fi
diff --git a/vagrant/tests/nfd.sh b/vagrant/tests/nfd.sh
new file mode 100755
index 00000000..17548206
--- /dev/null
+++ b/vagrant/tests/nfd.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+pod_name=nfd-pod
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $pod_name
+ labels:
+ env: test
+spec:
+ containers:
+ - name: nginx
+ image: nginx
+  nodeSelector:
+    node.alpha.kubernetes-incubator.io/nfd-network-SRIOV: "true"
+POD
+
+if $(kubectl version &>/dev/null); then
+ labels=$(kubectl get nodes -o json | jq .items[].metadata.labels)
+
+ echo $labels
+ if [[ $labels != *"node.alpha.kubernetes-incubator.io"* ]]; then
+ exit 1
+ fi
+
+ kubectl delete pod $pod_name --ignore-not-found=true --now
+ while kubectl get pod $pod_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$pod_name.yaml --validate=false
+
+ for pod in $pod_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+fi
diff --git a/vagrant/tests/ovn-kubernetes.sh b/vagrant/tests/ovn-kubernetes.sh
new file mode 100755
index 00000000..95d216bf
--- /dev/null
+++ b/vagrant/tests/ovn-kubernetes.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+apache_pod_name=apachetwin
+nginx_pod_name=nginxtwin
+
+cat << APACHEPOD > $HOME/apache-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $apache_pod_name
+ labels:
+ name: webserver
+spec:
+ containers:
+ - name: apachetwin
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+APACHEPOD
+
+cat << NGINXPOD > $HOME/nginx-pod.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $nginx_pod_name
+ labels:
+ name: webserver
+spec:
+ containers:
+ - name: nginxtwin
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+NGINXPOD
+
+cat << APACHEEW > $HOME/apache-e-w.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: apacheservice
+ role: service
+ name: apacheservice
+spec:
+ ports:
+ - port: 8800
+ targetPort: 80
+ protocol: TCP
+ name: tcp
+ selector:
+ name: webserver
+APACHEEW
+
+cat << APACHENS > $HOME/apache-n-s.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: apacheexternal
+ role: service
+ name: apacheexternal
+spec:
+ ports:
+ - port: 8800
+ targetPort: 80
+ protocol: TCP
+ name: tcp
+ selector:
+ name: webserver
+ type: NodePort
+APACHENS
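+
+# apacheservice is the cluster-internal (east-west) service and
+# apacheexternal exposes the same webserver pods north-south through a
+# NodePort; both select pods labeled name=webserver.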
+
+if $(kubectl version &>/dev/null); then
+ kubectl apply -f $HOME/apache-e-w.yaml
+ kubectl apply -f $HOME/apache-n-s.yaml
+
+ kubectl delete pod $apache_pod_name --ignore-not-found=true --now
+ kubectl delete pod $nginx_pod_name --ignore-not-found=true --now
+ while kubectl get pod $apache_pod_name &>/dev/null; do
+ sleep 5
+ done
+ while kubectl get pod $nginx_pod_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/apache-pod.yaml
+ kubectl create -f $HOME/nginx-pod.yaml
+
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $apache_pod_name | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $nginx_pod_name | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ apache_ovn=$(kubectl get pod $apache_pod_name -o jsonpath="{.metadata.annotations.ovn}")
+ nginx_ovn=$(kubectl get pod $nginx_pod_name -o jsonpath="{.metadata.annotations.ovn}")
+
+ echo $apache_ovn
+ if [[ $apache_ovn != *"\"ip_address\":\"11.11."* ]]; then
+ exit 1
+ fi
+
+ echo $nginx_ovn
+ if [[ $nginx_ovn != *"\"ip_address\":\"11.11."* ]]; then
+ exit 1
+ fi
+fi
diff --git a/vagrant/tests/plugin.sh b/vagrant/tests/plugin.sh
new file mode 100755
index 00000000..a40cb60c
--- /dev/null
+++ b/vagrant/tests/plugin.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+base_url="http://localhost:8081/v1/vnf_instances/"
+cloud_region_id="krd"
+namespace="default"
+csar_id="94e414f6-9ca4-11e8-bb6a-52540067263b"
+
+if [[ -z $(docker images -q generic_sim) ]]; then
+ BUILD_ARGS="--no-cache"
+    if [ "${HTTP_PROXY:-}" ]; then
+ BUILD_ARGS+=" --build-arg HTTP_PROXY=${HTTP_PROXY}"
+ fi
+    if [ "${HTTPS_PROXY:-}" ]; then
+ BUILD_ARGS+=" --build-arg HTTPS_PROXY=${HTTPS_PROXY}"
+ fi
+ pushd generic_simulator
+ docker build ${BUILD_ARGS} -t generic_sim:latest .
+ popd
+fi
+
+if [[ $(docker ps -q --all --filter "name=aai") ]]; then
+ docker rm aai -f
+fi
+docker run --name aai -v $(pwd)/output:/tmp/generic_sim/ -v $(pwd)/generic_simulator/aai/:/etc/generic_sim/ -p 8443:8080 -d generic_sim
+
+vnf_id_list=$(curl -s "${base_url}${cloud_region_id}/${namespace}" | jq -r '.vnf_id_list')
+
+mkdir -p ${CSAR_DIR}/${csar_id}
+cat << SEQ > ${CSAR_DIR}/${csar_id}/sequence.yaml
+deployment:
+ - deployment.yaml
+service:
+ - service.yaml
+SEQ
+cat << DEPLOYMENT > ${CSAR_DIR}/${csar_id}/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: multus-deployment
+ labels:
+ app: multus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: multus
+ template:
+ metadata:
+ labels:
+ app: multus
+ annotations:
+ kubernetes.v1.cni.cncf.io/networks: '[
+ { "name": "bridge-conf", "interfaceRequest": "eth1" },
+ { "name": "bridge-conf", "interfaceRequest": "eth2" }
+ ]'
+ spec:
+ containers:
+ - name: multus-deployment
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
+DEPLOYMENT
+cat << SERVICE > ${CSAR_DIR}/${csar_id}/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: sise-svc
+spec:
+ ports:
+ - port: 80
+ protocol: TCP
+ selector:
+ app: sise
+SERVICE
+
+payload_raw="
+{
+ \"cloud_region_id\": \"$cloud_region_id\",
+ \"namespace\": \"$namespace\",
+ \"csar_id\": \"$csar_id\"
+}
+"
+payload=$(echo $payload_raw | tr '\n' ' ')
+curl -v -X POST -d "$payload" "${base_url}"
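+# The POST above is expected to return a JSON document describing the new
+# VNF instance; the instance list for the region and namespace can be
+# re-read with:
+#   curl -s "${base_url}${cloud_region_id}/${namespace}"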
diff --git a/vagrant/tests/virtlet.sh b/vagrant/tests/virtlet.sh
new file mode 100755
index 00000000..a8af071f
--- /dev/null
+++ b/vagrant/tests/virtlet.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+rm -f $HOME/*.yaml
+
+virtlet_image=virtlet.cloud/fedora
+pod_name=virtlet-pod
+deployment_name=virtlet-deployment
+
+cat << POD > $HOME/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: $pod_name
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ VirtletCloudInitUserDataScript: |
+ #!/bin/sh
+ echo hello world
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $pod_name
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: $virtlet_image
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for "kubectl attach -t" to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
+POD
+
+cat << DEPLOYMENT > $HOME/$deployment_name.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $deployment_name
+ labels:
+ app: virtlet
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: virtlet
+ template:
+ metadata:
+ labels:
+ app: virtlet
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ VirtletCloudInitUserDataScript: |
+ #!/bin/sh
+ echo hello world
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: $deployment_name
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: $virtlet_image
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for "kubectl attach -t" to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
+DEPLOYMENT
+
+if $(kubectl version &>/dev/null); then
+ kubectl delete pod $pod_name --ignore-not-found=true --now
+ kubectl delete deployment $deployment_name --ignore-not-found=true --now
+ while kubectl get pod $pod_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$pod_name.yaml
+ while kubectl get deployment $deployment_name &>/dev/null; do
+ sleep 5
+ done
+ kubectl create -f $HOME/$deployment_name.yaml
+ sleep 5
+
+ deployment_pod=$(kubectl get pods | grep $deployment_name | awk '{print $1}')
+ for pod in $pod_name $deployment_pod; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+
+ kubectl plugin virt virsh list
+ for pod in $pod_name $deployment_name; do
+ virsh_image=$(kubectl plugin virt virsh list | grep "virtlet-.*-$pod")
+ if [[ -z "$virsh_image" ]]; then
+ exit 1
+ fi
+ done
+fi