-rw-r--r--  .gitlint | 13
-rw-r--r--  .gitmodules | 5
-rw-r--r--  docs/conf.py | 2
-rw-r--r--  docs/index.rst | 2
-rw-r--r--  docs/oom_developer_guide.rst | 4
-rw-r--r--  docs/oom_hardcoded_certificates.rst | 2
-rw-r--r--  docs/oom_quickstart_guide.rst | 2
-rw-r--r--  docs/oom_quickstart_guide_helm3.rst | 252
-rw-r--r--  docs/oom_user_guide.rst | 15
-rw-r--r--  docs/oom_user_guide_helm3.rst | 728
-rw-r--r--  docs/release-notes-amsterdam.rst | 2
-rw-r--r--  docs/release-notes-beijing.rst | 2
-rw-r--r--  docs/release-notes-casablanca.rst | 2
-rw-r--r--  docs/release-notes-dublin.rst | 2
-rw-r--r--  docs/release-notes-elalto.rst | 2
-rw-r--r--  docs/release-notes-frankfurt.rst | 2
-rw-r--r--  kubernetes/common/common/templates/_affinities.tpl | 109
-rw-r--r--  kubernetes/oof/values.yaml | 6
m---------  kubernetes/robot | 0
-rw-r--r--  kubernetes/robot/.gitignore | 3
-rw-r--r--  kubernetes/robot/.helmignore | 21
-rw-r--r--  kubernetes/robot/Chart.yaml | 18
-rw-r--r--  kubernetes/robot/INFO.yaml | 141
-rwxr-xr-x  kubernetes/robot/demo-k8s.sh | 235
-rwxr-xr-x  kubernetes/robot/ete-k8s.sh | 106
-rwxr-xr-x  kubernetes/robot/eteHelm-k8s.sh | 60
-rwxr-xr-x  kubernetes/robot/instantiate-k8s.sh | 150
-rw-r--r--  kubernetes/robot/onap_dev.pvt | 27
-rw-r--r--  kubernetes/robot/requirements.yaml | 22
-rw-r--r--  kubernetes/robot/resources/config/eteshare/config/robot_properties.py | 345
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/authorization | 1
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/lighttpd.conf | 57
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/ssl/README.TXT | 13
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.crt | 22
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.csr | 18
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.key | 28
-rw-r--r--  kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.pem | 50
-rw-r--r--  kubernetes/robot/scripts/demoscript/README | 1
-rw-r--r--  kubernetes/robot/scripts/etescript/README | 1
-rwxr-xr-x  kubernetes/robot/scripts/etescript/hvves-etescript.sh | 77
-rwxr-xr-x  kubernetes/robot/scripts/etescript/security-etescript.sh | 57
-rwxr-xr-x  kubernetes/robot/scripts/etescript/vnfsdk-etescript.sh | 49
-rw-r--r--  kubernetes/robot/scripts/helmscript/README | 1
-rw-r--r--  kubernetes/robot/templates/NOTES.txt | 25
-rw-r--r--  kubernetes/robot/templates/_ingress_svc_hostname.tpl | 157
-rw-r--r--  kubernetes/robot/templates/configmap.yaml | 52
-rw-r--r--  kubernetes/robot/templates/deployment.yaml | 117
-rw-r--r--  kubernetes/robot/templates/ingress.yaml | 1
-rw-r--r--  kubernetes/robot/templates/pv.yaml | 40
-rw-r--r--  kubernetes/robot/templates/pvc.yaml | 39
-rw-r--r--  kubernetes/robot/templates/service.yaml | 39
-rw-r--r--  kubernetes/robot/values.yaml | 453
-rwxr-xr-x  kubernetes/so/components/so-bpmn-infra/resources/config/overrides/override.yaml | 4
-rw-r--r--  kubernetes/so/components/soHelpers/templates/_certificates.tpl | 2
-rw-r--r--  tox.ini | 14
55 files changed, 3581 insertions(+), 17 deletions(-)
diff --git a/.gitlint b/.gitlint
new file mode 100644
index 0000000000..9f3a51d8a0
--- /dev/null
+++ b/.gitlint
@@ -0,0 +1,13 @@
+[title-match-regex]
+regex=^\[[A-Z0-9]+]\[*[A-Z0-9]*]* [A-Z]+[\S ]*$
+
+[body-match-regex]
+regex=.{20,}
+
+[ignore-by-title]
+regex=^Release(.*)
+ignore=title-match-regex,body-min-length
+
+[ignore-body-lines]
+regex=^(Issue-ID|Signed-off-by|Change-Id|Reported-by|Depends-on|Co-authored-by|Fixes|Acked-by|Reviewed-by|CC)
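+# Illustrative only: a title such as "[OOM] Add gitlint configuration"
+# satisfies the title-match-regex above. Assuming gitlint is installed
+# locally, a candidate commit message can be checked by piping it through
+# gitlint, e.g.:
+#   git log -1 --pretty=%B | gitlint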
+
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 3f0f4efe38..0000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,5 +0,0 @@
-[submodule "kubernetes/robot"]
- path = kubernetes/robot
- url = ../testsuite/oom
- branch = master
- ignore = dirty
diff --git a/docs/conf.py b/docs/conf.py
index 8f40e8b817..3b28eb74a8 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -12,4 +12,4 @@ intersphinx_mapping = {}
html_last_updated_fmt = '%d-%b-%y %H:%M'
def setup(app):
- app.add_stylesheet("css/ribbon_onap.css")
+ app.add_css_file("css/ribbon_onap.css")
diff --git a/docs/index.rst b/docs/index.rst
index c3902ecae0..68b38de9aa 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -12,7 +12,9 @@ OOM Documentation Repository
oom_project_description.rst
oom_quickstart_guide.rst
+ oom_quickstart_guide_helm3.rst
oom_user_guide.rst
+ oom_user_guide_helm3.rst
oom_developer_guide.rst
oom_cloud_setup_guide.rst
release-notes.rst
diff --git a/docs/oom_developer_guide.rst b/docs/oom_developer_guide.rst
index fccf453925..3d8cdb1128 100644
--- a/docs/oom_developer_guide.rst
+++ b/docs/oom_developer_guide.rst
@@ -8,7 +8,7 @@
.. _Helm Charts: https://github.com/kubernetes/charts
.. _Kubernetes: https://Kubernetes.io/
.. _Docker: https://www.docker.com/
-.. _Nexus: https://nexus.onap.org/#welcome
+.. _Nexus: https://nexus.onap.org/
.. _AWS Elastic Block Store: https://aws.amazon.com/ebs/
.. _Azure File: https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction
.. _GCE Persistent Disk: https://cloud.google.com/compute/docs/disks/
@@ -1131,7 +1131,7 @@ access to watch the kubernetes events and get service annotation by
Kubernetes APIs. The token can be found in the kubectl configuration file
*~/.kube/config*
-More details can be found here `MSB installation <http://onap.readthedocs.io/en/latest/submodules/msb/apigateway.git/docs/platform/installation.html>`__.
+More details can be found here `MSB installation <https://docs.onap.org/projects/onap-msb-apigateway/en/latest/platform/installation.html>`_.
.. MISC
.. ====
diff --git a/docs/oom_hardcoded_certificates.rst b/docs/oom_hardcoded_certificates.rst
index 9f6aa1ff0e..c4392c701f 100644
--- a/docs/oom_hardcoded_certificates.rst
+++ b/docs/oom_hardcoded_certificates.rst
@@ -3,6 +3,8 @@
.. http://creativecommons.org/licenses/by/4.0
.. Copyright 2018 Amdocs, Bell Canada, 2020 Nokia Solutions and Networks
+:orphan:
+
.. Links
.. _hardcoded-certificates-label:
diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst
index 28d3597923..6b1c69cc44 100644
--- a/docs/oom_quickstart_guide.rst
+++ b/docs/oom_quickstart_guide.rst
@@ -158,7 +158,7 @@ follows::
**Step 6.** Build a local Helm repository (from the kubernetes directory)::
- > make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] all ; make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] onap
+ > make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] all
`HELM_BIN`
Sets the helm binary to be used. The default value use helm from PATH. Allow the user to have
diff --git a/docs/oom_quickstart_guide_helm3.rst b/docs/oom_quickstart_guide_helm3.rst
new file mode 100644
index 0000000000..5a3076426e
--- /dev/null
+++ b/docs/oom_quickstart_guide_helm3.rst
@@ -0,0 +1,252 @@
+.. This work is licensed under a
+.. Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung
+.. _oom_quickstart_guide_helm3:
+.. _quick-start-label-helm3:
+
+OOM Quick Start Guide Helm3 (experimental)
+###########################################
+
+.. figure:: oomLogoV2-medium.png
+ :align: right
+
+Once a Kubernetes environment is available (follow the instructions in
+:ref:`cloud-setup-guide-label` if you don't have a cloud environment
+available), follow the instructions below to deploy ONAP.
+
+**Step 1.** Clone the OOM repository from ONAP gerrit::
+
+ > git clone -b <BRANCH> http://gerrit.onap.org/r/oom --recurse-submodules
+ > cd oom/kubernetes
+
+where <BRANCH> can be an official release tag, such as
+
+* 4.0.0-ONAP for Dublin
+* 5.0.1-ONAP for El Alto
+* 6.0.0 for Frankfurt
+* 7.0.0 for Guilin
+
+**Step 2.** Install Helm Plugins required to deploy ONAP::
+
+ > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins
+ > helm plugin install https://github.com/chartmuseum/helm-push.git
+
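+Assuming Helm 3 is on the PATH, you can confirm that the plugins (deploy,
+undeploy, push) were registered by listing them::
+
+ > helm plugin list
+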
+**Step 3.** Install Chartmuseum::
+
+ > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum
+ > chmod +x ./chartmuseum
+ > mv ./chartmuseum /usr/local/bin
+
+**Step 4.** Customize the Helm charts like `oom/kubernetes/onap/values.yaml` or
+an override file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml`
+to suit your deployment with items like the OpenStack tenant information.
+
+.. note::
+ Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`) can be found in
+ the `oom/kubernetes/onap/resources/overrides/` directory.
+
+
+ a. You may want to selectively enable or disable ONAP components by changing
+ the ``enabled: true/false`` flags.
+
+
+ b. Encrypt the OpenStack password using the shell tool for Robot and put it in
+ the Robot Helm charts or Robot section of `openstack.yaml`
+
+
+ c. Encrypt the OpenStack password using the java based script for SO Helm charts
+ or SO section of `openstack.yaml`.
+
+
+ d. Update the OpenStack parameters that will be used by Robot, SO and APPC Helm
+ charts or use an override file to replace them.
+
+ e. Add in the command line a value for the global master password (global.masterPassword).
+
+
+
+a. Enabling/Disabling Components:
+Here is an example of the nominal entries that need to be provided.
+We have different values files available for different contexts.
+
+.. literalinclude:: ../kubernetes/onap/values.yaml
+ :language: yaml
+
+
+b. Generating ROBOT Encrypted Password:
+The Robot encrypted Password uses the same encryption.key as SO but an
+openssl algorithm that works with the python based Robot Framework.
+
+.. note::
+ To generate Robot ``openStackEncryptedPasswordHere``::
+
+ cd so/resources/config/mso/
+ /oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p
+
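+ To sanity-check the generated value, the same key decrypts it back to the
+ original password (assuming ``openssl`` and ``xxd`` are available)::
+
+ echo -n "<encrypted hex string>" | xxd -r -p | openssl aes-128-ecb -d -K `cat encryption.key` -nosalt
+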
+c. Generating SO Encrypted Password:
+The SO Encrypted Password uses a java based encryption utility since the
+Java encryption library is not easy to integrate with openssl/python that
+Robot uses in Dublin and later versions.
+
+.. note::
+ To generate SO ``openStackEncryptedPasswordHere`` and ``openStackSoEncryptedPassword``
+ ensure `default-jdk` is installed::
+
+ apt-get update; apt-get install default-jdk
+
+ Then execute::
+
+ SO_ENCRYPTION_KEY=`cat ~/oom/kubernetes/so/resources/config/mso/encryption.key`
+ OS_PASSWORD=XXXX_OS_CLEARTESTPASSWORD_XXXX
+
+ git clone http://gerrit.onap.org/r/integration
+ cd integration/deployment/heat/onap-rke/scripts
+
+ javac Crypto.java
+ java Crypto "$OS_PASSWORD" "$SO_ENCRYPTION_KEY"
+
+d. Update the OpenStack parameters:
+
+There are assumptions in the demonstration VNF Heat templates about the
+networking available in the environment. To get the most value out of these
+templates and the automation that can help confirm the setup is correct, please
+observe the following constraints.
+
+
+``openStackPublicNetId:``
+ This network should allow Heat templates to add interfaces.
+ This need not be an external network; floating IPs can be assigned to the
+ ports on the VMs that are created by the heat template, but it is important
+ that neutron allows ports to be created on them.
+
+``openStackPrivateNetCidr: "10.0.0.0/16"``
+ This ip address block is used to assign OA&M addresses on VNFs to allow ONAP
+ connectivity. The demonstration Heat templates assume that the 10.0 prefix
+ can be used by the VNFs, and the demonstration ip addressing plan embodied
+ in the preload template prevents conflicts when instantiating the various
+ VNFs. If
+ you need to change this, you will need to modify the preload data in the
+ Robot Helm chart like integration_preload_parameters.py and the
+ demo/heat/preload_data in the Robot container. The size of the CIDR should
+ be sufficient for ONAP and the VMs you expect to create.
+
+``openStackOamNetworkCidrPrefix: "10.0"``
+ This ip prefix must match the openStackPrivateNetCidr and is a helper
+ variable to some of the Robot scripts for demonstration. A production
+ deployment need not worry about this setting, but for the demonstration VNFs
+ the ip assignment strategy assumes the 10.0 ip prefix.
+
+Example Keystone v2.0
+
+.. literalinclude:: example-integration-override.yaml
+ :language: yaml
+
+Example Keystone v3 (required for Rocky and later releases)
+
+.. literalinclude:: example-integration-override-v3.yaml
+ :language: yaml
+
+
+**Step 5.** To set up a local Helm server to serve the ONAP charts::
+
+ > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 &
+
+Note the port number that is listed and use it in the Helm repo add as
+follows::
+
+ > helm repo add local http://127.0.0.1:8879
+
+**Step 6.** Verify your Helm repository setup with::
+
+ > helm repo list
+ NAME URL
+ local http://127.0.0.1:8879
+
+**Step 7.** Build a local Helm repository (from the kubernetes directory)::
+
+ > make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] all ; make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] onap
+
+`HELM_BIN`
+ Sets the helm binary to be used. The default value uses helm from PATH
+
+
+**Step 8.** Display the onap charts that are available to be deployed::
+
+ > helm repo update
+ > helm search repo onap
+
+.. literalinclude:: helm-search.txt
+
+.. note::
+ The setup of the Helm repository is a one time activity. If you make changes
+ to your deployment charts or values be sure to use ``make`` to update your
+ local Helm repository.
+
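+ Assuming the per-chart Makefile targets are available, a single component
+ chart can be rebuilt instead of the full set, for example::
+
+  > make SKIP_LINT=TRUE so
+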
+**Step 9.** Once the repo is set up, installation of ONAP can be done with a
+single command.
+
+.. note::
+ The ``--timeout 900s`` is currently required in Dublin and later
+ versions to address long running initialization tasks for DMaaP
+ and SO. Without this timeout value both applications may fail to
+ deploy.
+
+.. danger::
+ We've added the master password on the command line.
+ You shouldn't put it in a file for safety reasons;
+ please don't forget to change the value to something random.
+
+ A space is also added in front of the command so "history" doesn't catch it.
+ This masterPassword is very sensitive, please be careful!
+
+
+To deploy all ONAP applications use this command::
+
+ > cd oom/kubernetes
+ > helm deploy dev local/onap --namespace onap --set global.masterPassword=myAwesomePasswordThatINeedToChange -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/environment.yaml -f onap/resources/overrides/openstack.yaml --timeout 900s
+
+All override files may be customized (or replaced by other overrides) as per
+needs.
+
+`onap-all.yaml`
+ Enables the modules in the ONAP deployment. As ONAP is very modular, it is
+ possible to customize ONAP and disable some components through this
+ configuration file.
+
+`onap-all-ingress-nginx-vhost.yaml`
+ Alternative version of the `onap-all.yaml` but with the global ingress
+ controller enabled. It requires a cluster configured with the nginx ingress
+ controller and a load balancer. Please use this file instead of
+ `onap-all.yaml` if you want to use the experimental ingress controller
+ feature.
+
+`environment.yaml`
+ Includes configuration values specific to the deployment environment.
+
+ Example: adapt readiness and liveness timers to the level of performance of
+ your infrastructure
+
+`openstack.yaml`
+ Includes all the OpenStack related information for the default target tenant
+ you want to use to deploy VNFs from ONAP and/or additional parameters for the
+ embedded tests.
+
+**Step 10.** Verify ONAP installation
+
+Use the following to monitor your deployment and determine when ONAP is ready
+for use::
+
+ > kubectl get pods -n onap -o=wide
+
+.. note::
+ While all pods may be in a Running state, it is not a guarantee that all components are running fine.
+
+ Launch the healthcheck tests using Robot to verify that the components are healthy::
+
+ > ~/oom/kubernetes/robot/ete-k8s.sh onap health
+
+**Step 11.** Undeploy ONAP
+::
+
+ > helm undeploy dev
+
+More examples of using the deploy and undeploy plugins can be found here: https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins
diff --git a/docs/oom_user_guide.rst b/docs/oom_user_guide.rst
index 70f19df7b6..7c8e16b5bf 100644
--- a/docs/oom_user_guide.rst
+++ b/docs/oom_user_guide.rst
@@ -12,7 +12,8 @@
.. _Helm Documentation: https://docs.helm.sh/helm/
.. _Helm: https://docs.helm.sh/
.. _Kubernetes: https://Kubernetes.io/
-.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+.. _`Docker installation guide`: https://docs.docker.com/engine/install/
.. _user-guide-label:
OOM User Guide
@@ -55,8 +56,12 @@ ONAP with a few simple commands.
Pre-requisites
--------------
-Your environment must have both the Kubernetes `kubectl` and Helm setup as a
-one time activity.
+Your environment must have Docker installed as well as both the Kubernetes
+`kubectl` and Helm set up as a one time activity.
+
+Install Docker
+~~~~~~~~~~~~~~
+Follow official `Docker installation guide`_.
Install Kubectl
~~~~~~~~~~~~~~~
@@ -414,7 +419,7 @@ below::
Ensure you've disabled any proxy settings the browser you are using to access
the portal and then simply access now the new ssl-encrypted URL:
-https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm
+``https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm``
.. note::
Using the HTTPS based Portal URL the Browser needs to be configured to accept
@@ -481,7 +486,7 @@ have been created - a sample from the ONAP Integration labs follows:
.. figure:: consulHealth.png
:align: center
-To see the real-time health of a deployment go to: http://<kubernetes IP>:30270/ui/
+To see the real-time health of a deployment go to: ``http://<kubernetes IP>:30270/ui/``
where a GUI much like the following will be found:
diff --git a/docs/oom_user_guide_helm3.rst b/docs/oom_user_guide_helm3.rst
new file mode 100644
index 0000000000..b687fe8bd3
--- /dev/null
+++ b/docs/oom_user_guide_helm3.rst
@@ -0,0 +1,728 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
+.. _oom_user_guide:
+
+.. Links
+.. _Curated applications for Kubernetes: https://github.com/kubernetes/charts
+.. _Services: https://kubernetes.io/docs/concepts/services-networking/service/
+.. _ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+.. _StatefulSet: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
+.. _Helm Documentation: https://docs.helm.sh/helm/
+.. _Helm: https://docs.helm.sh/
+.. _Kubernetes: https://Kubernetes.io/
+.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
+.. _user-guide-label:
+
+OOM User Guide helm3 (experimental)
+###################################
+
+The ONAP Operations Manager (OOM) provides the ability to manage the entire
+life-cycle of an ONAP installation, from the initial deployment to final
+decommissioning. This guide provides instructions for users of ONAP to
+use the Kubernetes_/Helm_ system as a complete ONAP management system.
+
+This guide provides many examples of Helm command line operations. For a
+complete description of these commands please refer to the `Helm
+Documentation`_.
+
+.. figure:: oomLogoV2-medium.png
+ :align: right
+
+The following sections describe the life-cycle operations:
+
+- Deploy_ - with built-in component dependency management
+- Configure_ - unified configuration across all ONAP components
+- Monitor_ - real-time health monitoring feeding to a Consul UI and Kubernetes
+- Heal_ - failed ONAP containers are recreated automatically
+- Scale_ - cluster ONAP services to enable seamless scaling
+- Upgrade_ - change-out containers or configuration with little or no service
+ impact
+- Delete_ - cleanup individual containers or entire deployments
+
+.. figure:: oomLogoV2-Deploy.png
+ :align: right
+
+Deploy
+======
+
+The OOM team, with assistance from the ONAP project teams, has built a
+comprehensive set of Helm charts, yaml files very similar to TOSCA files, that
+describe the composition of each of the ONAP components and the relationship
+within and between components. Using this model Helm is able to deploy all of
+ONAP with a few simple commands.
+
+Pre-requisites
+--------------
+Your environment must have both the Kubernetes `kubectl` and Helm set up as a
+one time activity.
+
+Install Kubectl
+~~~~~~~~~~~~~~~
+Enter the following to install kubectl (on Ubuntu, there are slight differences
+on other O/Ss), the Kubernetes command line interface used to manage a
+Kubernetes cluster::
+
+ > curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.10/bin/linux/amd64/kubectl
+ > chmod +x ./kubectl
+ > sudo mv ./kubectl /usr/local/bin/kubectl
+ > mkdir ~/.kube
+
+Paste kubectl config from Rancher (see the :ref:`cloud-setup-guide-label` for
+alternative Kubernetes environment setups) into the `~/.kube/config` file.
+
+Verify that the Kubernetes config is correct::
+
+ > kubectl get pods --all-namespaces
+
+At this point you should see six Kubernetes pods running.
+
+Install Helm
+~~~~~~~~~~~~
+Helm is used by OOM for package and configuration management. To install Helm,
+enter the following::
+
+ > wget https://get.helm.sh/helm-v3.3.4-linux-amd64.tar.gz
+ > tar -zxvf helm-v3.3.4-linux-amd64.tar.gz
+ > sudo mv linux-amd64/helm /usr/local/bin/helm
+
+Verify the Helm version with::
+
+ > helm version
+
+Install the Helm Repo
+---------------------
+Once kubectl and Helm are set up, one needs to set up a local Helm server to
+serve the ONAP charts::
+
+ > helm install osn/onap
+
+.. note::
+ The osn repo is not currently available so creation of a local repository is
+ required.
+
+Helm is able to use charts served up from a repository and comes set up with a
+default CNCF provided `Curated applications for Kubernetes`_ repository called
+stable which should be removed to avoid confusion::
+
+ > helm repo remove stable
+
+.. To setup the Open Source Networking Nexus repository for helm enter::
+.. > helm repo add osn 'https://nexus3.onap.org:10001/helm/helm-repo-in-nexus/master/'
+
+To prepare your system for an installation of ONAP, you'll need to::
+
+ > git clone -b guilin --recurse-submodules -j2 http://gerrit.onap.org/r/oom
+ > cd oom/kubernetes
+
+
+To install a local Helm server::
+
+ > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum
+ > chmod +x ./chartmuseum
+ > mv ./chartmuseum /usr/local/bin
+
+To set up a local Helm server to serve the ONAP charts::
+
+ > mkdir -p ~/helm3-storage
+ > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 &
+
+Note the port number that is listed and use it in the Helm repo add as
+follows::
+
+ > helm repo add local http://127.0.0.1:8879
+
+To get a list of all of the available Helm chart repositories::
+
+ > helm repo list
+ NAME URL
+ local http://127.0.0.1:8879
+
+Then build your local Helm repository::
+
+ > make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] all
+
+`HELM_BIN`
+ Sets the helm binary to be used. The default value uses helm from PATH
+
+The Helm search command reads through all of the repositories configured on the
+system, and looks for matches::
+
+ > helm search -l
+ NAME VERSION DESCRIPTION
+ local/appc 2.0.0 Application Controller
+ local/clamp 2.0.0 ONAP Clamp
+ local/common 2.0.0 Common templates for inclusion in other charts
+ local/onap 2.0.0 Open Network Automation Platform (ONAP)
+ local/robot 2.0.0 A helm Chart for kubernetes-ONAP Robot
+ local/so 2.0.0 ONAP Service Orchestrator
+
+In any case, setup of the Helm repository is a one time activity.
+
+Next, install Helm Plugins required to deploy the ONAP Casablanca release::
+
+ > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins
+
+Once the repo is setup, installation of ONAP can be done with a single
+command::
+
+ > helm deploy development local/onap --namespace onap
+
+This will install ONAP from a local repository in a 'development' Helm release.
+As described below, to override the default configuration values provided by
+OOM, an environment file can be provided on the command line as follows::
+
+ > helm deploy development local/onap --namespace onap -f overrides.yaml
+
+To get a summary of the status of all of the pods (containers) running in your
+deployment::
+
+ > kubectl get pods --all-namespaces -o=wide
+
+.. note::
+ The Kubernetes namespace concept allows for multiple instances of a component
+ (such as all of ONAP) to co-exist with other components in the same
+ Kubernetes cluster by isolating them entirely. Namespaces share only the
+ hosts that form the cluster thus providing isolation between production and
+ development systems as an example. The OOM deployment of ONAP in Beijing is
+ now done within a single Kubernetes namespace where in Amsterdam a namespace
+ was created for each of the ONAP components.
+
+.. note::
+ The Helm `--name` option refers to a release name and not a Kubernetes namespace.
+
+
+To install a specific version of a single ONAP component (`so` in this example)
+with the given release name enter::
+
+ > helm deploy so onap/so --version 3.0.1
+
+To display details of a specific resource or group of resources type::
+
+ > kubectl describe pod so-1071802958-6twbl
+
+where the pod identifier refers to the auto-generated pod identifier.
+
+.. figure:: oomLogoV2-Configure.png
+ :align: right
+
+Configure
+=========
+
+Each project within ONAP has its own configuration data generally consisting
+of: environment variables, configuration files, and database initial values.
+Many technologies are used across the projects resulting in significant
+operational complexity and an inability to apply global parameters across the
+entire ONAP deployment. OOM solves this problem by introducing a common
+configuration technology, Helm charts, that provide a hierarchical
+configuration with the ability to override values with higher
+level charts or command line options.
+
+The structure of the configuration of ONAP is shown in the following diagram.
+Note that key/value pairs of a parent will always take precedence over those
+of a child. Also note that values set on the command line have the highest
+precedence of all.
+
+.. graphviz::
+
+ digraph config {
+ {
+ node [shape=folder]
+ oValues [label="values.yaml"]
+ demo [label="onap-demo.yaml"]
+ prod [label="onap-production.yaml"]
+ oReq [label="requirements.yaml"]
+ soValues [label="values.yaml"]
+ soReq [label="requirements.yaml"]
+ mdValues [label="values.yaml"]
+ }
+ {
+ oResources [label="resources"]
+ }
+ onap -> oResources
+ onap -> oValues
+ oResources -> environments
+ oResources -> oReq
+ oReq -> so
+ environments -> demo
+ environments -> prod
+ so -> soValues
+ so -> soReq
+ so -> charts
+ charts -> mariadb
+ mariadb -> mdValues
+
+ }
+
+The top level onap/values.yaml file contains the values required to be set
+before deploying ONAP. Here are the contents of this file:
+
+.. include:: ../kubernetes/onap/values.yaml
+ :code: yaml
+
+One may wish to create a value file that is specific to a given deployment such
+that it can be differentiated from other deployments. For example, an
+onap-development.yaml file may create a minimal environment for development
+while onap-production.yaml might describe a production deployment that operates
+independently of the developer version.
+
+For example, if the production OpenStack instance was different from a
+developer's instance, the onap-production.yaml file may contain a different
+value for the vnfDeployment/openstack/oam_network_cidr key as shown below.
+
+.. code-block:: yaml
+
+ nsPrefix: onap
+ nodePortPrefix: 302
+ apps: consul msb mso message-router sdnc vid robot portal policy appc aai
+ sdc dcaegen2 log cli multicloud clamp vnfsdk aaf kube2msb
+ dataRootDir: /dockerdata-nfs
+
+ # docker repositories
+ repository:
+ onap: nexus3.onap.org:10001
+ oom: oomk8s
+ aai: aaionap
+ filebeat: docker.elastic.co
+
+ image:
+ pullPolicy: Never
+
+ # vnf deployment environment
+ vnfDeployment:
+ openstack:
+ ubuntu_14_image: "Ubuntu_14.04.5_LTS"
+ public_net_id: "e8f51956-00dd-4425-af36-045716781ffc"
+ oam_network_id: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
+ oam_subnet_id: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
+ oam_network_cidr: "192.168.30.0/24"
+ <...>
+
+
+To deploy ONAP with this environment file, enter::
+
+ > helm deploy local/onap -n onap -f environments/onap-production.yaml
+
+.. include:: environments_onap_demo.yaml
+ :code: yaml
+
+When deploying all of ONAP, a requirements.yaml file controls which ONAP
+components are included and at what version. Here is an excerpt of this
+file:
+
+.. code-block:: yaml
+
+ # Referencing a named repo called 'local'.
+ # Can add this repo by running commands like:
+ # > helm serve
+ # > helm repo add local http://127.0.0.1:8879
+ dependencies:
+ <...>
+ - name: so
+ version: ~2.0.0
+ repository: '@local'
+ condition: so.enabled
+ <...>
+
+The ~ operator in the `so` version value indicates that the latest "2.X.X"
+version of `so` shall be used, thus allowing for minor upgrades that don't
+impact the so API; hence, version 2.0.1 will be installed in this case.
+
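+Assuming the local repository has been built as described above, the chart
+versions that could satisfy such a constraint can be listed with::
+
+ > helm search repo local/so --versions
+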
+The onap/resources/environment/onap-dev.yaml (see the excerpt below) enables
+fine grained control over which components are included as part of this
+deployment. By changing this `so` line to `enabled: false` the `so` component
+will not be deployed. If this change is part of an upgrade the existing `so`
+component will be shut down. Other `so` parameters and even `so` child values
+can be modified, for example the `so`'s `liveness` probe could be disabled
+(which is not recommended as this change would disable auto-healing of `so`).
+
+.. code-block:: yaml
+
+ #################################################################
+ # Global configuration overrides.
+ #
+ # These overrides will affect all helm charts (ie. applications)
+ # that are listed below and are 'enabled'.
+ #################################################################
+ global:
+ <...>
+
+ #################################################################
+ # Enable/disable and configure helm charts (ie. applications)
+ # to customize the ONAP deployment.
+ #################################################################
+ aaf:
+ enabled: false
+ <...>
+ so: # Service Orchestrator
+ enabled: true
+
+ replicaCount: 1
+
+ liveness:
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+ <...>
+
+Accessing the ONAP Portal using OOM and a Kubernetes Cluster
+------------------------------------------------------------
+
+The ONAP deployment created by OOM operates in a private IP network that isn't
+publicly accessible (i.e. OpenStack VMs with private internal network) which
+blocks access to the ONAP Portal. To enable direct access to this Portal from a
+user's own environment (a laptop etc.) the portal application's port 8989 is
+exposed through a `Kubernetes LoadBalancer`_ object.
+
+Typically, to be able to access the Kubernetes nodes publicly a public address
+is assigned. In OpenStack this is a floating IP address.
+
+When the `portal-app` chart is deployed a Kubernetes service is created that
+instantiates a load balancer. The LB chooses the private interface of one of
+the nodes as in the example below (10.0.0.4 is private to the K8s cluster only).
+Then to be able to access the portal on port 8989 from outside the K8s &
+OpenStack environment, the user needs to assign/get the floating IP address that
+corresponds to the private IP as follows::
+
+ > kubectl -n onap get services|grep "portal-app"
+ portal-app LoadBalancer 10.43.142.201 10.0.0.4 8989:30215/TCP,8006:30213/TCP,8010:30214/TCP 1d app=portal-app,release=dev
+
+
+In this example, use the 10.0.0.4 private address as a key to find the
+corresponding public address, which in this example is 10.12.6.155. If you're
+using OpenStack you'll do the lookup with the horizon GUI or the OpenStack CLI
+for your tenant (openstack server list; see the CLI example after the host
+list below). That IP is then used in your `/etc/hosts` to map the fixed DNS
+aliases required by the ONAP Portal as shown below::
+
+ 10.12.6.155 portal.api.simpledemo.onap.org
+ 10.12.6.155 vid.api.simpledemo.onap.org
+ 10.12.6.155 sdc.api.fe.simpledemo.onap.org
+ 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org
+ 10.12.6.155 sdc.dcae.plugin.simpledemo.onap.org
+ 10.12.6.155 portal-sdk.simpledemo.onap.org
+ 10.12.6.155 policy.api.simpledemo.onap.org
+ 10.12.6.155 aai.api.sparky.simpledemo.onap.org
+ 10.12.6.155 cli.api.simpledemo.onap.org
+ 10.12.6.155 msb.api.discovery.simpledemo.onap.org
+ 10.12.6.155 msb.api.simpledemo.onap.org
+ 10.12.6.155 clamp.api.simpledemo.onap.org
+ 10.12.6.155 so.api.simpledemo.onap.org
+ 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org
+
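+For the floating IP lookup mentioned above, the OpenStack CLI can be used, for
+example (assuming the CLI is configured for your tenant)::
+
+ > openstack server list
+ > openstack floating ip list | grep 10.0.0.4
+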
+Ensure you've disabled any proxy settings in the browser you are using to
+access the portal, and then simply access the new ssl-encrypted URL:
+https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm
+
+.. note::
+ Using the HTTPS based Portal URL the Browser needs to be configured to accept
+ insecure credentials.
+ Additionally, when opening an Application inside the Portal, the Browser
+ might block the content, which requires disabling the blocking and reloading
+ the page.
+
+.. note::
+ Besides the ONAP Portal, the Components can deliver additional user
+ interfaces; please check the Component specific documentation.
+
+.. note::
+
+ | Alternatives Considered:
+
+ - Kubernetes port forwarding was considered but discarded as it would require
+ the end user to run a script that opens up port forwarding tunnels to each of
+ the pods that provides a portal application widget.
+
+ - Reverting to a VNC server similar to what was deployed in the Amsterdam
+ release was also considered but there were many issues with resolution, lack
+ of volume mount, /etc/hosts dynamic update, file upload that were a tall order
+ to solve in time for the Beijing release.
+
+ Observations:
+
+ - If you are not using floating IPs in your Kubernetes deployment and directly attaching
+ a public IP address (i.e. by using your public provider network) to your K8S Node
+ VMs' network interface, then the output of 'kubectl -n onap get services | grep "portal-app"'
+ will show your public IP instead of the private network's IP. Therefore,
+ you can grab this public IP directly (as compared to trying to find the floating
+ IP first) and map this IP in /etc/hosts.
+
+.. figure:: oomLogoV2-Monitor.png
+ :align: right
+
+Monitor
+=======
+
+All highly available systems include at least one facility to monitor the
+health of components within the system. Such health monitors are often used as
+inputs to distributed coordination systems (such as etcd, Zookeeper, or Consul)
+and monitoring systems (such as Nagios or Zabbix). OOM provides two mechanisms
+to monitor the real-time health of an ONAP deployment:
+
+- a Consul GUI for a human operator or downstream monitoring systems and
+ Kubernetes liveness probes that enable automatic healing of failed
+ containers, and
+- a set of liveness probes which feed into the Kubernetes manager which
+ are described in the Heal section.
+
+Within ONAP, Consul is the monitoring system of choice and deployed by OOM in
+two parts:
+
+- a three-way, centralized Consul server cluster is deployed as a highly
+ available monitor of all of the ONAP components, and
+- a number of Consul agents.
+
+The Consul server provides a user interface that allows a user to graphically
+view the current health status of all of the ONAP components for which agents
+have been created - a sample from the ONAP Integration labs follows:
+
+.. figure:: consulHealth.png
+ :align: center
+
+To see the real-time health of a deployment go to: http://<kubernetes IP>:30270/ui/
+where a GUI much like the following will be found:
+
+
+.. figure:: oomLogoV2-Heal.png
+ :align: right
+
+Heal
+====
+
+The ONAP deployment is defined by Helm charts as mentioned earlier. These Helm
+charts are also used to implement automatic recoverability of ONAP components
+when individual components fail. Once ONAP is deployed, a "liveness" probe
+starts checking the health of the components after a specified startup time.
+
+Should a liveness probe indicate a failed container it will be terminated and a
+replacement will be started in its place - containers are ephemeral. Should the
+deployment specification indicate that there are one or more dependencies to
+this container or component (for example a dependency on a database) the
+dependency will be satisfied before the replacement container/component is
+started. This mechanism ensures that, after a failure, all of the ONAP
+components restart successfully.
+
+To test healing, the following command can be used to delete a pod::
+
+ > kubectl delete pod [pod name] -n [pod namespace]
+
+One could then use the following command to monitor the pods and observe the
+pod being terminated and the service being automatically healed with the
+creation of a replacement pod::
+
+ > kubectl get pods --all-namespaces -o=wide
+
+.. figure:: oomLogoV2-Scale.png
+ :align: right
+
+Scale
+=====
+
+Many of the ONAP components are horizontally scalable which allows them to
+adapt to expected offered load. During the Beijing release scaling is static,
+that is during deployment or upgrade a cluster size is defined and this cluster
+will be maintained even in the presence of faults. The parameter that controls
+the cluster size of a given component is found in the values.yaml file for that
+component. Here is an excerpt that shows this parameter:
+
+.. code-block:: yaml
+
+ # default number of instances
+ replicaCount: 1
+
+In order to change the size of a cluster, an operator could use a helm upgrade
+(described in detail in the next section) as follows::
+
+ > helm upgrade --set replicaCount=3 onap/so/mariadb
+
+The ONAP components use Kubernetes provided facilities to build clustered,
+highly available systems including: Services_ with load-balancers, ReplicaSet_,
+and StatefulSet_. Some of the open-source projects used by the ONAP components
+directly support clustered configurations, for example ODL and MariaDB Galera.
+
+The Kubernetes Services_ abstraction provides a consistent access point for
+each of the ONAP components, independent of the pod or container architecture
+of that component. For example, SDN-C uses OpenDaylight clustering with a
+default cluster size of three but uses a Kubernetes service to abstract this
+cluster from the other ONAP components, such that the cluster can change size
+(i.e. the number of pods) and this change is isolated from the other ONAP
+components by the load-balancer implemented in the ODL service abstraction.
+
+A ReplicaSet_ is a construct that is used to describe the desired state of the
+cluster. For example 'replicas: 3' indicates to Kubernetes that a cluster of 3
+instances is the desired state. Should one of the members of the cluster fail,
+a new member will be automatically started to replace it.
+
+Some of the ONAP components may need a more deterministic deployment; for
+example to enable intra-cluster communication. For these applications the
+component can be deployed as a Kubernetes StatefulSet_ which will maintain a
+persistent identifier for the pods and thus a stable network id for the pods.
+For example: the pod names might be web-0, web-1, web-{N-1} for N 'web' pods
+with corresponding DNS entries such that intra service communication is simple
+even if the pods are physically distributed across multiple nodes. An example
+of how these capabilities can be used is described in the Running Consul on
+Kubernetes tutorial.
+
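+Assuming an ONAP deployment in the `onap` namespace, the components deployed
+as a StatefulSet can be listed with::
+
+ > kubectl get statefulsets -n onap
+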
+.. figure:: oomLogoV2-Upgrade.png
+ :align: right
+
+Upgrade
+=======
+
+Helm has built-in capabilities to enable the upgrade of pods without causing a
+loss of the service being provided by that pod or pods (if configured as a
+cluster). As described in the OOM Developer's Guide, ONAP components provide
+an abstracted 'service' end point with the pods or containers providing this
+service hidden from other ONAP components by a load balancer. This capability
+is used during upgrades to allow a pod with a new image to be added to the
+service before removing the pod with the old image. This 'make before break'
+capability ensures minimal downtime.
+
+Prior to doing an upgrade, determine the status of the deployed charts::
+
+ > helm list
+ NAME REVISION UPDATED STATUS CHART NAMESPACE
+ so 1 Mon Feb 5 10:05:22 2018 DEPLOYED so-2.0.1 default
+
+When upgrading a cluster a parameter controls the minimum size of the cluster
+during the upgrade while another parameter controls the maximum number of nodes
+in the cluster. For example, SDNC configured as a 3-way ODL cluster might
+require that during the upgrade no fewer than 2 pods are available at all times
+to provide service while no more than 5 pods are ever deployed across the two
+versions at any one time to avoid depleting the cluster of resources. In this
+scenario, the SDNC cluster would start with 3 old pods then Kubernetes may add
+a new pod (3 old, 1 new), delete one old (2 old, 1 new), add two new pods (2
+old, 3 new) and finally delete the 2 old pods (3 new). During this sequence
+the constraints of the minimum of two pods and maximum of five would be
+maintained while providing service the whole time.
+
+Initiation of an upgrade is triggered by changes in the Helm charts. For
+example, if the image specified for one of the pods in the SDNC deployment
+specification were to change (i.e. point to a new Docker image in the nexus3
+repository - commonly through the change of a deployment variable), the
+sequence of events described in the previous paragraph would be initiated.
+
+For example, to upgrade a container by changing configuration, specifically an
+environment value::
+
+ > helm deploy onap onap/so --version 2.0.1 --set enableDebug=true
+
+Issuing this command will result in the appropriate container being stopped by
+Kubernetes and replaced with a new container with the new environment value.
+
+To upgrade a component to a new version with a new configuration file enter::
+
+ > helm deploy onap onap/so --version 2.0.2 -f environments/demo.yaml
+
+To fetch release history enter::
+
+ > helm history so
+ REVISION UPDATED STATUS CHART DESCRIPTION
+ 1 Mon Feb 5 10:05:22 2018 SUPERSEDED so-2.0.1 Install complete
+ 2 Mon Feb 5 10:10:55 2018 DEPLOYED so-2.0.2 Upgrade complete
+
+Unfortunately, not all upgrades are successful. In recognition of this the
+lineup of pods within an ONAP deployment is tagged such that an administrator
+may force the ONAP deployment back to the previously tagged configuration or to
+a specific configuration, say to jump back two steps if an incompatibility
+between two ONAP components is discovered after the two individual upgrades
+succeeded.
+
+This rollback functionality gives the administrator confidence that in the
+unfortunate circumstance of a failed upgrade the system can be rapidly brought
+back to a known good state. This process of rolling upgrades while under
+service is illustrated in this short YouTube video showing a Zero Downtime
+Upgrade of a web application while under a 10 million transaction per second
+load.
+
+For example, to roll-back back to previous system revision enter::
+
+ > helm rollback so 1
+
+ > helm history so
+ REVISION UPDATED STATUS CHART DESCRIPTION
+ 1 Mon Feb 5 10:05:22 2018 SUPERSEDED so-2.0.1 Install complete
+ 2 Mon Feb 5 10:10:55 2018 SUPERSEDED so-2.0.2 Upgrade complete
+ 3 Mon Feb 5 10:14:32 2018 DEPLOYED so-2.0.1 Rollback to 1
+
+.. note::
+
+ The description field can be overridden to document actions taken or include
+ tracking numbers.
+
+Many of the ONAP components contain their own databases which are used to
+record configuration or state information. The schemas of these databases may
+change from version to version in such a way that data stored within the
+database needs to be migrated between versions. If such a migration script is
+available it can be invoked during the upgrade (or rollback) by Container
+Lifecycle Hooks. Two such hooks are available, PostStart and PreStop, which
+containers can access by registering a handler against one or both. Note that
+it is the responsibility of the ONAP component owners to implement the hook
+handlers - which could be a shell script or a call to a specific container HTTP
+endpoint - following the guidelines listed on the Kubernetes site. Lifecycle
+hooks are not restricted to database migration or even upgrades but can be used
+anywhere specific operations need to be taken during lifecycle operations.
+
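+Assuming a component has registered such handlers, they can be inspected on a
+running pod, for example::
+
+ > kubectl -n onap get pod <pod name> -o jsonpath='{.spec.containers[*].lifecycle}'
+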
+OOM uses Helm K8S package manager to deploy ONAP components. Each component is
+arranged in a packaging format called a chart - a collection of files that
+describe a set of k8s resources. Helm allows for rolling upgrades of the ONAP
+component deployed. To upgrade a component Helm release you will need an
+updated Helm chart. The chart might have modified, deleted or added values,
+deployment yamls, and more. To get the release name use::
+
+ > helm ls
+
+To easily upgrade the release use::
+
+ > helm upgrade [RELEASE] [CHART]
+
+To roll back to a previous release version use::
+
+ > helm rollback [flags] [RELEASE] [REVISION]
+
+For example, to upgrade the onap-so helm release to the latest SO container
+release v1.1.2:
+
+- Edit so values.yaml which is part of the chart
+- Change "so: nexus3.onap.org:10001/openecomp/so:v1.1.1" to
+ "so: nexus3.onap.org:10001/openecomp/so:v1.1.2"
+- From the chart location run::
+
+ > helm upgrade onap-so
+
+The previous so pod will be terminated and a new so pod with an updated so
+container will be created.
+
+.. figure:: oomLogoV2-Delete.png
+ :align: right
+
+Delete
+======
+
+Existing deployments can be partially or fully removed once they are no longer
+needed. To minimize errors it is recommended that before deleting components
+from a running deployment the operator perform a 'dry-run' to display exactly
+what will happen with a given command prior to actually deleting anything. For
+example::
+
+ > helm undeploy onap --dry-run
+
+will display the outcome of deleting the 'onap' release from the
+deployment.
+To completely delete a release and remove it from the internal store enter::
+
+ > helm undeploy onap
+
+One can also remove individual components from a deployment by changing the
+ONAP configuration values. For example, to remove `so` from a running
+deployment enter::
+
+ > helm undeploy onap-so
+
+will remove `so` as the configuration indicates it's no longer part of the
+deployment. This might be useful if one wanted to replace just `so` by
+installing a custom version.
diff --git a/docs/release-notes-amsterdam.rst b/docs/release-notes-amsterdam.rst
index 79d2e302f4..6fc229696c 100644
--- a/docs/release-notes-amsterdam.rst
+++ b/docs/release-notes-amsterdam.rst
@@ -5,6 +5,8 @@
.. reserved.
.. _release_notes_amsterdam:
+:orphan:
+
ONAP Operations Manager Release Notes
=====================================
diff --git a/docs/release-notes-beijing.rst b/docs/release-notes-beijing.rst
index 1af7c58dc7..1172a086d2 100644
--- a/docs/release-notes-beijing.rst
+++ b/docs/release-notes-beijing.rst
@@ -5,6 +5,8 @@
.. reserved.
.. _release_notes_beijing:
+:orphan:
+
ONAP Operations Manager Release Notes
=====================================
diff --git a/docs/release-notes-casablanca.rst b/docs/release-notes-casablanca.rst
index f983c59c9d..6a6a196b6b 100644
--- a/docs/release-notes-casablanca.rst
+++ b/docs/release-notes-casablanca.rst
@@ -5,6 +5,8 @@
.. reserved.
.. _release_notes_casablanca:
+:orphan:
+
ONAP Operations Manager Release Notes
=====================================
diff --git a/docs/release-notes-dublin.rst b/docs/release-notes-dublin.rst
index e948af5ebb..1974756ea3 100644
--- a/docs/release-notes-dublin.rst
+++ b/docs/release-notes-dublin.rst
@@ -5,6 +5,8 @@
.. reserved.
.. _release_notes_dublin:
+:orphan:
+
ONAP Operations Manager Release Notes
=====================================
diff --git a/docs/release-notes-elalto.rst b/docs/release-notes-elalto.rst
index 435889ef32..bbbf271a5f 100644
--- a/docs/release-notes-elalto.rst
+++ b/docs/release-notes-elalto.rst
@@ -5,6 +5,8 @@
.. reserved.
.. _release_notes_elalto:
+:orphan:
+
ONAP Operations Manager Release Notes
=====================================
diff --git a/docs/release-notes-frankfurt.rst b/docs/release-notes-frankfurt.rst
index da3ae2a956..7bd4474487 100644
--- a/docs/release-notes-frankfurt.rst
+++ b/docs/release-notes-frankfurt.rst
@@ -4,6 +4,8 @@
.. (c) ONAP Project and its contributors
.. _release_notes_frankfurt:
+:orphan:
+
*************************************
ONAP Operations Manager Release Notes
*************************************
diff --git a/kubernetes/common/common/templates/_affinities.tpl b/kubernetes/common/common/templates/_affinities.tpl
new file mode 100644
index 0000000000..f0802be29d
--- /dev/null
+++ b/kubernetes/common/common/templates/_affinities.tpl
@@ -0,0 +1,109 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+# Copyright © 2020 Bitnami, Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" . -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.matchLabels" (dict "dot" .context "matchLabels" (dict))) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ namespaces:
+ - {{ include "common.namespace" .context }}
+ topologyKey: kubernetes.io/hostname
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.matchLabels" (dict "dot" .context "matchLabels" (dict))) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ namespaces:
+ - {{ include "common.namespace" .context }}
+ topologyKey: kubernetes.io/hostname
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}} \ No newline at end of file
diff --git a/kubernetes/oof/values.yaml b/kubernetes/oof/values.yaml
index db7c9d2231..9f74986174 100644
--- a/kubernetes/oof/values.yaml
+++ b/kubernetes/oof/values.yaml
@@ -84,9 +84,9 @@ config:
secret_domain: osdf
aaf_ca_certs: /opt/app/ssl_cert/aaf_root_ca.cer
# config db api
- configDbUrl: http://config.db.url:8080
- configDbGetCellListUrl: 'SDNCConfigDBAPI/getCellList'
- configDbGetNbrListUrl: 'SDNCConfigDBAPI/getNbrList'
+ configDbUrl: http://configdb:8080
+ configDbGetCellListUrl: 'api/sdnc-config-db/v3/getCellList'
+ configDbGetNbrListUrl: 'api/sdnc-config-db/v3/getNbrList'
#aai api
aaiUrl: https://aai:8443
aaiGetLinksUrl: /aai/v16/network/logical-links
diff --git a/kubernetes/robot b/kubernetes/robot
deleted file mode 160000
-Subproject 85b5af5058bbda19b557add185d917f60c2188e
diff --git a/kubernetes/robot/.gitignore b/kubernetes/robot/.gitignore
new file mode 100644
index 0000000000..97a5360f8e
--- /dev/null
+++ b/kubernetes/robot/.gitignore
@@ -0,0 +1,3 @@
+/.project
+/.pydevproject
+*.pyc \ No newline at end of file
diff --git a/kubernetes/robot/.helmignore b/kubernetes/robot/.helmignore
new file mode 100644
index 0000000000..f0c1319444
--- /dev/null
+++ b/kubernetes/robot/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/kubernetes/robot/Chart.yaml b/kubernetes/robot/Chart.yaml
new file mode 100644
index 0000000000..577d2abc34
--- /dev/null
+++ b/kubernetes/robot/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: A helm Chart for kubernetes-ONAP Robot
+name: robot
+version: 7.0.0
diff --git a/kubernetes/robot/INFO.yaml b/kubernetes/robot/INFO.yaml
new file mode 100644
index 0000000000..6f159134b7
--- /dev/null
+++ b/kubernetes/robot/INFO.yaml
@@ -0,0 +1,141 @@
+---
+project: 'testsuite/oom'
+project_creation_date: '2017-02-15'
+lifecycle_state: 'Incubation'
+project_lead: &onap_integration_ptl
+ name: 'Morgan Richomme'
+ email: 'morgan.richomme@orange.com'
+ id: 'mrichomme'
+ company: 'Orange'
+ timezone: 'France/Paris'
+primary_contact: *onap_integration_ptl
+project_category: ''
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.onap.org/projects/TEST'
+ key: 'TEST'
+mailing_list:
+ type: 'groups.io'
+ url: 'lists.onap.org'
+ tag: '<[sub-project_name]>'
+realtime_discussion: ''
+meetings:
+ - type: 'zoom'
+ agenda: 'https://wiki.onap.org/display/DW/Integration+Meeting+Minutes'
+ url: 'https://wiki.onap.org/pages/viewpage.action?pageId=6593670'
+ server: 'n/a'
+ channel: 'n/a'
+ repeats: 'weekly'
+ time: '14:00 UTC'
+repositories:
+ - 'testsuite/oom'
+committers:
+ - <<: *onap_integration_ptl
+ - name: 'Christophe Closset'
+ email: 'cc697w@intl.att.com'
+ company: 'ATT'
+ id: 'ChrisC'
+ timezone: 'Belgium/Namur'
+ - name: 'Daniel Rose'
+ email: 'DR695H@att.com'
+ company: 'ATT'
+ id: 'DR695H'
+ timezone: 'America/New_York'
+ - name: 'Marco Platania'
+ email: 'platania@research.att.com'
+ company: 'ATT'
+ id: 'platania'
+ timezone: 'America/New_York'
+ - name: 'Chengli Wang'
+ email: 'wangchengli@chinamobile.com'
+ company: 'China Mobile'
+ id: 'wangchengli'
+ timezone: 'China/Beijing'
+ - name: 'Brian Freeman'
+ email: 'bf1936@att.com'
+ company: 'AT&T'
+ id: 'bdfreeman1421'
+ timezone: 'America/New_York'
+ - name: 'Morgan Richomme'
+ email: 'morgan.richomme@orange.com'
+ company: 'orange'
+ id: 'mrichomme'
+ timezone: 'France/Paris'
+ - name: 'Bartek Grzybowski'
+ email: 'b.grzybowski@partner.samsung.com'
+ company: 'samsung'
+ id: 'bgrzybowski'
+ timezone: 'Poland/Warsaw'
+ - name: 'Marcin Przybysz'
+ email: 'marcin.przybysz@nokia.com'
+ company: 'nokia'
+ id: 'mprzybys'
+ timezone: 'Poland/Warsaw'
+ - name: 'Krzysztof Kuzmicki'
+ email: 'krzysztof.kuzmicki@nokia.com'
+ company: 'nokia'
+ id: 'kkuzmick'
+ timezone: 'Europe/Warsaw'
+ - name: 'Pawel Wieczorek'
+ email: 'p.wieczorek2@samsung.com'
+ company: 'samsung'
+ id: 'pwieczorek'
+ timezone: 'Europe/Warsaw'
+ - name: 'Andreas Geissler'
+ email: 'andreas-geissler@telekom.de'
+ company: 'Deutsche Telekom'
+ id: 'andreasgeissler'
+ timezone: 'Europe/Berlin'
+ - name: 'Michal Jagiello'
+ email: 'Michal.Jagiello@t-mobile.pl'
+ company: 'T-Mobile'
+ id: 'MichalJagielloTMPL'
+ timezone: 'Europe/Warsaw'
+ - name: 'Lukasz Rajewski'
+ email: 'lukasz.rajewski@orange.com'
+ company: 'Orange'
+ id: 'rajewluk'
+ timezone: 'Europe/Warsaw'
+ - name: 'Thierry Hardy'
+ email: 'thierry.hardy@orange.com'
+ company: 'orange'
+ id: 'jardellos'
+ timezone: 'Europe/Paris'
+ - name: 'Lasse Kaihlavirta'
+ email: 'l.kaihlavirt@partner.samsung.com'
+ company: 'samsung'
+ id: 'kaihlavi'
+ timezone: 'Europe/Helsinki'
+tsc:
+ approval: 'https://lists.onap.org/pipermail/onap-tsc'
+ changes:
+ - type: 'Addition'
+ name: 'Morgan Richomme'
+ link: 'https://wiki.onap.org/display/DW/TSC+2019-10-17'
+ - type: 'Deletion'
+ name: 'Xiaolong Kong, Hector Anapan-Lavalle'
+ link: 'https://lists.onap.org/g/onap-discuss/message/19277'
+ - type: 'Addition'
+ name: 'Bartek, Marcin, Eric'
+ link: 'https://wiki.onap.org/display/DW/Integration+Team'
+ - type: 'Addition'
+ name: 'Krzysztof Kuzmicki'
+ link: 'https://lists.onap.org/g/onap-tsc/message/6232'
+ - type: 'Addition'
+ name: 'Pawel Wieczorek'
+ link: 'https://lists.onap.org/g/onap-tsc/message/6341'
+ - type: 'Addition'
+ name: 'Andreas Geissler'
+ link: 'https://lists.onap.org/g/onap-tsc/message/6342'
+ - type: 'Addition'
+ name: 'Michal Jagiello'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7102'
+ - type: 'Addition'
+ name: 'Lukasz Rajewski'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7102'
+ - type: 'Addition'
+ name: 'Thierry Hardy'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7102'
+ - type: 'Addition'
+ name: 'Lasse Kaihlavirta'
+ link: 'https://lists.onap.org/g/onap-tsc/message/7280'
diff --git a/kubernetes/robot/demo-k8s.sh b/kubernetes/robot/demo-k8s.sh
new file mode 100755
index 0000000000..5d8fed829c
--- /dev/null
+++ b/kubernetes/robot/demo-k8s.sh
@@ -0,0 +1,235 @@
+#!/bin/bash
+# Copyright (C) 2018 Amdocs, Bell Canada
+# Modifications Copyright (C) 2019 Samsung
+# Modifications Copyright (C) 2020 Nokia
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Execute tags built to support the hands-on demo
+#
+function usage
+{
+ echo "Usage: demo-k8s.sh <namespace> <command> [<parameters>] [execscript]"
+ echo " "
+ echo " demo-k8s.sh <namespace> init"
+ echo " - Execute both init_customer + distribute"
+ echo " "
+ echo " demo-k8s.sh <namespace> init_customer"
+ echo " - Create demo customer (Demonstration) and services, etc."
+ echo " "
+ echo " demo-k8s.sh <namespace> distribute [<prefix>]"
+ echo " - Distribute demo models (demoVFW and demoVLB)"
+ echo " "
+ echo " demo-k8s.sh <namespace> preload <vnf_name> <module_name>"
+ echo " - Preload data for VNF for the <module_name>"
+ echo " "
+ echo " demo-k8s.sh <namespace> appc <module_name>"
+ echo "       - Provide APPC with the vFW module mount point for closed loop"
+ echo " "
+ echo " demo-k8s.sh <namespace> init_robot [ <etc_hosts_prefix> ]"
+ echo " - Initialize robot after all ONAP VMs have started"
+ echo " "
+ echo " demo-k8s.sh <namespace> instantiateVFW"
+ echo " - Instantiate vFW module for the demo customer (DemoCust<uuid>)"
+ echo " "
+ echo " demo-k8s.sh <namespace> instantiateVFWdirectso csar_filename"
+ echo " - Instantiate vFW module using direct SO interface using previously distributed model "
+ echo " that is in /tmp/csar in robot container"
+ echo " "
+ echo " demo-k8s.sh <namespace> instantiateVLB_CDS"
+ echo " - Instantiate vLB module using CDS with a preloaded CBA "
+ echo " "
+ echo " demo-k8s.sh <namespace> deleteVNF <module_name from instantiateVFW>"
+ echo " - Delete the module created by instantiateVFW"
+ echo " "
+ echo " demo-k8s.sh <namespace> vfwclosedloop <pgn-ip-address>"
+ echo " - vFWCL: Sets the packet generator to high and low rates, and checks whether the policy "
+ echo " kicks in to modulate the rates back to medium"
+ echo " "
+ echo " demo-k8s.sh <namespace> <command> [<parameters>] execscript"
+ echo " - Optional parameter to execute user custom scripts located in scripts/demoscript directory"
+ echo " "
+}
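+
+# Example invocations (a sketch only; "onap" is the namespace used in examples
+# elsewhere in this chart, and 10.12.5.2 is a placeholder packet-generator address):
+#   ./demo-k8s.sh onap init
+#   ./demo-k8s.sh onap instantiateVFW
+#   ./demo-k8s.sh onap vfwclosedloop 10.12.5.2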
+
+# Check if execscript flag is used and drop it from input arguments
+
+if [[ "${!#}" == "execscript" ]]; then
+ set -- "${@:1:$#-1}"
+ execscript=true
+fi
+
+# Set the defaults
+
+echo "Number of parameters:"
+echo $#
+
+if [ $# -lt 2 ];then
+ usage
+ exit
+fi
+
+NAMESPACE=$1
+shift
+
+##
+## if more than 1 tag is supplied, they must be provided with -i or -e
+##
+while [ $# -gt 0 ]
+do
+ key="$1"
+ echo "KEY:"
+ echo $key
+
+ case $key in
+ init_robot)
+ TAG="UpdateWebPage"
+ read -s -p "WEB Site Password for user 'test': " WEB_PASSWORD
+ if [ "$WEB_PASSWORD" = "" ]; then
+ echo ""
+ echo "WEB Password is required for user 'test'"
+ exit
+ fi
+ VARIABLES="$VARIABLES -v WEB_PASSWORD:$WEB_PASSWORD"
+ shift
+ if [ $# -eq 2 ];then
+ VARIABLES="$VARIABLES -v HOSTS_PREFIX:$1"
+ fi
+ shift
+ ;;
+ init)
+ TAG="InitDemo"
+ shift
+ ;;
+ vescollector)
+ TAG="vescollector"
+ shift
+ ;;
+ distribute_vcpe)
+ TAG="distributeVCPE"
+ shift
+ ;;
+ init_customer)
+ TAG="InitCustomer"
+ shift
+ ;;
+ distribute)
+ TAG="InitDistribution"
+ shift
+ if [ $# -eq 1 ];then
+ VARIABLES="$VARIABLES -v DEMO_PREFIX:$1"
+ fi
+ shift
+ ;;
+ preload)
+ TAG="PreloadDemo"
+ shift
+ if [ $# -ne 2 ];then
+ echo "Usage: demo-k8s.sh <namespace> preload <vnf_name> <module_name>"
+ exit
+ fi
+ VARIABLES="$VARIABLES -v VNF_NAME:$1"
+ shift
+ VARIABLES="$VARIABLES -v MODULE_NAME:$1"
+ shift
+ ;;
+ appc)
+ TAG="APPCMountPointDemo"
+ shift
+ if [ $# -ne 1 ];then
+ echo "Usage: demo-k8s.sh <namespace> appc <module_name>"
+ exit
+ fi
+ VARIABLES="$VARIABLES -v MODULE_NAME:$1"
+ shift
+ ;;
+ instantiateVFW)
+ TAG="instantiateVFW"
+ VARIABLES="$VARIABLES -v GLOBAL_BUILD_NUMBER:$$"
+ shift
+ ;;
+ instantiateVFWdirectso)
+ TAG="instantiateVFWdirectso"
+ shift
+ if [ $# -ne 1 ];then
+ echo "Usage: demo-k8s.sh <namespace> instantiateVFWdirectso <csar_filename>"
+ exit
+ fi
+ VARIABLES="$VARIABLES -v CSAR_FILE:$1 -v GLOBAL_BUILD_NUMBER:$$"
+ shift
+ ;;
+ instantiateVLB_CDS)
+ TAG="instantiateVLB_CDS"
+ VARIABLES="$VARIABLES -v GLOBAL_BUILD_NUMBER:$$"
+ shift
+ ;;
+ deleteVNF)
+ TAG="deleteVNF"
+ shift
+ if [ $# -ne 1 ];then
+ echo "Usage: demo-k8s.sh <namespace> deleteVNF <module_name from instantiateVFW>"
+ exit
+ fi
+ VARFILE=$1.py
+ VARIABLES="$VARIABLES -V /share/${VARFILE}"
+ shift
+ ;;
+ cds)
+ TAG="cds"
+ shift
+ ;;
+ distributeVFWNG)
+ TAG="distributeVFWNG"
+ shift
+ ;;
+ distributeDemoVFWDT)
+ TAG="DistributeDemoVFWDT"
+ shift
+ ;;
+ instantiateDemoVFWDT)
+ TAG="instantiateVFWDT"
+ shift
+ ;;
+ vfwclosedloop)
+ TAG="vfwclosedloop"
+ shift
+ VARIABLES="$VARIABLES -v PACKET_GENERATOR_HOST:$1 -v pkg_host:$1"
+ shift
+ ;;
+ *)
+ usage
+ exit
+ esac
+done
+
+set -x
+
+POD=$(kubectl --namespace $NAMESPACE get pods | sed 's/ .*//'| grep robot)
+
+DIR=$(dirname "$0")
+SCRIPTDIR=scripts/demoscript
+
+ETEHOME=/var/opt/ONAP
+
+if [ $execscript ]; then
+ for script in $(ls -1 "$DIR/$SCRIPTDIR"); do
+ [ -f "$DIR/$SCRIPTDIR/$script" ] && [ -x "$DIR/$SCRIPTDIR/$script" ] && source "$DIR/$SCRIPTDIR/$script"
+ done
+fi
+
+export GLOBAL_BUILD_NUMBER=$(kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "ls -1q /share/logs/ | wc -l")
+OUTPUT_FOLDER=$(printf %04d $GLOBAL_BUILD_NUMBER)_demo_$key
+DISPLAY_NUM=$(($GLOBAL_BUILD_NUMBER + 90))
+
+VARIABLEFILES="-V /share/config/robot_properties.py"
+
+kubectl --namespace $NAMESPACE exec ${POD} -- ${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} -i ${TAG} --display $DISPLAY_NUM 2> ${TAG}.out
diff --git a/kubernetes/robot/ete-k8s.sh b/kubernetes/robot/ete-k8s.sh
new file mode 100755
index 0000000000..e3d05df0ef
--- /dev/null
+++ b/kubernetes/robot/ete-k8s.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+# Copyright © 2018 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+# Run the testsuite for the passed tag. Valid tags are listed in usage help
+# Please clean up logs when you are done...
+#
+if [ "$1" == "" ] || [ "$2" == "" ]; then
+ echo "Usage: ete-k8s.sh [namespace] [tag] [execscript]"
+ echo ""
+ echo "   List of test case tags (robot test file: tags)"
+ echo ""
+ echo " cds.robot: cds"
+ echo ""
+ echo " clamp.robot: clamp"
+ echo ""
+ echo " demo.robot: InitDemo, InitCustomer, APPCCDTPreloadDemo, APPCMountPointDemo, DistributeDemoVFWDT, DistributeVFWNG,"
+ echo " InitDistribution, PreloadDemo, deleteVNF, instantiateDemoVFWCL, instantiateVFW, "
+ echo " instantiateVFWCL, instantiateVFWDT, instantiateVFWCLDN"
+ echo ""
+ echo " health-check.robot: health, core, small, medium, 3rdparty, api, datarouter, externalapi, health-aaf, health-aai, health-appc,"
+ echo " health-clamp, health-cli, health-dcae, health-dmaap, health-log, health-modeling, health-msb,"
+ echo " health-multicloud, health-oof, health-policy, health-pomba, health-portal, health-sdc, health-sdnc,"
+ echo " health-so, health-uui, health-vfc, health-vid, health-vnfsdk, healthdist, healthlogin, healthmr,"
+ echo " healthportalapp, multicloud, oom"
+ echo ""
+ echo " hvves.robot: HVVES, ete"
+ echo ""
+ echo " model-distribution-vcpe.robot: distributevCPEResCust"
+ echo ""
+ echo " model-distribution.robot: distribute, distributeVFWDT, distributeVLB"
+ echo ""
+ echo " oof-*.robot: cmso, has, homing"
+ echo ""
+ echo " pnf-registration.robot: ete, pnf_registrate"
+ echo ""
+ echo " post-install-tests.robot: dmaapacl, postinstall"
+ echo ""
+ echo " sdc-dcae-d.robot: sdc-dcae-d"
+ echo ""
+ echo " security.robot: security"
+ echo ""
+ echo " update_onap_page.robot: UpdateWebPage"
+ echo ""
+ echo " vnf-orchestration-direct-so.robot: instantiateVFWdirectso"
+ echo ""
+ echo " vnf-orchestration.robot: instantiate, instantiateNoDelete, stability72hr"
+ echo ""
+ echo " [execscript] - optional parameter to execute user custom scripts located in scripts/etescript directory"
+ exit
+fi
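+
+# Example invocations (assuming the deployment namespace is "onap"):
+#   ./ete-k8s.sh onap health
+#   ./ete-k8s.sh onap security execscript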
+
+set -x
+
+export NAMESPACE="$1"
+
+POD=$(kubectl --namespace $NAMESPACE get pods | sed 's/ .*//'| grep robot)
+
+TAGS="-i $2"
+
+DIR=$(dirname "$0")
+SCRIPTDIR=scripts/etescript
+
+ETEHOME=/var/opt/ONAP
+
+if [[ "${!#}" == "execscript" ]]; then
+ for script in $(ls -1 "$DIR/$SCRIPTDIR"); do
+ [ -f "$DIR/$SCRIPTDIR/$script" ] && [ -x "$DIR/$SCRIPTDIR/$script" ] && source "$DIR/$SCRIPTDIR/$script"
+ done
+fi
+
+export GLOBAL_BUILD_NUMBER=$(kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "ls -1q /share/logs/ | wc -l")
+OUTPUT_FOLDER=$(printf %04d $GLOBAL_BUILD_NUMBER)_ete_$2
+DISPLAY_NUM=$(($GLOBAL_BUILD_NUMBER + 90))
+
+VARIABLEFILES="-V /share/config/robot_properties.py"
+VARIABLES="-v GLOBAL_BUILD_NUMBER:$$"
+
+case $2 in
+ security)
+ if [ -z "$NODEPORTS_FILE" ]; then
+ echo "Security tests require gathering additional information on ONAP cluster."
+ echo "It is unavailable from within Robot pod."
+ echo ""
+ echo "Rerun command with \"execscript\" argument, e.g."
+ echo "$ $0 onap security execscript"
+ exit
+ fi
+ VARIABLES="${VARIABLES} -v ACTUAL_NODEPORTS_FILE:${NODEPORTS_FILE}"
+ ;;
+esac
+
+kubectl --namespace $NAMESPACE exec ${POD} -- ${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --display $DISPLAY_NUM
diff --git a/kubernetes/robot/eteHelm-k8s.sh b/kubernetes/robot/eteHelm-k8s.sh
new file mode 100755
index 0000000000..9f8cbb2faf
--- /dev/null
+++ b/kubernetes/robot/eteHelm-k8s.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+# Run the health-check testsuites for the tags discovered by helm list
+# Please clean up logs when you are done...
+#
+if [ "$1" == "" ] ; then
+ echo "Usage: eteHelm-k8s.sh [namespace] [execscript]"
+ echo " Lists projects via helm list and runs health-check with those tags, except dev and dev-consul"
+ echo " [execscript] - optional parameter to execute user custom scripts located in scripts/helmscript directory"
+ exit
+fi
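+
+# Example invocations (assuming the deployment namespace is "onap"):
+#   ./eteHelm-k8s.sh onap
+#   ./eteHelm-k8s.sh onap execscript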
+
+set -x
+
+export NAMESPACE="$1"
+
+POD=$(kubectl --namespace $NAMESPACE get pods | sed 's/ .*//'| grep robot)
+
+PROJECTS=$(helm list | tail -n +3 | grep '-' | cut -d' ' -f1 | sed -E 's/\w+-(\w+)/health-\1/g' | grep -v consul | grep -v nfs-provision)
+
+TAGS=""
+for project in $PROJECTS ;
+do
+TAGS="$TAGS -i $project"
+done
+
+DIR=$(dirname "$0")
+SCRIPTDIR=scripts/helmscript
+
+ETEHOME=/var/opt/ONAP
+
+if [[ "${!#}" == "execscript" ]]; then
+ for script in $(ls -1 "$DIR/$SCRIPTDIR"); do
+ [ -f "$DIR/$SCRIPTDIR/$script" ] && [ -x "$DIR/$SCRIPTDIR/$script" ] && source "$DIR/$SCRIPTDIR/$script"
+ done
+fi
+
+export GLOBAL_BUILD_NUMBER=$(kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "ls -1q /share/logs/ | wc -l")
+OUTPUT_FOLDER=$(printf %04d $GLOBAL_BUILD_NUMBER)_ete_helmlist
+DISPLAY_NUM=$(($GLOBAL_BUILD_NUMBER + 90))
+
+VARIABLEFILES="-V /share/config/robot_properties.py"
+VARIABLES="-v GLOBAL_BUILD_NUMBER:$$"
+
+kubectl --namespace $NAMESPACE exec ${POD} -- ${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --display $DISPLAY_NUM
diff --git a/kubernetes/robot/instantiate-k8s.sh b/kubernetes/robot/instantiate-k8s.sh
new file mode 100755
index 0000000000..67fd403a12
--- /dev/null
+++ b/kubernetes/robot/instantiate-k8s.sh
@@ -0,0 +1,150 @@
+#!/bin/bash
+# Copyright 2019 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+NAMESPACE=
+FOLDER=
+POLL=0
+
+function check_required_parameter() {
+ # arg1 = parameter
+ # arg2 = parameter name
+ if [ -z "$1" ]; then
+ echo "$2 was not provided. This parameter is required."
+ exit 1
+ fi
+}
+
+function check_optional_parameter() {
+ # arg1 = parameter
+ # arg2 = parameter name
+ if [ -z "$1" ]; then
+ echo "$2"
+ else
+ echo "$1"
+ fi
+}
+
+while test $# -gt 0; do
+ case "$1" in
+ -h|--help)
+ echo "./instantiate-k8s.sh [options]"
+ echo " "
+ echo "required:"
+ echo "-n, --namespace <namespace> namespace that robot pod is running under."
+ echo "-f, --folder <folder> path to folder containing heat templates, preloads, and vnf-details.json."
+ echo " "
+ echo "additional options:"
+ echo "-p, --poll some cloud environments (like Azure) have a short timeout value when executing"
+ echo " kubectl. If your shell exits before the testsuite finishes, using this option"
+ echo " will poll the testsuite logs every 30 seconds until the test finishes."
+ echo " "
+ echo "This script executes the VNF instantiation robot testsuite."
+ echo "- It copies the VNF folder to the robot container that is part of the ONAP deployment."
+ echo "- It models, distributes, and instantiates a heat-based VNF."
+ echo "- It copies the logs to an output directory, and creates a tarball for upload to the OVP portal."
+ echo ""
+ exit 0
+ ;;
+ -n|--namespace)
+ shift
+ NAMESPACE=$1
+ shift
+ ;;
+ -f|--folder)
+ shift
+ FOLDER=$1
+ shift
+ ;;
+ -p|--poll)
+ shift
+ POLL=1
+ ;;
+ *)
+ echo "Unknown Argument $1. Try running with --help."
+ exit 0
+ ;;
+ esac
+done
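+
+# Example invocation (a sketch; /tmp/vfw-vnf is a placeholder folder containing
+# the heat templates, preloads and vnf-details.json described in the help text above):
+#   ./instantiate-k8s.sh --namespace onap --folder /tmp/vfw-vnf --poll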
+
+check_required_parameter "$NAMESPACE" "--namespace"
+check_required_parameter "$FOLDER" "--folder"
+
+TAG="instantiate_vnf_ovp"
+
+if [ ! -d "$FOLDER" ]; then
+ echo "VNF folder $FOLDER does not exist, exiting."
+ exit 1
+fi
+
+BUILDNUM="$$"
+OUTPUT_DIRECTORY=/tmp/vnfdata.${BUILDNUM}
+
+set -x
+
+POD=$(kubectl --namespace $NAMESPACE get pods | sed 's/ .*//'| grep robot)
+export GLOBAL_BUILD_NUMBER=$(kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "ls -1q /share/logs/ | wc -l")
+TAGS="-i $TAG"
+ETEHOME=/var/opt/ONAP
+OUTPUT_FOLDER=$(printf %04d $GLOBAL_BUILD_NUMBER)_ete_instantiate_vnf
+DISPLAY_NUM=$(($GLOBAL_BUILD_NUMBER + 90))
+VARIABLEFILES="-V /share/config/robot_properties.py"
+VARIABLES="$VARIABLES -v GLOBAL_BUILD_NUMBER:${BUILDNUM}"
+
+echo "Copying the VNF folder into robot pod..."
+kubectl --namespace $NAMESPACE cp $FOLDER ${POD}:/tmp/vnfdata.${BUILDNUM}
+
+
+echo "Executing instantiation..."
+
+if [ $POLL = 1 ]; then
+ kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --listener ${ETEHOME}/testsuite/eteutils/robotframework-onap/listeners/OVPListener.py --display $DISPLAY_NUM > /tmp/vnf_instantiation.$BUILDNUM.log 2>&1 &"
+
+ pid=`kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "pgrep runTags.sh -n"`
+
+ if [ -z "$pid" ]; then
+ echo "robot testsuite unable to start"
+ exit 1
+ fi
+
+ kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "while ps -p \"$pid\" --no-headers | grep -v defunct; do echo \$'\n\n'; echo \"Testsuite still running \"\`date\`; echo \"LOG FILE: \"; tail -10 /tmp/vnf_instantiation.$BUILDNUM.log; sleep 30; done"
+
+else
+ kubectl --namespace $NAMESPACE exec ${POD} -- bash -c "${ETEHOME}/runTags.sh ${VARIABLEFILES} ${VARIABLES} -d /share/logs/${OUTPUT_FOLDER} ${TAGS} --listener ${ETEHOME}/testsuite/eteutils/robotframework-onap/listeners/OVPListener.py --display $DISPLAY_NUM"
+fi
+
+set +x
+
+echo "testsuite has finished"
+
+mkdir -p "$OUTPUT_DIRECTORY"
+echo "Copying Results from pod..."
+
+kubectl --namespace $NAMESPACE cp ${POD}:share/logs/$OUTPUT_FOLDER/summary/report.json "$OUTPUT_DIRECTORY"/report.json
+kubectl --namespace $NAMESPACE cp ${POD}:share/logs/$OUTPUT_FOLDER/summary/stack_report.json "$OUTPUT_DIRECTORY"/stack_report.json
+kubectl --namespace $NAMESPACE cp ${POD}:share/logs/$OUTPUT_FOLDER/summary/results.json "$OUTPUT_DIRECTORY"/results.json
+kubectl --namespace $NAMESPACE cp ${POD}:share/logs/$OUTPUT_FOLDER/log.html "$OUTPUT_DIRECTORY"/log.html
+
+pushd .
+
+# echo -e "import hashlib\nwith open(\"README.md\", \"r\") as f: bytes = f.read()\nreadable_hash = hashlib.sha256(bytes).hexdigest()\nprint(readable_hash)" | python
+
+cd "$OUTPUT_DIRECTORY"
+tar -czvf vnf_heat_results.tar.gz *
+
+popd
+
+echo "VNF test results: $OUTPUT_DIRECTORY/vnf_heat_results.tar.gz"
diff --git a/kubernetes/robot/onap_dev.pvt b/kubernetes/robot/onap_dev.pvt
new file mode 100644
index 0000000000..81e334a684
--- /dev/null
+++ b/kubernetes/robot/onap_dev.pvt
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE
+NGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR
+NhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy
+yzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s
++ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa
+fiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC
+qFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20
+lMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd
+KqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb
+F2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z
+OjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af
+4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC
+6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm
+be9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w
+UbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+
+gMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7
+Y63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh
+9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS
+aWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C
+xGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v
+fMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB
+22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v
+YOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway
+itqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr
+y7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=
+-----END RSA PRIVATE KEY-----
diff --git a/kubernetes/robot/requirements.yaml b/kubernetes/robot/requirements.yaml
new file mode 100644
index 0000000000..cac9ba506f
--- /dev/null
+++ b/kubernetes/robot/requirements.yaml
@@ -0,0 +1,22 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+ - name: common
+ version: ~7.x-0
+ # local reference to common chart, as it is
+ # a part of this chart's package and will not
+ # be published independently to a repo (at this point)
+ repository: '@local'
+
diff --git a/kubernetes/robot/resources/config/eteshare/config/robot_properties.py b/kubernetes/robot/resources/config/eteshare/config/robot_properties.py
new file mode 100644
index 0000000000..ef12c9a848
--- /dev/null
+++ b/kubernetes/robot/resources/config/eteshare/config/robot_properties.py
@@ -0,0 +1,345 @@
+# Copyright (c) 2018 Amdocs, Bell Canada, and others
+# Modifications Copyright (c) 2020 AT&T Intellectual Property
+# Modifications Copyright (c) 2020 NOKIA Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+GLOBAL_INJECTED_AAF_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "aaf-service") }}'
+GLOBAL_INJECTED_AAI_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "aai") }}'
+GLOBAL_INJECTED_APPC_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "appc") }}'
+GLOBAL_INJECTED_APPC_CDT_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "appc-cdt") }}'
+GLOBAL_INJECTED_ARTIFACTS_VERSION = '{{.Values.demoArtifactsVersion}}'
+GLOBAL_INJECTED_ARTIFACTS_REPO_URL = "{{ .Values.demoArtifactsRepoUrl }}"
+GLOBAL_INJECTED_CLAMP_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "clamp") }}'
+GLOBAL_INJECTED_CLI_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "cli") }}'
+GLOBAL_INJECTED_CLOUD_ENV = 'openstack'
+GLOBAL_INJECTED_DCAE_COLLECTOR_IP = "{{ .Values.dcaeCollectorIp }}"
+GLOBAL_INJECTED_DCAE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "dcae-healthcheck") }}'
+GLOBAL_INJECTED_DCAE_VES_HOST = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "dcae-ves-collector") }}'
+GLOBAL_INJECTED_DMAAP_DR_PROV_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "dmaap-dr-prov") }}'
+GLOBAL_INJECTED_DMAAP_DR_NODE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "dmaap-dr-node") }}'
+GLOBAL_INJECTED_DNS_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_DOCKER_VERSION = '1.2-STAGING-latest'
+GLOBAL_INJECTED_EXTERNAL_DNS = 'N/A'
+GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "log-es") }}'
+GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "log-kibana") }}'
+GLOBAL_INJECTED_LOG_LOGSTASH_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "log-ls-http") }}'
+GLOBAL_INJECTED_POMBA_AAI_CONTEXT_BUILDER_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-aaictxbuilder") }}'
+GLOBAL_INJECTED_POMBA_SDC_CONTEXT_BUILDER_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-sdcctxbuilder") }}'
+GLOBAL_INJECTED_POMBA_NETWORK_DISC_CONTEXT_BUILDER_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-networkdiscoveryctxbuilder") }}'
+GLOBAL_INJECTED_POMBA_SERVICE_DECOMPOSITION_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-servicedecomposition") }}'
+GLOBAL_INJECTED_POMBA_SDNC_CTX_BUILDER_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-sdncctxbuilder") }}'
+GLOBAL_INJECTED_POMBA_NETWORKDISCOVERY_MICROSERVICE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-networkdiscovery") }}'
+GLOBAL_INJECTED_POMBA_VALIDATION_SERVICE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-validation-service") }}'
+GLOBAL_INJECTED_POMBA_KIBANA_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-kibana") }}'
+GLOBAL_INJECTED_POMBA_ELASTIC_SEARCH_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-es") }}'
+GLOBAL_INJECTED_POMBA_CONTEX_TAGGREGATOR_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pomba-contextaggregator") }}'
+GLOBAL_INJECTED_KEYSTONE = '{{ .Values.openStackKeyStoneUrl }}'
+GLOBAL_INJECTED_MR_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "message-router") }}'
+GLOBAL_INJECTED_BC_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "dmaap-bc") }}'
+GLOBAL_INJECTED_MUSIC_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "music") }}'
+GLOBAL_INJECTED_NBI_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "nbi") }}'
+GLOBAL_INJECTED_NETWORK = '{{ .Values.openStackPrivateNetId }}'
+GLOBAL_INJECTED_NEXUS_DOCKER_REPO = '{{ include "common.repository" . }}'
+GLOBAL_INJECTED_NEXUS_PASSWORD = 'docker'
+GLOBAL_INJECTED_NEXUS_REPO ='https://nexus.onap.org/content/sites/raw'
+GLOBAL_INJECTED_NEXUS_USERNAME = 'docker'
+GLOBAL_INJECTED_OOF_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_OOF_HOMING_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "oof-has-api") }}'
+GLOBAL_INJECTED_OOF_SNIRO_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "oof-osdf") }}'
+GLOBAL_INJECTED_OOF_CMSO_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "oof-cmso") }}'
+GLOBAL_INJECTED_MSB_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "msb-iag") }}'
+GLOBAL_INJECTED_OPENSTACK_API_KEY = '{{ .Values.config.openStackEncryptedPasswordHere}}'
+GLOBAL_INJECTED_OPENSTACK_TENANT_ID = '{{ .Values.openStackTenantId }}'
+GLOBAL_INJECTED_OPENSTACK_USERNAME = '{{ .Values.openStackUserName }}'
+GLOBAL_INJECTED_OPENSTACK_PROJECT_NAME = '{{ .Values.openStackProjectName }}'
+GLOBAL_INJECTED_OPENSTACK_DOMAIN_ID = '{{ .Values.openStackDomainId }}'
+GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN = '{{ .Values.openStackUserDomain }}'
+GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION = '{{ .Values.openStackKeystoneAPIVersion }}'
+GLOBAL_INJECTED_REGION_THREE = '{{ .Values.openStackRegionRegionThree }}'
+GLOBAL_INJECTED_KEYSTONE_REGION_THREE = '{{ .Values.openStackKeyStoneUrlRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION_REGION_THREE = '{{ .Values.openStackKeystoneAPIVersionRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_USERNAME_REGION_THREE = '{{ .Values.openStackUserNameRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_SO_ENCRYPTED_PASSWORD_REGION_THREE = '{{ .Values.openSackMsoEncryptdPasswordRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_SO_ENCRYPTED_PASSWORD = '{{ .Values.config.openStackSoEncryptedPassword}}'
+GLOBAL_INJECTED_OPENSTACK_TENANT_ID_REGION_THREE = '{{ .Values.openStackTenantIdRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE = '{{ .Values.openStackProjectNameRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE = '{{ .Values.openStackDomainIdRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX = '{{ .Values.openStackOamNetworkCidrPrefix }}'
+GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_3RD_OCTET = '{{ .Values.openStackOamNetwork3rdOctet}}'
+GLOBAL_INJECTED_OPENSTACK_PUBLIC_NETWORK = '{{ .Values.openStackPublicNetworkName }}'
+GLOBAL_INJECTED_OPENSTACK_SECURITY_GROUP = '{{ .Values.openStackSecurityGroup }}'
+GLOBAL_INJECTED_OPENSTACK_PRIVATE_SUBNET_ID = "{{ .Values.openStackPrivateSubnetId }}"
+GLOBAL_INJECTED_OPENSTACK_PRIVATE_NET_CIDR = "{{ .Values.openStackPrivateNetCidr }}"
+GLOBAL_INJECTED_POLICY_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "pdp") }}'
+GLOBAL_INJECTED_POLICY_DROOLS_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "policy-drools-pdp") }}'
+GLOBAL_INJECTED_PORTAL_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "portal-app") }}'
+GLOBAL_INJECTED_POLICY_API_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "policy-api") }}'
+GLOBAL_INJECTED_POLICY_PAP_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "policy-pap") }}'
+GLOBAL_INJECTED_POLICY_DISTRIBUTION_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "policy-distribution") }}'
+GLOBAL_INJECTED_POLICY_PDPX_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "policy-xacml-pdp") }}'
+GLOBAL_INJECTED_POLICY_APEX_PDP_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "policy-apex-pdp") }}'
+GLOBAL_INJECTED_PUBLIC_NET_ID = '{{ .Values.openStackPublicNetId }}'
+GLOBAL_INJECTED_PRIVATE_KEY = "{{ .Files.Get .Values.vnfPrivateKey }}"
+GLOBAL_INJECTED_PUBLIC_KEY = "{{ .Values.vnfPubKey }}"
+GLOBAL_INJECTED_REGION = '{{ .Values.openStackRegion }}'
+GLOBAL_INJECTED_SCRIPT_VERSION = '{{ .Values.scriptVersion }}'
+GLOBAL_INJECTED_SDC_BE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "sdc-be") }}'
+GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "sdc-onboarding-be") }}'
+GLOBAL_INJECTED_SDC_FE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "sdc-fe") }}'
+GLOBAL_INJECTED_SDC_DCAE_BE_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "sdc-dcae-be") }}'
+GLOBAL_INJECTED_SDC_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_SDNC_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "sdnc") }}'
+GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "sdnc-portal") }}'
+GLOBAL_INJECTED_SO_APIHAND_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so") }}'
+GLOBAL_INJECTED_SO_SDCHAND_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-sdc-controller") }}'
+GLOBAL_INJECTED_SO_BPMN_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-bpmn-infra") }}'
+GLOBAL_INJECTED_SO_CATDB_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-catalog-db-adapter") }}'
+GLOBAL_INJECTED_SO_OPENSTACK_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-openstack-adapter") }}'
+GLOBAL_INJECTED_SO_REQDB_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-request-db-adapter") }}'
+GLOBAL_INJECTED_SO_SDNC_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-sdnc-adapter") }}'
+GLOBAL_INJECTED_SO_VFC_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-vfc-adapter") }}'
+GLOBAL_INJECTED_SO_VNFM_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-vnfm-adapter") }}'
+GLOBAL_INJECTED_SO_NSSMF_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "so-nssmf-adapter") }}'
+GLOBAL_INJECTED_UBUNTU_1404_IMAGE = '{{ .Values.ubuntu14Image }}'
+GLOBAL_INJECTED_UBUNTU_1604_IMAGE = '{{ .Values.ubuntu16Image }}'
+GLOBAL_INJECTED_VM_IMAGE_NAME = '{{ .Values.ubuntu14Image }}'
+GLOBAL_INJECTED_DANOS_IMAGE_NAME = '{{ .Values.danosImage }}'
+GLOBAL_INJECTED_DANOS_FLAVOR = '{{ .Values.danosFlavor }}'
+GLOBAL_INJECTED_VID_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "vid") }}'
+GLOBAL_INJECTED_VM_FLAVOR = '{{ .Values.openStackFlavourMedium }}'
+GLOBAL_INJECTED_VNFSDK_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "refrepo") }}'
+GLOBAL_INJECTED_CCSDK_CDS_BLUEPRINT_PROCESSOR_IP_ADDR = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "cds-blueprints-processor-http") }}'
+
+# aaf info - everything is from the private oam network (also called onap private network)
+GLOBAL_AAF_SERVER = 'https://{{include "robot.ingress.svchost" (dict "root" . "hostname" "aaf-service") }}:{{include "robot.ingress.port" (dict "root" . "hostname" "aaf-service" "port" 8100) }}'
+GLOBAL_AAF_USERNAME = '{{ .Values.aafUsername }}'
+GLOBAL_AAF_PASSWORD = '{{ .Values.aafPassword }}'
+GLOBAL_AAF_AUTHENTICATION = [GLOBAL_AAF_USERNAME, GLOBAL_AAF_PASSWORD]
+# aai info - everything is from the private oam network (also called onap private network)
+GLOBAL_AAI_SERVER_PROTOCOL = "https"
+GLOBAL_AAI_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "aai" "port" 8443) }}'
+GLOBAL_AAI_USERNAME = '{{ .Values.aaiUsername }}'
+GLOBAL_AAI_PASSWORD = '{{ .Values.aaiPassword}}'
+GLOBAL_AAI_AUTHENTICATION = [GLOBAL_AAI_USERNAME, GLOBAL_AAI_PASSWORD]
+# appc info - everything is from the private oam network (also called onap private network)
+GLOBAL_APPC_SERVER_PROTOCOL = "https"
+GLOBAL_APPC_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "appc" "port" 8443) }}'
+GLOBAL_APPC_USERNAME = '{{ .Values.appcUsername }}'
+GLOBAL_APPC_PASSWORD = '{{ .Values.appcPassword }}'
+GLOBAL_APPC_AUTHENTICATION = [GLOBAL_APPC_USERNAME, GLOBAL_APPC_PASSWORD]
+GLOBAL_APPC_CDT_SERVER_PROTOCOL = "https"
+GLOBAL_APPC_CDT_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "appc-cdt" "port" 18080) }}'
+GLOBAL_APPC_CDT_USERNAME = "demo"
+# sdc info - everything is from the private oam network (also called onap private network)
+GLOBAL_SDC_SERVER_PROTOCOL = "https"
+GLOBAL_SDC_FE_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "sdc-fe" "port" 9443) }}'
+GLOBAL_SDC_BE_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "sdc-be" "port" 8443) }}'
+GLOBAL_SDC_BE_ONBOARD_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "sdc-onboarding-be" "port" 8445) }}'
+GLOBAL_SDC_DCAE_BE_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "sdc-dcae-be" "port" 8444) }}'
+GLOBAL_SDC_USERNAME = "beep"
+GLOBAL_SDC_PASSWORD = "boop"
+GLOBAL_SDC_AUTHENTICATION = [GLOBAL_SDC_USERNAME, GLOBAL_SDC_PASSWORD]
+# clamp info - everything is from the private oam network (also called onap private network)
+GLOBAL_CLAMP_SERVER_PROTOCOL = "https"
+GLOBAL_CLAMP_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "clamp" "port" 8443) }}'
+# nbi info - everything is from the private oam network (also called onap private network)
+GLOBAL_NBI_SERVER_PROTOCOL = "https"
+GLOBAL_NBI_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "nbi" "port" 8443) }}'
+# cli info - everything is from the private oam network (also called onap private network)
+GLOBAL_CLI_SERVER_PROTOCOL = "https"
+GLOBAL_CLI_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "cli" "port" 443) }}'
+# dcae info - everything is from the private oam network (also called onap private network)
+GLOBAL_DCAE_SERVER_PROTOCOL = "http"
+GLOBAL_DCAE_HEALTH_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dcae-healthcheck" "port" 80) }}'
+GLOBAL_DCAE_USERNAME = '{{ .Values.dcaeUsername }}'
+GLOBAL_DCAE_PASSWORD = '{{ .Values.dcaePassword}}'
+GLOBAL_DCAE_AUTHENTICATION = [GLOBAL_DCAE_USERNAME, GLOBAL_DCAE_PASSWORD]
+# dcae hv-ves info
+GLOBAL_DCAE_HVVES_SERVER_NAME = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "dcae-hv-ves-collector") }}'
+GLOBAL_DCAE_HVVES_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dcae-hv-ves-collector" "port" 6061) }}'
+# data router info - everything is from the private oam network (also called onap private network)
+GLOBAL_DMAAP_DR_PROV_SERVER_PROTOCOL = "https"
+GLOBAL_DMAAP_DR_PROV_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dmaap-dr-prov" "port" 443) }}'
+GLOBAL_DMAAP_DR_NODE_SERVER_PROTOCOL = "https"
+GLOBAL_DMAAP_DR_NODE_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dmaap-dr-node" "port" 8443) }}'
+# dmaap message router info
+GLOBAL_DMAAP_MESSAGE_ROUTER_SERVER_NAME = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "message-router") }}'
+GLOBAL_DMAAP_MESSAGE_ROUTER_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "message-router" "port" 3904) }}'
+# dmaap kafka info
+GLOBAL_DMAAP_KAFKA_SERVER_NAME = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "message-router-kafka") }}'
+GLOBAL_DMAAP_KAFKA_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "message-router-kafka" "port" 9092) }}'
+GLOBAL_DMAAP_KAFKA_JAAS_USERNAME = '{{ .Values.kafkaJaasUsername }}'
+GLOBAL_DMAAP_KAFKA_JAAS_PASSWORD = '{{ .Values.kafkaJaasPassword }}'
+# DROOL server port and credentials
+GLOBAL_DROOLS_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "policy-drools-pdp" "port" 9696) }}'
+GLOBAL_DROOLS_USERNAME = '{{ .Values.droolsUsername }}'
+GLOBAL_DROOLS_PASSWORD = '{{ .Values.droolsPassword }}'
+GLOBAL_DROOLS_AUTHENTICATION = [GLOBAL_DROOLS_USERNAME, GLOBAL_DROOLS_PASSWORD]
+# log server config - NOTE: no log server is run in HEAT; only on OOM
+GLOBAL_LOG_SERVER_PROTOCOL = "http"
+GLOBAL_LOG_ELASTICSEARCH_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "log-es" "port" 9200) }}'
+GLOBAL_LOG_LOGSTASH_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "log-ls-http" "port" 9600) }}'
+GLOBAL_LOG_KIBANA_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "log-kibana" "port" 5601) }}'
+# pomba info - NOTE: no pomba is run in HEAT; only on OOM
+GLOBAL_POMBA_SERVER_PROTOCOL_HTTP = "http"
+GLOBAL_POMBA_SERVER_PROTOCOL_HTTPS = "https"
+GLOBAL_POMBA_AAICONTEXTBUILDER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-aaictxbuilder" "port" 9530) }}'
+GLOBAL_POMBA_SDCCONTEXTBUILDER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-sdcctxbuilder" "port" 9530) }}'
+GLOBAL_POMBA_NETWORKDISCCONTEXTBUILDER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-networkdiscoveryctxbuilder" "port" 9530) }}'
+GLOBAL_POMBA_SERVICEDECOMPOSITION_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-servicedecomposition" "port" 9532) }}'
+GLOBAL_POMBA_SDNCCXTBUILDER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-sdncctxbuilder" "port" 9530) }}'
+GLOBAL_POMBA_NETWORKDISCOVERY_MICROSERVICE_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-networkdiscovery" "port" 9531) }}'
+GLOBAL_POMBA_VALIDATIONSERVICE_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-validation-service" "port" 9529) }}'
+GLOBAL_POMBA_KIBANA_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-kibana" "port" 5601) }}'
+GLOBAL_POMBA_ELASTICSEARCH_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-es" "port" 9200) }}'
+GLOBAL_POMBA_CONTEXTAGGREGATOR_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "pomba-contextaggregator" "port" 9529) }}'
+
+# microservice bus info - everything is from the private oam network (also called onap private network)
+GLOBAL_MSB_SERVER_PROTOCOL = "https"
+GLOBAL_MSB_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "msb-iag" "port" 443) }}'
+# message router info - everything is from the private oam network (also called onap private network)
+GLOBAL_MR_SERVER_PROTOCOL = "http"
+GLOBAL_MR_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "message-router" "port" 3904) }}'
+# bus controller info
+GLOBAL_BC_SERVER_PROTOCOL = "https"
+GLOBAL_BC_HTTPS_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dmaap-bc" "port" 8443) }}'
+GLOBAL_BC_USERNAME = '{{ .Values.bcUsername }}'
+GLOBAL_BC_PASSWORD = '{{ .Values.bcPassword }}'
+# dcae inventory and deployment handler info
+GLOBAL_INVENTORY_SERVER_NAME = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "inventory") }}'
+GLOBAL_INVENTORY_SERVER_PROTOCOL = "https"
+GLOBAL_INVENTORY_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "inventory" "port" 8080) }}'
+GLOBAL_DEPLOYMENT_HANDLER_SERVER_NAME = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "deployment-handler") }}'
+GLOBAL_DEPLOYMENT_HANDLER_SERVER_PROTOCOL = "https"
+GLOBAL_DEPLOYMENT_HANDLER_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "deployment-handler" "port" 8443) }}'
+# SO containers - everything is from the private oam network (also called onap private network)
+GLOBAL_SO_APIHAND_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so" "port" 8080) }}'
+GLOBAL_SO_SDCHAND_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-sdc-controller" "port" 8085) }}'
+GLOBAL_SO_BPMN_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-bpmn-infra" "port" 8081) }}'
+GLOBAL_SO_CATDB_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-catalog-db-adapter" "port" 8082) }}'
+GLOBAL_SO_OPENSTACK_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-openstack-adapter" "port" 8087) }}'
+GLOBAL_SO_REQDB_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-request-db-adapter" "port" 8083) }}'
+GLOBAL_SO_SDNC_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-sdnc-adapter" "port" 8086) }}'
+GLOBAL_SO_VFC_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-vfc-adapter" "port" 8084) }}'
+GLOBAL_SO_VNFM_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-vnfm-adapter" "port" 9092) }}'
+GLOBAL_SO_NSSMF_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "so-nssmf-adapter" "port" 8088) }}'
+GLOBAL_SO_USERNAME = '{{ .Values.soUsername }}'
+GLOBAL_SO_CATDB_USERNAME = '{{ .Values.soCatdbUsername }}'
+# robot uses SO_PASSWORD for both SO and CATDB
+GLOBAL_SO_PASSWORD = '{{ .Values.soPassword }}'
+GLOBAL_SO_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_APIHAND_IP_ADDR + ':' + GLOBAL_SO_APIHAND_SERVER_PORT
+GLOBAL_SO_APIHAND_ENDPOINT = GLOBAL_SO_ENDPOINT
+GLOBAL_SO_SDCHAND_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_SDCHAND_IP_ADDR + ':' + GLOBAL_SO_SDCHAND_SERVER_PORT
+GLOBAL_SO_BPMN_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_BPMN_IP_ADDR + ':' + GLOBAL_SO_BPMN_SERVER_PORT
+GLOBAL_SO_CATDB_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_CATDB_IP_ADDR + ':' + GLOBAL_SO_CATDB_SERVER_PORT
+GLOBAL_SO_OPENSTACK_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_OPENSTACK_IP_ADDR + ':' + GLOBAL_SO_OPENSTACK_SERVER_PORT
+GLOBAL_SO_REQDB_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_REQDB_IP_ADDR + ':' + GLOBAL_SO_REQDB_SERVER_PORT
+GLOBAL_SO_SDNC_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_SDNC_IP_ADDR + ':' + GLOBAL_SO_SDNC_SERVER_PORT
+GLOBAL_SO_VFC_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_VFC_IP_ADDR + ':' + GLOBAL_SO_VFC_SERVER_PORT
+GLOBAL_SO_VNFM_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_VNFM_IP_ADDR + ':' + GLOBAL_SO_VNFM_SERVER_PORT
+GLOBAL_SO_NSSMF_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_NSSMF_IP_ADDR + ':' + GLOBAL_SO_NSSMF_SERVER_PORT
+#GLOBAL_SO_VNFM_ENDPOINT = 'http://' + GLOBAL_INJECTED_SO_VNFM_IP_ADDR + ':' + GLOBAL_SO_VNFM_SERVER_PORT
+# music info - everything is from the private oam network (also called onap private network)
+GLOBAL_MUSIC_SERVER_PROTOCOL = "https"
+GLOBAL_MUSIC_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "music" "port" 8443) }}'
+# oof global info - everything is from the private oam network (also called onap private network)
+GLOBAL_OOF_SERVER_PROTOCOL = "https"
+# oof-homing info - everything is from the private oam network (also called onap private network)
+GLOBAL_OOF_HOMING_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "oof-has-api" "port" 8091) }}'
+GLOBAL_OOF_HOMING_USERNAME="{{ .Values.oofHomingUsername }}"
+GLOBAL_OOF_HOMING_PASSWORD="{{ .Values.oofHomingPassword }}"
+# oof-sniro info - everything is from the private oam network (also called onap private network)
+GLOBAL_OOF_SNIRO_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "oof-osdf" "port" 8698) }}'
+#oof user
+GLOBAL_OOF_OSDF_USERNAME="{{ .Values.oofUsername }}"
+GLOBAL_OOF_OSDF_PASSWORD="{{ .Values.oofPassword }}"
+#oof pci user
+GLOBAL_OOF_PCI_USERNAME="{{ .Values.oofOsdfPciOptUsername }}"
+GLOBAL_OOF_PCI_PASSWORD="{{ .Values.oofOsdfPciOptPassword }}"
+# oof cmso global info - everything is from the private oam network (also called onap private network)
+GLOBAL_OOF_CMSO_PROTOCOL = "https"
+GLOBAL_OOF_CMSO_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "oof-cmso" "port" 8080) }}'
+GLOBAL_OOF_CMSO_USERNAME = "{{ .Values.oofCmsoUsername }}"
+GLOBAL_OOF_CMSO_PASSWORD = "{{ .Values.oofCmsoPassword }}"
+# openstack info - info to select right info in environment
+# packet generate vnf info - everything is from the private oam network (also called onap private network)
+GLOBAL_PACKET_GENERATOR_PORT = "8183"
+GLOBAL_PACKET_GENERATOR_USERNAME = "admin"
+GLOBAL_PACKET_GENERATOR_PASSWORD = "admin"
+GLOBAL_PGN_PORT = "2831"
+# policy info - everything is from the private oam network (also called onap private network)
+GLOBAL_POLICY_SERVER_PROTOCOL = "https"
+GLOBAL_POLICY_SERVER_PORT = "8081"
+GLOBAL_POLICY_HEALTHCHECK_PORT = "6969"
+GLOBAL_POLICY_AUTH = '{{ .Values.policyAuth}}'
+GLOBAL_POLICY_CLIENTAUTH = '{{ .Values.policyClientAuth}}'
+GLOBAL_POLICY_USERNAME = '{{ .Values.policyUsername }}'
+GLOBAL_POLICY_PASSWORD = '{{ .Values.policyPassword }}'
+GLOBAL_POLICY_HEALTHCHECK_USERNAME = '{{ .Values.policyComponentUsername }}'
+GLOBAL_POLICY_HEALTHCHECK_PASSWORD = '{{ .Values.policyComponentPassword }}'
+# portal info - everything is from the private oam network (also called onap private network)
+GLOBAL_PORTAL_SERVER_PROTOCOL = "https"
+GLOBAL_PORTAL_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "portal-app" "port" 8443) }}'
+GLOBAL_PORTAL_USERNAME = '{{ .Values.portalUsername }}'
+GLOBAL_PORTAL_PASSWORD = '{{ .Values.portalPassword }}'
+# sdnc info - everything is from the private oam network (also called onap private network)
+GLOBAL_SDNC_SERVER_PROTOCOL = "https"
+GLOBAL_SDNC_REST_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "sdnc" "port" 8443) }}'
+GLOBAL_SDNC_ADMIN_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "sdnc-portal" "port" 8443) }}'
+GLOBAL_SDNC_USERNAME = '{{ .Values.sdncUsername }}'
+GLOBAL_SDNC_PASSWORD = '{{ .Values.sdncPassword }}'
+GLOBAL_SDNC_AUTHENTICATION = [GLOBAL_SDNC_USERNAME, GLOBAL_SDNC_PASSWORD]
+# sms (AAF) info
+GLOBAL_SMS_SERVER_PROTOCOL = "https"
+GLOBAL_SMS_SERVER_NAME = '{{include "robot.ingress.svchost" (dict "root" . "hostname" "aaf-sms") }}'
+GLOBAL_SMS_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "aaf-sms" "port" 10443) }}'
+# vid info - everything is from the private oam network (also called onap private network)
+GLOBAL_VID_SERVER_PROTOCOL = '{{ .Values.vidServerProtocol }}'
+GLOBAL_VID_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "vid" "port" (.Values.vidServerPort | default 0 | int)) }}'
+GLOBAL_VID_USERNAME = '{{ .Values.vidUsername }}'
+GLOBAL_VID_PASSWORD = '{{ .Values.vidPassword}}'
+GLOBAL_VID_HEALTH_USERNAME = '{{ .Values.vidHealthUsername }}'
+GLOBAL_VID_HEALTH_PASSWORD = '{{ .Values.vidHealthPassword }}'
+# vnfsdk info - everything is from the private oam network (also called onap private network)
+GLOBAL_VNFSDK_SERVER_PROTOCOL = "https"
+GLOBAL_VNFSDK_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "refrepo" "port" 8703) }}'
+
+GLOBAL_DCAE_VES_PROTOCOL = "http"
+GLOBAL_DCAE_VES_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dcae-ves-collector" "port" 8080) }}'
+GLOBAL_DCAE_VES_HTTPS_PROTOCOL = "https"
+GLOBAL_DCAE_VES_HTTPS_SERVER_PORT = '{{include "robot.ingress.port" (dict "root" . "hostname" "dcae-ves-collector-https" "port" 8443) }}'
+GLOBAL_DCAE_VES_USERNAME = 'sample1'
+GLOBAL_DCAE_VES_PASSWORD = 'sample1'
+
+
+#global selenium info
+GLOBAL_PROXY_WARNING_TITLE=""
+GLOBAL_PROXY_WARNING_CONTINUE_XPATH=""
+# dns info
+GLOBAL_DNS_TRAFFIC_DURATION = "600"
+# location where heat templates and data are loaded from
+GLOBAL_HEAT_TEMPLATES_FOLDER = "/var/opt/ONAP/demo/heat"
+GLOBAL_PRELOAD_DATA_FOLDER = "/var/opt/ONAP/demo/preload-data"
+# location where TOSCA artifacts are loaded from
+GLOBAL_TOSCA_ONBOARDING_PACKAGES_FOLDER = "/var/opt/ONAP/demo/tosca"
+
+
+# cds info - everything is from the private oam network (also called onap private network)
+GLOBAL_CCSDK_CDS_SERVER_PROTOCOL = "http"
+GLOBAL_CCSDK_CDS_HEALTH_SERVER_PORT = "8080"
+GLOBAL_CCSDK_CDS_USERNAME = 'ccsdkapps'
+GLOBAL_CCSDK_CDS_PASSWORD = 'ccsdkapps'
+GLOBAL_CCSDK_CDS_AUTHENTICATION = [GLOBAL_CCSDK_CDS_USERNAME, GLOBAL_CCSDK_CDS_PASSWORD]
+GLOBAL_CDS_AUTH = "Y2NzZGthcHBzOmNjc2RrYXBwcw=="
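+
+# Note: this templated file is rendered by Helm and the wrapper scripts in this
+# chart pass the rendered copy to the testsuites as a Robot variable file,
+# e.g. (sketch):
+#   runTags.sh -V /share/config/robot_properties.py -i health ...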
+
diff --git a/kubernetes/robot/resources/config/lighttpd/authorization b/kubernetes/robot/resources/config/lighttpd/authorization
new file mode 100644
index 0000000000..9d6ff7fc29
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/authorization
@@ -0,0 +1 @@
+{{ .Values.config.robotWebUser }}:{{ .Values.config.robotWebPassword }}
diff --git a/kubernetes/robot/resources/config/lighttpd/lighttpd.conf b/kubernetes/robot/resources/config/lighttpd/lighttpd.conf
new file mode 100644
index 0000000000..6c781a240b
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/lighttpd.conf
@@ -0,0 +1,57 @@
+server.document-root = "/var/opt/ONAP/html/"
+
+server.port = 88
+
+server.username = "www-data"
+server.groupname = "www-data"
+server.errorlog = "/var/log/lighttpd/error.log"
+accesslog.filename = "/var/log/lighttpd/access.log"
+
+
+$SERVER["socket"] == ":443" {
+ ssl.engine = "enable"
+ ssl.pemfile = "/etc/lighttpd/ssl/onap-robot.onap.pem"
+ # ssl.ca-file = "/etc/lighttpd/ssl/CA_issuing.crt"
+ server.name = "robot-onap.onap.org"
+ #server.document-root = "/var/opt/ONAP/html/"
+ #server.errorlog = "/var/log/lighttpd/robot_onap.onap.org.error.log"
+ #accesslog.filename = "/var/log/lighttpd/robot_onap.onap.org.access.log"
+}
+
+
+
+dir-listing.activate = "disable"
+
+alias.url = ( "/logs/" => "/share/logs/" )
+$HTTP["url"] =~ "^/logs/" {
+ dir-listing.activate = "enable"
+}
+
+mimetype.assign = (
+ ".log" => "text/plain",
+ ".txt" => "text/plain",
+ ".png" => "image/png",
+ ".html" => "text/html",
+ ".xml" => "text/xml"
+)
+
+static-file.exclude-extensions = ( ".fcgi", ".php", ".rb", "~", ".inc", ".cgi" )
+index-file.names = ( "index.html" )
+
+server.modules += ( "mod_auth" )
+server.modules += ( "mod_alias" )
+server.modules += ( "mod_compress" )
+server.modules += ( "mod_accesslog" )
+auth.debug = 2
+auth.backend = "plain"
+auth.backend.plain.userfile = "/etc/lighttpd/authorization"
+auth.require = ( "/" =>
+(
+"method" => "basic",
+"realm" => "Password protected area",
+"require" => "valid-user"
+)
+)
+
+compress.cache-dir = "/var/cache/lighttpd/compress/"
+compress.filetype = ("application/x-javascript", "text/css", "text/html", "text/plain")
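+
+# Optional syntax check before (re)starting the server (assumes the lighttpd
+# binary is on PATH and this file is installed as /etc/lighttpd/lighttpd.conf):
+#   lighttpd -t -f /etc/lighttpd/lighttpd.conf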
diff --git a/kubernetes/robot/resources/config/lighttpd/ssl/README.TXT b/kubernetes/robot/resources/config/lighttpd/ssl/README.TXT
new file mode 100644
index 0000000000..4a56cb5dfa
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/ssl/README.TXT
@@ -0,0 +1,13 @@
+The following self-signed certificates were created for testing only.
+You can replace them with your own certificates generated using the process below, or with official certificates obtained through whatever process is appropriate for your deployment.
+
+
+# create key and csr
+openssl req -new -newkey rsa:2048 -nodes -keyout onap-robot.onap.key -out onap-robot.onap.csr
+
+# sign csr
+# 10 year self signed certificate
+openssl x509 -req -days 3650 -in onap-robot.onap.csr -signkey onap-robot.onap.key -out onap-robot.onap.crt
+
+# create PEM for lighttpd server
+cat onap-robot.onap.key onap-robot.onap.crt > onap-robot.onap.pem
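+
+# optionally inspect the resulting certificate (standard openssl command; the
+# file names match those generated above)
+openssl x509 -in onap-robot.onap.crt -noout -text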
diff --git a/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.crt b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.crt
new file mode 100644
index 0000000000..e5db8b2abc
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.crt
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAocCFHlPuTueraFMC0IbbHUFBUlD/79aMA0GCSqGSIb3DQEBCwUAMIGL
+MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCk1pZGRsZXRvd24x
+DTALBgNVBAoMBE9OQVAxDjAMBgNVBAsMBXJvYm90MRwwGgYDVQQDDBNyb2JvdC1v
+bmFwLm9uYXAub3JnMR0wGwYJKoZIhvcNAQkBFg5yb2JvdEBvbmFwLm9yZzAeFw0y
+MDAzMzAxNDM5MzlaFw0zMDAzMjgxNDM5MzlaMIGLMQswCQYDVQQGEwJVUzELMAkG
+A1UECAwCTkoxEzARBgNVBAcMCk1pZGRsZXRvd24xDTALBgNVBAoMBE9OQVAxDjAM
+BgNVBAsMBXJvYm90MRwwGgYDVQQDDBNyb2JvdC1vbmFwLm9uYXAub3JnMR0wGwYJ
+KoZIhvcNAQkBFg5yb2JvdEBvbmFwLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBALxJ/YnjSMMLyY+DoGXEOi57AuVMNWJ6L43FJoy4MUGlWw1BT/gM
+0yXDlEYX7rFcP+/74CtK5VwL/T5WyGYehj4JanxeS+Debyx4iLR3Ge0So1TGVcLg
+wSwvU2Yt3GIpWSDsui6fBJLaH9ka4phf0mu55WljoA8814pyd2HCu/4MdPuWGISE
+PPeH6DzUDIQ8srUF3i9EkDwBtQtlj4XLSOeGaEo6DQNPABr5Ft8EkJ3jwi9uw92G
+ofU+PQeXszWGUcHMN12yEJErqsmlGJEi9Fr9c3tyWk7LLj0NPtdaPKAE2R9AUvFW
+1WmJ/Lb8lkHezj23V8+wlCbJJkcV8+E3c90CAwEAATANBgkqhkiG9w0BAQsFAAOC
+AQEAIW7KCq4gqjVQQ5aFqSEmbS65pAeyrt58NQxj6UgOYrHe+IJ5EHLiKmRW6XGe
+RbtYXSrBhj6E0wF7mWda5sd5r+GZuMjx3BkRKCo59yQnHKYheuuoXIFx1odkmBdl
+uq8ccVsL3W+xoBuMdhW3E7Q83xvE+0lINki9rhrmdGItHee3y5KyBYopf0BjZtCS
+EV601NnSBDktlMPSt4pFfptr4+lUh/6MDj548cygwk9NexFCwY7+YhjEAPfqH6p3
+BI3QlDYQslBCv8By4JtQqhFs3t6OAjSILfwQfxdS4OT2FK6NZyEBpd1mdbS3CyS7
+qke87AT49x1wbGgsSeldZShErg==
+-----END CERTIFICATE-----
diff --git a/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.csr b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.csr
new file mode 100644
index 0000000000..5e4cc9ff5c
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.csr
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIC0TCCAbkCAQAwgYsxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOSjETMBEGA1UE
+BwwKTWlkZGxldG93bjENMAsGA1UECgwET05BUDEOMAwGA1UECwwFcm9ib3QxHDAa
+BgNVBAMME3JvYm90LW9uYXAub25hcC5vcmcxHTAbBgkqhkiG9w0BCQEWDnJvYm90
+QG9uYXAub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvEn9ieNI
+wwvJj4OgZcQ6LnsC5Uw1YnovjcUmjLgxQaVbDUFP+AzTJcOURhfusVw/7/vgK0rl
+XAv9PlbIZh6GPglqfF5L4N5vLHiItHcZ7RKjVMZVwuDBLC9TZi3cYilZIOy6Lp8E
+ktof2RrimF/Sa7nlaWOgDzzXinJ3YcK7/gx0+5YYhIQ894foPNQMhDyytQXeL0SQ
+PAG1C2WPhctI54ZoSjoNA08AGvkW3wSQnePCL27D3Yah9T49B5ezNYZRwcw3XbIQ
+kSuqyaUYkSL0Wv1ze3JaTssuPQ0+11o8oATZH0BS8VbVaYn8tvyWQd7OPbdXz7CU
+JskmRxXz4Tdz3QIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAJYVG3C4fgHb7Vip
+aqsyRI6kQNjxFwwjnHY0AEwuzr1K+D2dGXXDtTP9RPZQIp3PZyvKrZNqs4O45290
+lvY+bytW1WLQ6h3cZqKgIUEXU2tycLITPQDz+Fu45R20a1RT65EWv523mpgzJoOY
+61V9aVB1KKfPr4cddMHbq9fYvUKz2KVDTNBVQSYcO2125hv1xpUa1ZbZN15SJazH
+JWW9FMTCSgWp6yYTBYKAl8S8WTNBeAKJq1ZVw1q/KhHpj+jqjm7rn6lgIQi9gTES
+DNP4j5SBdL0O5HIk5KqW/3uhS0YlL9Yi9Cqxo35RAbMkUMKDdqtlTObU3tH/jtXC
+2YPImm4=
+-----END CERTIFICATE REQUEST-----
diff --git a/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.key b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.key
new file mode 100644
index 0000000000..7bf9fc0c3a
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC8Sf2J40jDC8mP
+g6BlxDouewLlTDViei+NxSaMuDFBpVsNQU/4DNMlw5RGF+6xXD/v++ArSuVcC/0+
+VshmHoY+CWp8Xkvg3m8seIi0dxntEqNUxlXC4MEsL1NmLdxiKVkg7LounwSS2h/Z
+GuKYX9JrueVpY6APPNeKcndhwrv+DHT7lhiEhDz3h+g81AyEPLK1Bd4vRJA8AbUL
+ZY+Fy0jnhmhKOg0DTwAa+RbfBJCd48IvbsPdhqH1Pj0Hl7M1hlHBzDddshCRK6rJ
+pRiRIvRa/XN7clpOyy49DT7XWjygBNkfQFLxVtVpify2/JZB3s49t1fPsJQmySZH
+FfPhN3PdAgMBAAECggEAXqbPRm60GCCnzXY8vou7N7xswznhKEUF/L2b2GeLk1V9
+hRdNDQPdbQF5WxcSzaPzHD/A++JllDUOtDTJ/tPfZfYzyptYh40p0/AvwJCSvuN8
+qdQeLp3vXbfYs+ff/pSMkomOOnuDwBLYqbnjKPgafj7v/V+wiZhrPdS3ALT07gsW
+MKb4MjAYKwFCbENgMurb4VNL+LgwJFXJNvlRW3r1IO37aXdGiHO5XdrQ6aNDzfmR
+ljaAndScwpleRwZorl4Gh0JPn46fpkJPQPNAcejKs1zzxbdjtd/xBS84DuOMryfT
+hEfiyU0t5kfir4XufLxtUB9M0FkmpPUQ24Rt5x8/4QKBgQDjYCMbxRNw3o+p1jgR
+TsWx2hExhvQJHKPT1MwFcOTqA+VoaPyMUsT5tdkVr2xdqyVjZDbUWQOd5hll6urL
+thvYSJz09eGGPuHTMvsktwer/oW3FAIbkPEA0a7EHaNuGbacLVtPn79vJ9IyQl1B
+J4DALDW3mU05FV83PWZwDQe1hQKBgQDT/iwnALH7P5IC6dLwSfqFZU8BJ3BrTMs0
+708Oak+iHOWWWkTEpR1PF3EPkM1sdrscLcV6IiqNNRIjOKTbNtloSGMPAyVAxDxb
+NiJseP0a3w/XzDiO5H1x3HmYvf2tYLss9cjUKG5ikeVZhfgcUY3pypJ9BcCuCHjg
+vkk0dtSIeQKBgQDcQLhBojoez7jij/xn56JhkhetnkiuuuuHu06WxrE4buW+qlRZ
+qaPdMGZFN1UFt0/UY7MpJ8D1J3va2iDvjEyUymIgeD4qSKeAapYVDqeD2F225s7T
+3SdX38Yne35gnM/joZweGWTbS/u1a8vh3V4+yZSPcRT5AtblL7kXOcgJ8QKBgDbH
+nFMry9IX2DwmfdqX00O7LRIoRdWET5JLMIuSDyfd/bm/c7u4yv0QGqgiC+M2Px2F
++lyYDZTVnO07CuaWotFeuzI65tp4mvwa3gRXs1fJQkfKPuc1xfCxPz6oAbBWqadx
+j8a69+NCvhhHFRkEPf6m2Pmzjdmq2MmCglorRrpJAoGBAMNRu3it7Pp+9xgj3aFe
+IfcnVwd3vwzXyw2ABinHXOqSRx52Dm4J7QDY9d+vOrcbwZyEIt5QdBJesAREAuLf
+GsM0FH2qi02kJ679P/fhnbq9B9dPYxpGFV5LlZWN/ulW74fFzIuVB3yPsS3POnm1
+xDhKSqihCH6X84ub/5Qz9boQ
+-----END PRIVATE KEY-----
diff --git a/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.pem b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.pem
new file mode 100644
index 0000000000..abf8f81c58
--- /dev/null
+++ b/kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC8Sf2J40jDC8mP
+g6BlxDouewLlTDViei+NxSaMuDFBpVsNQU/4DNMlw5RGF+6xXD/v++ArSuVcC/0+
+VshmHoY+CWp8Xkvg3m8seIi0dxntEqNUxlXC4MEsL1NmLdxiKVkg7LounwSS2h/Z
+GuKYX9JrueVpY6APPNeKcndhwrv+DHT7lhiEhDz3h+g81AyEPLK1Bd4vRJA8AbUL
+ZY+Fy0jnhmhKOg0DTwAa+RbfBJCd48IvbsPdhqH1Pj0Hl7M1hlHBzDddshCRK6rJ
+pRiRIvRa/XN7clpOyy49DT7XWjygBNkfQFLxVtVpify2/JZB3s49t1fPsJQmySZH
+FfPhN3PdAgMBAAECggEAXqbPRm60GCCnzXY8vou7N7xswznhKEUF/L2b2GeLk1V9
+hRdNDQPdbQF5WxcSzaPzHD/A++JllDUOtDTJ/tPfZfYzyptYh40p0/AvwJCSvuN8
+qdQeLp3vXbfYs+ff/pSMkomOOnuDwBLYqbnjKPgafj7v/V+wiZhrPdS3ALT07gsW
+MKb4MjAYKwFCbENgMurb4VNL+LgwJFXJNvlRW3r1IO37aXdGiHO5XdrQ6aNDzfmR
+ljaAndScwpleRwZorl4Gh0JPn46fpkJPQPNAcejKs1zzxbdjtd/xBS84DuOMryfT
+hEfiyU0t5kfir4XufLxtUB9M0FkmpPUQ24Rt5x8/4QKBgQDjYCMbxRNw3o+p1jgR
+TsWx2hExhvQJHKPT1MwFcOTqA+VoaPyMUsT5tdkVr2xdqyVjZDbUWQOd5hll6urL
+thvYSJz09eGGPuHTMvsktwer/oW3FAIbkPEA0a7EHaNuGbacLVtPn79vJ9IyQl1B
+J4DALDW3mU05FV83PWZwDQe1hQKBgQDT/iwnALH7P5IC6dLwSfqFZU8BJ3BrTMs0
+708Oak+iHOWWWkTEpR1PF3EPkM1sdrscLcV6IiqNNRIjOKTbNtloSGMPAyVAxDxb
+NiJseP0a3w/XzDiO5H1x3HmYvf2tYLss9cjUKG5ikeVZhfgcUY3pypJ9BcCuCHjg
+vkk0dtSIeQKBgQDcQLhBojoez7jij/xn56JhkhetnkiuuuuHu06WxrE4buW+qlRZ
+qaPdMGZFN1UFt0/UY7MpJ8D1J3va2iDvjEyUymIgeD4qSKeAapYVDqeD2F225s7T
+3SdX38Yne35gnM/joZweGWTbS/u1a8vh3V4+yZSPcRT5AtblL7kXOcgJ8QKBgDbH
+nFMry9IX2DwmfdqX00O7LRIoRdWET5JLMIuSDyfd/bm/c7u4yv0QGqgiC+M2Px2F
++lyYDZTVnO07CuaWotFeuzI65tp4mvwa3gRXs1fJQkfKPuc1xfCxPz6oAbBWqadx
+j8a69+NCvhhHFRkEPf6m2Pmzjdmq2MmCglorRrpJAoGBAMNRu3it7Pp+9xgj3aFe
+IfcnVwd3vwzXyw2ABinHXOqSRx52Dm4J7QDY9d+vOrcbwZyEIt5QdBJesAREAuLf
+GsM0FH2qi02kJ679P/fhnbq9B9dPYxpGFV5LlZWN/ulW74fFzIuVB3yPsS3POnm1
+xDhKSqihCH6X84ub/5Qz9boQ
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAocCFHlPuTueraFMC0IbbHUFBUlD/79aMA0GCSqGSIb3DQEBCwUAMIGL
+MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCk1pZGRsZXRvd24x
+DTALBgNVBAoMBE9OQVAxDjAMBgNVBAsMBXJvYm90MRwwGgYDVQQDDBNyb2JvdC1v
+bmFwLm9uYXAub3JnMR0wGwYJKoZIhvcNAQkBFg5yb2JvdEBvbmFwLm9yZzAeFw0y
+MDAzMzAxNDM5MzlaFw0zMDAzMjgxNDM5MzlaMIGLMQswCQYDVQQGEwJVUzELMAkG
+A1UECAwCTkoxEzARBgNVBAcMCk1pZGRsZXRvd24xDTALBgNVBAoMBE9OQVAxDjAM
+BgNVBAsMBXJvYm90MRwwGgYDVQQDDBNyb2JvdC1vbmFwLm9uYXAub3JnMR0wGwYJ
+KoZIhvcNAQkBFg5yb2JvdEBvbmFwLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBALxJ/YnjSMMLyY+DoGXEOi57AuVMNWJ6L43FJoy4MUGlWw1BT/gM
+0yXDlEYX7rFcP+/74CtK5VwL/T5WyGYehj4JanxeS+Debyx4iLR3Ge0So1TGVcLg
+wSwvU2Yt3GIpWSDsui6fBJLaH9ka4phf0mu55WljoA8814pyd2HCu/4MdPuWGISE
+PPeH6DzUDIQ8srUF3i9EkDwBtQtlj4XLSOeGaEo6DQNPABr5Ft8EkJ3jwi9uw92G
+ofU+PQeXszWGUcHMN12yEJErqsmlGJEi9Fr9c3tyWk7LLj0NPtdaPKAE2R9AUvFW
+1WmJ/Lb8lkHezj23V8+wlCbJJkcV8+E3c90CAwEAATANBgkqhkiG9w0BAQsFAAOC
+AQEAIW7KCq4gqjVQQ5aFqSEmbS65pAeyrt58NQxj6UgOYrHe+IJ5EHLiKmRW6XGe
+RbtYXSrBhj6E0wF7mWda5sd5r+GZuMjx3BkRKCo59yQnHKYheuuoXIFx1odkmBdl
+uq8ccVsL3W+xoBuMdhW3E7Q83xvE+0lINki9rhrmdGItHee3y5KyBYopf0BjZtCS
+EV601NnSBDktlMPSt4pFfptr4+lUh/6MDj548cygwk9NexFCwY7+YhjEAPfqH6p3
+BI3QlDYQslBCv8By4JtQqhFs3t6OAjSILfwQfxdS4OT2FK6NZyEBpd1mdbS3CyS7
+qke87AT49x1wbGgsSeldZShErg==
+-----END CERTIFICATE-----
diff --git a/kubernetes/robot/scripts/demoscript/README b/kubernetes/robot/scripts/demoscript/README
new file mode 100644
index 0000000000..aad63f4b24
--- /dev/null
+++ b/kubernetes/robot/scripts/demoscript/README
@@ -0,0 +1 @@
+Directory contains scripts that will be run before 'demo' tests.
diff --git a/kubernetes/robot/scripts/etescript/README b/kubernetes/robot/scripts/etescript/README
new file mode 100644
index 0000000000..380787e16e
--- /dev/null
+++ b/kubernetes/robot/scripts/etescript/README
@@ -0,0 +1 @@
+Directory contains scripts that will be run before 'ete' tests.
diff --git a/kubernetes/robot/scripts/etescript/hvves-etescript.sh b/kubernetes/robot/scripts/etescript/hvves-etescript.sh
new file mode 100755
index 0000000000..5d22c4b4fe
--- /dev/null
+++ b/kubernetes/robot/scripts/etescript/hvves-etescript.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright © 2019 Nokia
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#
+# Generate the HV-VES SSL-related certificates and keystores.
+# Copy the generated material to the HV-VES and Robot pods.
+#
+
+
+HVVESPOD=$(kubectl -n $NAMESPACE get pods --no-headers=true -o custom-columns=:metadata.name | grep hv-ves)
+
+
+generate_ca_key_cert () {
+ openssl genrsa -out $1/ca.key 2048
+ openssl req -new -x509 -days 36500 -key $1/ca.key -out $1/ca.pem -subj /CN=dcae-hv-ves-ca.onap
+}
+
+generate_server_key_csr () {
+ openssl genrsa -out $1/server.key 2048
+ openssl req -new -key $1/server.key -out $1/server.csr -subj /CN=dcae-hv-ves-collector.onap
+}
+
+generate_client_key_csr () {
+ openssl genrsa -out $1/client.key 2048
+ openssl req -new -key $1/client.key -out $1/client.csr -subj /CN=dcae-hv-ves-client.onap
+}
+
+sign_server_and_client_cert () {
+ openssl x509 -req -days 36500 -in $1/server.csr -CA $1/ca.pem -CAkey $1/ca.key -out $1/server.pem -set_serial 00
+ openssl x509 -req -days 36500 -in $1/client.csr -CA $1/ca.pem -CAkey $1/ca.key -out $1/client.pem -set_serial 00
+}
+
+create_pkcs12_ca_and_server () {
+ openssl pkcs12 -export -out $1/ca.p12 -inkey $1/ca.key -in $1/ca.pem -passout pass:
+ openssl pkcs12 -export -out $1/server.p12 -inkey $1/server.key -in $1/server.pem -passout pass:
+}
+
+copy_server_certs_to_hvves () {
+ for f in {ca.p12,server.p12}
+ do
+ kubectl cp $1/$f $2/$3:$4
+ done
+}
+
+copy_client_certs_to_robot () {
+ for f in {ca.pem,client.key,client.pem}
+ do
+ kubectl cp $1/$f $2/$3:$4
+ done
+}
+
+cleanup () {
+ rm -f $1/{ca,server,client}.???
+}
+
+
+generate_ca_key_cert "$DIR/$SCRIPTDIR"
+generate_server_key_csr "$DIR/$SCRIPTDIR"
+generate_client_key_csr "$DIR/$SCRIPTDIR"
+sign_server_and_client_cert "$DIR/$SCRIPTDIR"
+create_pkcs12_ca_and_server "$DIR/$SCRIPTDIR"
+copy_server_certs_to_hvves "$DIR/$SCRIPTDIR" "$NAMESPACE" "$HVVESPOD" "/tmp"
+copy_client_certs_to_robot "$DIR/$SCRIPTDIR" "$NAMESPACE" "$POD" "/tmp"
+cleanup "$DIR/$SCRIPTDIR"
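The script relies on NAMESPACE, POD, DIR and SCRIPTDIR being exported by whatever invokes it (normally the robot test wrapper); a manual run might look like the sketch below, where every value is an example only:

  export NAMESPACE=onap
  export DIR=$(pwd)                  # root of the kubernetes/robot chart checkout
  export SCRIPTDIR=scripts/etescript
  export POD=$(kubectl -n "$NAMESPACE" get pods --no-headers=true \
      -o custom-columns=:metadata.name | grep robot | head -1)
  bash "$DIR/$SCRIPTDIR/hvves-etescript.sh"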
diff --git a/kubernetes/robot/scripts/etescript/security-etescript.sh b/kubernetes/robot/scripts/etescript/security-etescript.sh
new file mode 100755
index 0000000000..1cd911ca60
--- /dev/null
+++ b/kubernetes/robot/scripts/etescript/security-etescript.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+# Copyright 2019 Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Gather information on ONAP cluster required by security tests.
+# Copy results to Robot pod.
+#
+
+
+TMPDIR='/tmp'
+TMPTPL='onap_security'
+CSV2JSON='import csv; import json; import sys; print(json.dumps({i[0]: i[1] for i in csv.reader(sys.stdin)}))'
+FILTER="$(tr -d [:space:] <<TEMPLATE
+{{range .items}}
+ {{range.spec.ports}}
+ {{if .nodePort}}
+ {{.nodePort}}{{','}}{{.name}}{{'\n'}}
+ {{end}}
+ {{end}}
+{{end}}
+TEMPLATE)"
+
+
+setup () {
+ export NODEPORTS_FILE="$(mktemp -p ${TMPDIR} ${TMPTPL}XXX)"
+}
+
+create_actual_nodeport_json () {
+ kubectl get svc -n $NAMESPACE -o go-template="$FILTER" | python3 -c "$CSV2JSON" > "$NODEPORTS_FILE"
+}
+
+copy_actual_nodeport_json_to_robot () {
+ kubectl cp "$1" "$2/$3:$4"
+}
+
+cleanup () {
+ rm "$NODEPORTS_FILE"
+}
+
+
+setup
+create_actual_nodeport_json
+copy_actual_nodeport_json_to_robot "$NODEPORTS_FILE" "$NAMESPACE" "$POD" "$TMPDIR"
+cleanup
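After the script has run, the collected nodePort map sits as a JSON file under /tmp in the Robot pod; a hedged way to inspect it (the pod name lookup below is illustrative):

  ROBOTPOD=$(kubectl -n onap get pods --no-headers=true \
      -o custom-columns=:metadata.name | grep robot | head -1)
  kubectl -n onap exec "$ROBOTPOD" -- sh -c 'cat /tmp/onap_security*'
  # prints a JSON object mapping each exposed nodePort to its service port name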
diff --git a/kubernetes/robot/scripts/etescript/vnfsdk-etescript.sh b/kubernetes/robot/scripts/etescript/vnfsdk-etescript.sh
new file mode 100755
index 0000000000..f1d39691bf
--- /dev/null
+++ b/kubernetes/robot/scripts/etescript/vnfsdk-etescript.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# SPDX-License-Identifier: Apache-2.0
+
+
+#
+# Create a root CA (Certificate Authority) certificate and its private key.
+# Create the package certificates issued by that CA.
+# Copy the generated certificates to the SDC onboarding and Robot pods.
+#
+
+
+
+SDCVALID=sdc-valid
+SDCINVALID=sdc-invalid
+ROBOTPOD=$(kubectl -n $NAMESPACE get pods --no-headers=true -o custom-columns=:metadata.name | grep robot )
+SDCONBOARDINGPOD=$(kubectl -n $NAMESPACE get pods --no-headers=true -o custom-columns=:metadata.name | grep sdc-onboarding-be | grep -v cassandra)
+
+generate_ca_key_cert_and_package_cert_issued_by_CA () {
+ openssl req -batch -new -nodes -x509 -days 36500 -keyout rootCA-private-robot-$1.key -out rootCA-robot-$1.cert
+ openssl req -batch -new -nodes -keyout package-private-robot-$1.key -out package-robot-$1.csr
+ openssl x509 -req -CA rootCA-robot-$1.cert -CAkey rootCA-private-robot-$1.key -CAcreateserial -in package-robot-$1.csr -out package-robot-$1.cert
+}
+
+
+copy_root_cert_to_sdc_onboarding () {
+ kubectl cp $1/rootCA-robot-$5.cert $2/$3:$4
+}
+
+copy_package_certs_to_robot () {
+ for f in package-robot-$5.cert package-private-robot-$5.key
+ do
+ kubectl cp $1/$f $2/$3:$4
+ done
+}
+
+mkdir "$DIR/$SCRIPTDIR/tmp"
+cd "$DIR/$SCRIPTDIR/tmp"
+if [[ -f rootCA-robot-$SDCVALID.cert && -f package-robot-$SDCVALID.cert && -f package-robot-$SDCINVALID.cert && -f package-private-robot-$SDCVALID.key && -f package-private-robot-$SDCINVALID.key ]]; then
+ echo "All files are present";
+else
+ generate_ca_key_cert_and_package_cert_issued_by_CA $SDCVALID
+ generate_ca_key_cert_and_package_cert_issued_by_CA $SDCINVALID
+
+fi
+cd ../../..
+copy_root_cert_to_sdc_onboarding "$DIR/$SCRIPTDIR/tmp" "$NAMESPACE" "$SDCONBOARDINGPOD" "/var/lib/jetty/cert" $SDCVALID
+copy_package_certs_to_robot "$DIR/$SCRIPTDIR/tmp" "$NAMESPACE" "$ROBOTPOD" "/tmp" $SDCVALID
+copy_package_certs_to_robot "$DIR/$SCRIPTDIR/tmp" "$NAMESPACE" "$ROBOTPOD" "/tmp" $SDCINVALID
+
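Because the file check above skips regeneration on later runs, it can be worth confirming that the "valid" package certificate still chains to its root CA; a sketch, assuming DIR and SCRIPTDIR are set as for the script itself:

  cd "$DIR/$SCRIPTDIR/tmp"
  openssl verify -CAfile rootCA-robot-sdc-valid.cert package-robot-sdc-valid.cert
  # expected output: package-robot-sdc-valid.cert: OK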
diff --git a/kubernetes/robot/scripts/helmscript/README b/kubernetes/robot/scripts/helmscript/README
new file mode 100644
index 0000000000..7666bf5f02
--- /dev/null
+++ b/kubernetes/robot/scripts/helmscript/README
@@ -0,0 +1 @@
+Directory contains scripts that will be run before 'eteHelm' tests.
diff --git a/kubernetes/robot/templates/NOTES.txt b/kubernetes/robot/templates/NOTES.txt
new file mode 100644
index 0000000000..fe7c9313df
--- /dev/null
+++ b/kubernetes/robot/templates/NOTES.txt
@@ -0,0 +1,25 @@
+# Copyright © 2018 Amdocs, Bell Canada
+#
+# This file is licensed under the CREATIVE COMMONS ATTRIBUTION 4.0 INTERNATIONAL LICENSE
+#
+# Full license text at https://creativecommons.org/licenses/by/4.0/legalcode
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ template "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/robot/templates/_ingress_svc_hostname.tpl b/kubernetes/robot/templates/_ingress_svc_hostname.tpl
new file mode 100644
index 0000000000..160bfbf868
--- /dev/null
+++ b/kubernetes/robot/templates/_ingress_svc_hostname.tpl
@@ -0,0 +1,157 @@
+{{/*
+# Copyright © 2020 Samsung Electronics
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+
+{{/*
+ For internal use only!
+
+  Return a true value if access via the ingress controller is enabled for the given robot test component
+
+ The template takes two arguments:
+ - .parent: environment (.)
+ - .key: robot test component name
+*/}}
+
+{{- define "robot.ingress.svchost._isen" -}}
+ {{- $key := .key -}}
+ {{- $master := .parent.Values.config.useIngressHost -}}
+ {{- if hasKey $master "enabled" -}}
+ {{- if (index $master "enabled") -}}
+ {{- if hasKey $master $key -}}
+ {{- $en_parent := (index $master $key) -}}
+ {{- if hasKey $en_parent "enabled" -}}
+ {{- default "" (index $en_parent "enabled") -}}
+ {{- else -}}
+ {{- "" -}}
+ {{- end -}}
+ {{- else -}}
+ {{- "" -}}
+ {{- end -}}
+ {{- else -}}
+ {{- "" -}}
+ {{- end -}}
+ {{- else -}}
+ {{- "" -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+ For internal use only!
+
+  Return the alternative ingress hostname if present
+
+ The template takes two arguments:
+ - .parent: environment (.)
+ - .key: robot test component name
+*/}}
+
+{{- define "robot.ingress.svchost._inghost" -}}
+ {{- $key := .key -}}
+ {{- $master := .parent.Values.config.useIngressHost -}}
+ {{- if hasKey $master $key -}}
+ {{- $h_parent := (index $master $key) -}}
+ {{- if hasKey $h_parent "hostname" -}}
+ {{- default "" (index $h_parent "hostname") -}}
+ {{- else -}}
+ {{- "" -}}
+ {{- end -}}
+ {{- else -}}
+ {{- "" -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+ For internal use only!
+
+  Return the robot target port depending on the robot test configuration,
+  or the default value if no configuration is available
+
+ The template takes two arguments:
+ - .parent: environment (.)
+ - .key: robot test component name
+*/}}
+{{- define "robot.ingress.svchost._port" -}}
+ {{- $key := .key -}}
+ {{- $master := .parent.Values.config.useIngressHost -}}
+ {{- if hasKey $master $key -}}
+ {{- $https_parent := (index $master $key) -}}
+ {{- if hasKey $https_parent "https" -}}
+ {{- $ishttps := (index $https_parent "https") -}}
+ {{- ternary 443 80 $ishttps -}}
+ {{- else -}}
+ {{- 80 -}}
+ {{- end -}}
+ {{- else -}}
+ {{- 80 -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+  Return the hostname of the component under test by robot.
+  If ingress use is enabled for the component, the cluster ingress
+  controller hostname is returned; otherwise the internal cluster
+  (service) hostname is returned.
+
+  The template takes two arguments:
+  - .root: root environment (.)
+  - .hostname: basename of host
+
+  Returns the target hostname string used by the robot test of the component
+*/}}
+{{- define "robot.ingress.svchost" -}}
+ {{- $hostname := required "service hostname" .hostname -}}
+ {{- $tplhname := $hostname | replace "-" "_" -}}
+ {{- $ingress_enabled := include "robot.ingress.svchost._isen" (dict "parent" .root "key" $tplhname) -}}
+ {{- if $ingress_enabled -}}
+ {{- if .root.Values.global.ingress -}}
+ {{- if .root.Values.global.ingress.virtualhost -}}
+ {{- $domain := .root.Values.global.ingress.virtualhost.baseurl -}}
+ {{- $ihostname := default $hostname (include "robot.ingress.svchost._inghost" (dict "parent" .root "key" $tplhname)) -}}
+ {{- printf "%s.%s" $ihostname $domain -}}
+ {{- end -}}
+ {{- end -}}
+ {{- else -}}
+ {{- $domain := include "common.namespace" .root -}}
+ {{- printf "%s.%s" $hostname $domain -}}
+ {{- end -}}
+{{- end -}}
+
+
+{{/*
+  Return the target port for robot testing.
+  If ingress use is enabled for the component, the cluster ingress
+  controller port is returned; if no ingress-specific port exists,
+  the default port is returned.
+
+  The template takes three arguments:
+  - .root: root environment (.)
+  - .hostname: basename of host
+  - .port: default target port
+
+  Return the target port for the component under test
+*/}}
+{{- define "robot.ingress.port" -}}
+ {{- $hostname := required "service hostname" .hostname -}}
+ {{- $port := required "service port" .port -}}
+ {{- $tplhname := $hostname | replace "-" "_" -}}
+ {{- $ingress_enabled := include "robot.ingress.svchost._isen" (dict "parent" .root "key" $tplhname) -}}
+ {{- if $ingress_enabled -}}
+ {{- include "robot.ingress.svchost._port" (dict "parent" .root "key" $tplhname) -}}
+ {{- else -}}
+ {{- printf "%d" $port -}}
+ {{- end -}}
+{{- end -}}
+
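A way to see what these helpers resolve to is to render the chart locally; the sketch below assumes Helm 3, that the common subchart dependency can be resolved (in OOM it normally comes from the locally built helm repo), and uses the values keys introduced later in this change:

  helm dep up kubernetes/robot
  # with config.useIngressHost.enabled=false the helpers fall back to "<service>.<namespace>" and the caller's default port
  helm template dev-robot kubernetes/robot | less
  # with it enabled they resolve to "<hostname>.<ingress baseurl>" and port 80 or 443
  helm template dev-robot kubernetes/robot \
      --set config.useIngressHost.enabled=true \
      --set global.ingress.virtualhost.baseurl=simpledemo.onap.org | less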
diff --git a/kubernetes/robot/templates/configmap.yaml b/kubernetes/robot/templates/configmap.yaml
new file mode 100644
index 0000000000..5b41aa4176
--- /dev/null
+++ b/kubernetes/robot/templates/configmap.yaml
@@ -0,0 +1,52 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-eteshare-configmap
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ include "common.release" . }}
+ heritage: {{ .Release.Service }}
+data:
+{{ tpl (.Files.Glob "resources/config/eteshare/config/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-lighttpd-configmap
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ include "common.release" . }}
+ heritage: {{ .Release.Service }}
+data:
+{{ tpl (.Files.Glob "resources/config/lighttpd/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-lighttpd-ssl-configmap
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ include "common.release" . }}
+ heritage: {{ .Release.Service }}
+data:
+{{ tpl (.Files.Glob "resources/config/lighttpd/ssl/*").AsConfig . | indent 2 }}
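Once deployed, the three generated ConfigMaps can be listed and inspected with kubectl; the names below are only illustrative, since the real prefix comes from common.fullname (release name plus chart name):

  kubectl -n onap get configmaps | grep robot
  kubectl -n onap get configmap dev-robot-lighttpd-configmap -o yaml | head -40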
diff --git a/kubernetes/robot/templates/deployment.yaml b/kubernetes/robot/templates/deployment.yaml
new file mode 100644
index 0000000000..a466189c22
--- /dev/null
+++ b/kubernetes/robot/templates/deployment.yaml
@@ -0,0 +1,117 @@
+# Copyright © 2018 Amdocs, Bell Canada, AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ include "common.release" . }}
+ heritage: {{ .Release.Service }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
+ replicas: {{ .Values.replicaCount }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ include "common.release" . }}
+ spec:
+ hostAliases:
+ - ip: {{ .Values.kubernetesExternalIp }}
+ hostnames:
+ - "portal.api.simpledemo.onap.org"
+ - "vid.api.simpledemo.onap.org"
+ - "sdc.api.fe.simpledemo.onap.org"
+ - "portal-sdk.simpledemo.onap.org"
+ - "policy.api.simpledemo.onap.org"
+ - "aai.api.sparky.simpledemo.onap.org"
+ - "aai.ui.simpledemo.onap.org"
+ - "cli.api.simpledemo.onap.org"
+ - "so-monitoring"
+ containers:
+ - name: {{ include "common.name" . }}
+ image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ ports:
+ - containerPort: {{ .Values.service.internalPort }}
+ {{ if .Values.liveness.enabled }}
+ livenessProbe:
+ tcpSocket:
+ port: {{ .Values.service.internalPort }}
+ initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.liveness.periodSeconds }}
+ {{ end }}
+ readinessProbe:
+ tcpSocket:
+ port: {{ .Values.service.internalPort }}
+ initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readiness.periodSeconds }}
+ volumeMounts:
+ - name: dshm
+ mountPath: /dev/shm
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: robot-eteshare
+ mountPath: /share/config
+ - name: robot-lighttpd
+ mountPath: /etc/lighttpd
+ - name: robot-lighttpd-ssl
+ mountPath: /etc/lighttpd/ssl
+ - name: robot-logs
+ mountPath: /share/logs
+ resources:
+{{ include "common.resources" . }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end -}}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ volumes:
+ {{- if .Values.persistence.enabled }}
+ - name: robot-logs
+ persistentVolumeClaim:
+ claimName: {{ include "common.fullname" . }}
+      {{- else }}
+      - name: robot-logs
+        emptyDir: {}
+ {{- end }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: robot-eteshare
+ configMap:
+ name: {{ include "common.fullname" . }}-eteshare-configmap
+ defaultMode: 0755
+ - name: robot-lighttpd
+ configMap:
+ name: {{ include "common.fullname" . }}-lighttpd-configmap
+ defaultMode: 0755
+ - name: robot-lighttpd-ssl
+ configMap:
+ name: {{ include "common.fullname" . }}-lighttpd-ssl-configmap
+ defaultMode: 0600
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
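The hostAliases block above is what lets the test suite reach the simpledemo hostnames through kubernetesExternalIp; a hedged check that the aliases actually landed in the running container (the pod name lookup is illustrative):

  ROBOTPOD=$(kubectl -n onap get pods -o name | grep robot | head -1)
  kubectl -n onap exec "${ROBOTPOD#pod/}" -- cat /etc/hosts | grep simpledemo
  # each hostname should resolve to the configured kubernetesExternalIp (10.0.0.14 by default)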
diff --git a/kubernetes/robot/templates/ingress.yaml b/kubernetes/robot/templates/ingress.yaml
new file mode 100644
index 0000000000..8f87c68f1e
--- /dev/null
+++ b/kubernetes/robot/templates/ingress.yaml
@@ -0,0 +1 @@
+{{ include "common.ingress" . }}
diff --git a/kubernetes/robot/templates/pv.yaml b/kubernetes/robot/templates/pv.yaml
new file mode 100644
index 0000000000..1226402372
--- /dev/null
+++ b/kubernetes/robot/templates/pv.yaml
@@ -0,0 +1,40 @@
+{{/*
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+{{- if eq "True" (include "common.needPV" .) -}}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: {{ include "common.release" . }}
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ include "common.release" . }}/{{ .Values.persistence.mountSubPath }}
+{{- end -}}
+{{- end -}}
diff --git a/kubernetes/robot/templates/pvc.yaml b/kubernetes/robot/templates/pvc.yaml
new file mode 100644
index 0000000000..6c18ca0628
--- /dev/null
+++ b/kubernetes/robot/templates/pvc.yaml
@@ -0,0 +1,39 @@
+{{/*
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: {{ include "common.release" . }}
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: {{ include "common.storageClass" . }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- end -}}
diff --git a/kubernetes/robot/templates/service.yaml b/kubernetes/robot/templates/service.yaml
new file mode 100644
index 0000000000..d1e3bec6b3
--- /dev/null
+++ b/kubernetes/robot/templates/service.yaml
@@ -0,0 +1,39 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "common.servicename" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ include "common.release" . }}
+ heritage: {{ .Release.Service }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{if eq .Values.service.type "NodePort" -}}
+ - port: {{ .Values.service.externalPort }}
+ nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+ name: {{ .Values.service.portName | default "http" }}
+ {{- else -}}
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ name: {{ .Values.service.portName | default "http" }}
+ {{- end}}
+ selector:
+ app: {{ include "common.name" . }}
+ release: {{ include "common.release" . }}
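With the NodePort branch above and the defaults set later in this change (global nodePortPrefix "302" and service.nodePort "09"), the rendered Service exposes nodePort 30209 on port 443; a small sketch to confirm this on a running deployment (namespace and service name assume the defaults):

  kubectl -n onap get svc robot -o jsonpath='{.spec.ports[0].nodePort}'
  # expected with the default values: 30209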
diff --git a/kubernetes/robot/values.yaml b/kubernetes/robot/values.yaml
new file mode 100644
index 0000000000..bbc4a952de
--- /dev/null
+++ b/kubernetes/robot/values.yaml
@@ -0,0 +1,453 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global: # global defaults
+ nodePortPrefix: 302
+ ubuntuInitRepository: registry.hub.docker.com
+ persistence: {}
+
+# application image
+repository: nexus3.onap.org:10001
+image: onap/testsuite:1.7.2
+pullPolicy: Always
+
+ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+config:
+ useIngressHost:
+ enabled: false
+ aaf_service:
+ enabled: true
+ https: true
+ hostname: aafservice
+ aai:
+ enabled: true
+ https: true
+ hostname: aai.api.sparky
+ appc:
+ enabled: true
+ https: true
+ hostname: appc.api
+ appc_cdt:
+ enabled: true
+ https: true
+ hostname: appccdt
+ clamp:
+ enabled: true
+ https: true
+ cli:
+ enabled: true
+ https: true
+ dcae_healthcheck:
+ enabled: false
+ dcae_ves_collector:
+ enabled: false
+ dcae_ves_colector_https:
+ enabled: false
+ https: true
+ dmaap_dr_prov:
+ enabled: true
+ https: true
+ hostname: dmaapdrprov
+ dmaap_dr_node:
+ enabled: true
+ https: true
+ hostname: dmaapdrnode
+ log_es:
+ enabled: false
+ log_kibana:
+ enabled: false
+ log_ls_http:
+ enabled: false
+ pomba_aaictxbuilder:
+ enabled: false
+ pomba_sdctxbuilder:
+ enabled: false
+ pomba_networkdiscoveryctxbuilder:
+ enabled: false
+ pomba_servicedecomposition:
+ enabled: false
+ pomba_sdncctxbuilder:
+ enabled: false
+ pomba_networkdiscovery:
+ enabled: false
+ pomba_validation_service:
+ enabled: false
+ pomba_kibana:
+ enabled: false
+ pomba_es:
+ enabled: false
+ pomba_contextaggregator:
+ enabled: false
+ message_router:
+ enabled: false
+ https: true
+ dmaap_bc:
+ enabled: true
+ https: true
+ hostname: dmaapbc
+ music:
+ enabled: false
+ https: true
+ nbi:
+ enabled: false
+ https: true
+ oof_has_api:
+ enabled: true
+ https: true
+ oof_osdf:
+ enabled: true
+ https: true
+ hostname: oofsdf
+ oof_cmso:
+ enabled: false
+ https: true
+ msb_iag:
+ enabled: false
+ pdp:
+ enabled: false
+ drools:
+ enabled: false
+ policy_api:
+ enabled: false
+ policy_pap:
+ enabled: false
+ policy_distribution:
+ enabled: false
+ policy_xacml_pdp:
+ enabled: false
+ policy_apex_pdp:
+ enabled: false
+ portal_app:
+ enabled: false
+ https: true
+ hostname: portal.api
+ sdc_be:
+ enabled: true
+ https: true
+ sdc_onboarding_be:
+ enabled: false
+ https: true
+ sdc_fe:
+ enabled: true
+ hostname: sdc.api.fe
+ https: true
+ sdc_dcae_be:
+ enabled: false
+ https: true
+ sdnc:
+ enabled: false
+ https: true
+ sdnc_portal:
+ enabled: false
+ https: true
+ so:
+ enabled: true
+ hostname: so.api
+ so_sdc_controller:
+ enabled: false
+ so_pbmn_infra:
+ enabled: false
+ so_catalog_db_adapter:
+ enabled: false
+ so_openstack_adapter:
+ enabled: false
+ so_request_db_adapter:
+ enabled: false
+ so_sdnc_adapter:
+ enabled: false
+ so_vfc_adapter:
+ enabled: false
+ so_vnfm_adapter:
+ enabled: false
+ so_nssmf_adapter:
+ enabled: false
+ vid:
+ enabled: false
+ refrepo:
+ enabled: true
+ https: true
+ cds_blueprints_processor_http:
+ enabled: true
+ hostname: blueprintsprocessorhttp
+ dcae_hv_ves_collector:
+ enabled: false
+ message_router_kafka:
+ enabled: false
+ inventory:
+ enabled: false
+ https: true
+ deployment-handler:
+ enabled: false
+ https: true
+ aaf_sms:
+ enabled: false
+ https: true
+
+# for access to test logs
+ robotWebUser: "test"
+ robotWebPassword: "test"
+
+# openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and should be overridden per environment
+ openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+ openStackSoEncryptedPassword: "SAME_STRING_AS_SO_JAVA_ENCRYPTED_PASSWORD"
+
+# Demo configuration
+# Nexus demo artifact version. Maps to GLOBAL_INJECTED_ARTIFACTS_VERSION
+# 1.6.0 Frankfurt change or override for your release to match demo:vnfs/vFW/scripts/pom.xml version
+demoArtifactsVersion: "1.7.0"
+# Nexus demo artifact URL.
+demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+# Openstack medium sized flavour name. Maps GLOBAL_INJECTED_VM_FLAVOR
+openStackFlavourMedium: "m1.medium"
+
+################# Openstack .RC Parameters #################################
+# KEYSTONE Version 3 Required for Rocky and beyond
+# Openstack Keystone API version. Valid values are [ v2.0, v3 ]. Maps to GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION
+openStackKeystoneAPIVersion: "v2.0"
+
+# OS_AUTH_URL without the /v3 or /v2.0 from the openstack .RC file
+# Openstack keystone URL. Maps to GLOBAL_INJECTED_KEYSTONE
+openStackKeyStoneUrl: "http://1.2.3.4:5000"
+
+# OS_PROJECT_ID from the openstack .RC file
+# Openstack tenant UUID where VNFs will be spawned. Maps to GLOBAL_INJECTED_OPENSTACK_TENANT_ID
+openStackTenantId: "47899782ed714295b1151681fdfd51f5"
+
+# OS_PROJECT_NAME from the openstack .RC file
+# Project name of Openstack where VNFs will be spawned. Maps to GLOBAL_INJECTED_OPENSTACK_PROJECT_NAME
+openStackProjectName: "onap"
+
+# OS_USERNAME from the openstack .RC file
+# username for Openstack tenant where VNFs will be spawned. Maps to GLOBAL_INJECTED_OPENSTACK_USERNAME
+openStackUserName: "tenantUsername"
+
+# OS_PROJECT_DOMAIN_ID from the openstack .RC file
+# in some environments it is a string but in other environments it may be numeric
+# Domain id of openstack where VNFs will be deployed. Maps to GLOBAL_INJECTED_OPENSTACK_DOMAIN_ID
+openStackDomainId: "default"
+
+# OS_USER_DOMAIN from the openstack .RC file
+# User domain of Openstack where VNFs will be deployed. Maps to GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN
+openStackUserDomain: "Default"
+
+
+# UUID of the Openstack network that can assign floating ips. Maps to GLOBAL_INJECTED_PUBLIC_NET_ID
+openStackPublicNetId: "e8f51958045716781ffc"
+openStackPublicNetworkName: "public"
+# Openstack region. Maps to GLOBAL_INJECTED_REGION
+openStackRegion: "RegionOne"
+
+
+# Values for the second cloud instance for VNF instantiation testing and keystone v3
+openStackRegionRegionThree: "RegionThree"
+openStackKeyStoneUrlRegionThree: "http://1.2.3.4:5000"
+openStackKeystoneAPIVersionRegionThree: "v3"
+openStackUserNameRegionThree: "demo"
+# this is the java encrypted password that is needed for SO
+openSackMsoEncryptdPasswordRegionThree: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+openStackTenantIdRegionThree: "3583253e932845a09cd4c8ca2f31d095"
+openStackProjectNameRegionThree: "Integration-SB-RegionThree"
+openStackDomainIdRegionThree: "Default"
+#
+# Openstack glance image name for Ubuntu 14. Maps to GLOBAL_INJECTED_UBUNTU_1404_IMAGE
+ubuntu14Image: "Ubuntu_14_trusty"
+# Openstack glance image name for Ubuntu 16. Maps to GLOBAL_INJECTED_UBUNTU_1604_IMAGE
+ubuntu16Image: "Ubuntu_16_xenial"
+# DANOS image and flavor references
+danosImage: "danos-ves-image"
+danosFlavor: "danos"
+# GLOBAL_INJECTED_SCRIPT_VERSION. Maps to GLOBAL_INJECTED_SCRIPT_VERSION
+# 1.6.0 Frankfurt change or override for your release to match demo:vnfs/vFW/scripts/pom.xml version
+scriptVersion: "1.7.0"
+# Openstack network to which VNFs will bind their primary (first) interface. Maps to GLOBAL_INJECTED_NETWORK
+openStackPrivateNetId: "e8f51956-00dd-4425-af36-045716781ffc"
+# Openstack security group for instantiating VNFs
+openStackSecurityGroup: "onap_sg"
+# SDNC Preload configuration
+# Openstack subnet UUID for the network defined by openStackPrivateNetId. Maps to onap_private_subnet_id
+openStackPrivateSubnetId: "e8f51956-00dd-4425-af36-045716781ffc"
+# CIDR notation for the Openstack private network where VNFs will be spawned. Maps to onap_private_net_cidr
+openStackPrivateNetCidr: "10.0.0.0/8"
+# The first 2 octets of the private Openstack subnet where VNFs will be spawned.
+# Needed because the sdnc preload templates hardcode addresses like 10.0.${ecompnet}.X
+openStackOamNetworkCidrPrefix: "10.0"
+# If your network allows a /16 for the ONAP OAM network, leave this blank and robot will assign
+# a random value so that multiple vFWCL, vLB etc. instances can be instantiated in your environment.
+# If you only have a /24, provide the 3rd octet you use for that /24.
+openStackOamNetwork3rdOctet: ""
+# VID protocol/port until Selenium issue with SSL resolved
+# https/8443 vs http/8080
+vidServerProtocol: "https"
+vidServerPort: "8443"
+# Override with Pub Key for access to VNF
+vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh imported-openssh-key"
+# Override with Private Key for access to VNF
+vnfPrivateKey: "onap_dev.yaml"
+# Override with DCAE VES Collector external IP
+dcaeCollectorIp: "FILL_IN_WITH_DCAE_VES_COLLECTOR_IP"
+# kubernetes external IP for Portal GUI tests
+kubernetesExternalIp: "10.0.0.14"
+
+
+# Credentials for ONAP Component
+# AAF
+aafUsername: "demo@people.osaaf.org"
+aafPassword: "demo123456!"
+# AAI
+aaiUsername: "aai@aai.onap.org"
+aaiPassword: "demo123456!"
+# APPC
+appcUsername: "appc@appc.onap.org"
+appcPassword: "demo123456!"
+# DCAE
+dcaeUsername: "dcae@dcae.onap.org"
+dcaePassword: "demo123456!"
+# DROOLS
+droolsUsername: "demo@people.osaaf.org"
+droolsPassword: "demo123456!"
+# OOF
+oofCmsoUsername: "oof@oof.onap.org"
+oofCmsoPassword: "demo123456!"
+# POLICY
+policyAuth: "dGVzdHBkcDphbHBoYTEyMw=="
+policyClientAuth: "cHl0aG9uOnRlc3Q="
+policyUsername: "demo@people.osaaf.org"
+policyPassword: "demo123456!"
+policyComponentUsername: "healthcheck"
+policyComponentPassword: "zb!XztG34"
+# PORTAL
+portalUsername: "demo"
+portalPassword: "Kp8bJ4SXszM0WXlhak3eHlcse"
+# SO
+soUsername: "InfraPortalClient"
+soPassword: "password1$"
+soCatdbUsername: "bpel"
+soCatdbPassword: "password1$"
+# SDNC
+sdncUsername: "admin"
+sdncPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
+# VID
+vidUsername: "demo"
+vidPassword: "Kp8bJ4SXszM0WX"
+vidHealthUsername: "Default"
+vidHealthPassword: "AppPassword!1"
+# DMAAP BC
+bcUsername: "dmaap-bc@dmaap-bc.onap.org"
+bcPassword: "demo123456!"
+# DMAAP KAFKA JAAS
+kafkaJaasUsername: "admin"
+kafkaJaasPassword: "admin_secret"
+
+#OOF
+oofUsername: "oof@oof.onap.org"
+oofPassword: "demo123456!"
+cmsoUsername: "oof@oof.onap.org"
+cmsoPassword: "demo123456!"
+oofOsdfPciOptUsername: "oof@oof.onap.org"
+oofOsdfPciOptPassword: "demo123456!"
+oofHomingUsername: "admin1"
+oofHomingPassword: "plan.15"
+
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# Resource Limit flavor - by default using small
+flavor: small
+# Segregation for different environments (small and large)
+resources:
+ small:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 500m
+ memory: 1Gi
+ large:
+ limits:
+ cpu: 4000m
+ memory: 8Gi
+ requests:
+ cpu: 1000m
+ memory: 2Gi
+ unlimited: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 180
+ periodSeconds: 10
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 180
+ periodSeconds: 10
+
+service:
+ name: robot
+ type: NodePort
+ portName: httpd
+ externalPort: 443
+ internalPort: 443
+ nodePort: "09"
+
+ingress:
+ enabled: false
+ service:
+ - baseaddr: "robot.api"
+ name: "robot"
+ port: 443
+ config:
+ ssl: "redirect"
+
+
+## Persist data to a persistent volume
+persistence:
+ enabled: true
+
+ ## A manually managed Persistent Volume and Claim
+ ## Requires persistence.enabled: true
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ volumeReclaimPolicy: Retain
+
+  ## robot logs Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ accessMode: ReadWriteOnce
+ size: 2Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: robot/logs
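Most installations only need to override a handful of these values; a purely illustrative standalone install is sketched below (in a full OOM deployment the robot chart is normally pulled in by the umbrella chart instead, and "local/robot" assumes the OOM-built local helm repo):

  helm upgrade --install dev-robot local/robot --namespace onap \
      --set openStackKeyStoneUrl="http://10.12.25.2:5000" \
      --set openStackUserName=demo \
      --set openStackTenantId=47899782ed714295b1151681fdfd51f5 \
      --set config.robotWebPassword=ChangeMe123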
diff --git a/kubernetes/so/components/so-bpmn-infra/resources/config/overrides/override.yaml b/kubernetes/so/components/so-bpmn-infra/resources/config/overrides/override.yaml
index 11128dd68c..b4bd316c03 100755
--- a/kubernetes/so/components/so-bpmn-infra/resources/config/overrides/override.yaml
+++ b/kubernetes/so/components/so-bpmn-infra/resources/config/overrides/override.yaml
@@ -303,6 +303,10 @@ sdnc:
dmaap:
host: http://message-router.{{ include "common.namespace" . }}:3904
timeout: 30000
+ publisher:
+ topic: RAN-Slice-Mgmt
+ callback:
+ timeout: PT5M
lcm:
path: '/restconf/operations/LCM:'
actionTimeout: 300000
diff --git a/kubernetes/so/components/soHelpers/templates/_certificates.tpl b/kubernetes/so/components/soHelpers/templates/_certificates.tpl
index cc22dc97c3..c5232e8f48 100644
--- a/kubernetes/so/components/soHelpers/templates/_certificates.tpl
+++ b/kubernetes/so/components/soHelpers/templates/_certificates.tpl
@@ -21,6 +21,8 @@
if [ "${EXIT_VALUE}" != "0" ]
then
echo "issue with password: $cadi_truststore_password"
+ ls -lh {{ $subchartDot.Values.certInitializer.credsPath }}/mycreds.prop
+ cat {{ $subchartDot.Values.certInitializer.credsPath }}/mycreds.prop
exit $EXIT_VALUE
else
keytool -importkeystore -srckeystore "{{ $subchartDot.Values.certInitializer.credsPath }}/truststoreONAPall.jks" \
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000000..60f93a35e4
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,14 @@
+[tox]
+minversion = 1.6
+envlist =
+ gitlint,
+skipsdist=true
+
+[testenv:gitlint]
+basepython = python3
+deps =
+ gitlint
+
+commands =
+ gitlint
+