-rw-r--r--  docs/hardcoded_certificates.csv | 32
-rw-r--r--  docs/index.rst | 5
-rw-r--r--  docs/oom_cloud_setup_guide.rst | 11
-rw-r--r--  docs/oom_developer_guide.rst | 16
-rw-r--r--  docs/oom_hardcoded_certificates.rst | 72
-rw-r--r--  docs/oom_project_description.rst | 24
-rw-r--r--  docs/oom_quickstart_guide.rst | 95
-rw-r--r--  docs/oom_setup_ingress_controller.rst | 187
-rw-r--r--  docs/oom_setup_kubernetes_rancher.rst | 52
-rw-r--r--  docs/oom_user_guide.rst | 22
-rw-r--r--  docs/release-notes-dublin.rst | 6
-rw-r--r--  docs/release-notes-elalto.rst | 3
-rw-r--r--  docs/release-notes.rst | 2
m---------  kubernetes/aai | 0
-rw-r--r--  kubernetes/dcaegen2/components/dcae-cloudify-manager/values.yaml | 2
-rw-r--r--  kubernetes/dcaegen2/components/dcae-healthcheck/values.yaml | 3
-rw-r--r--  kubernetes/dmaap/components/dmaap-bc/templates/dmaap-provisioning-job.yaml | 2
-rw-r--r--  kubernetes/nbi/templates/deployment.yaml | 6
-rw-r--r--  kubernetes/portal/components/portal-mariadb/templates/deployment.yaml | 7
-rw-r--r--  kubernetes/portal/components/portal-mariadb/values.yaml | 3
m---------  kubernetes/robot | 0
21 files changed, 295 insertions, 255 deletions
diff --git a/docs/hardcoded_certificates.csv b/docs/hardcoded_certificates.csv
new file mode 100644
index 0000000000..762956febd
--- /dev/null
+++ b/docs/hardcoded_certificates.csv
@@ -0,0 +1,32 @@
+Project,ONAP Certificate,Own Certificate,MSB Certificate,Path
+AAF,No,Yes,No,aaf/charts/aaf-cert-service/resources/
+AAF,Yes,No,No,aaf/components/aaf-sms/resources/certs/intermediate_root_ca.pem
+AAI,Yes,No,No,aai/oom/resources/config/haproxy/aai.pem
+AAI,Yes,No,No,aai/oom/resources/config/aai/aai_keystore
+AAI/SEARCH-DATA,Yes,No,No,aai/oom/components/aai-search-data/resources/config/auth/tomcat_keystore
+AAI/SPARKY-BE,Yes,No,No,aai/oom/components/aai-spary-be/resources/config/auth/org.onap.aai.p12
+AAI/BABEL,No,Yes,No,aai/oom/components/aai-babel/resources/config/auth/tomcat_keystore
+AAI/MODEL-LOADER,Yes,Yes,No,aai/oom/components/aai-model-loaderresources/config/auth/tomcat_keystore
+APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.keyfile
+APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.p12
+certInitializer,Yes,No,No,kubernetes/common/certInitializer/resources
+MSB,Yes,No?,Yes,kubernetes/msb/resources/config/certificates
+MUSIC,Yes,No?,No?,kubernetes/common/music/charts/music/resources/keys/
+SDC,Yes,No?,No?,kubernetes/sdc/resources/cert
+SO,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/BPMN,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/Catalog,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/Monitoring,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/OpenStack,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/RequestDb,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/SDC,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/SDNC,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/VE/VNFM,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/VFC,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/VNFM,Yes,No?,Yes,kubernetes/so/resources/config/certificates
+SO/VNFM,No,Yes?,Yes,kubernetes/so/charts/so-secrets/resources/certs/org.onap.so.trust.jks
+VID,No,Yes,No,kubernetes/vid/resources/cert
+OOF/OOF-CMSO,Yes,No,No,kubernetes/oof/charts/oof-cmso/resources/certs
+OOF/OOF-HAS,Yes,No,No,kubernetes/oof/charts/oof-has/resources/config
+OOF/OOF-OSDF,Yes,No,No,kubernetes/oof/resources/config
+CLI,No,Yes,No,kubernetes/cli/resources/certificates
diff --git a/docs/index.rst b/docs/index.rst
index c933a726fb..c3902ecae0 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,6 +1,7 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
.. _master_index:
diff --git a/docs/oom_cloud_setup_guide.rst b/docs/oom_cloud_setup_guide.rst
index 2c6eb9a5f8..9a34036727 100644
--- a/docs/oom_cloud_setup_guide.rst
+++ b/docs/oom_cloud_setup_guide.rst
@@ -1,7 +1,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0
.. International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2019 Amdocs, Bell Canada
+.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung
.. _oom_cloud_setup_guide:
.. Links
@@ -92,9 +92,9 @@ Cloud Installation
.. - IBM, and
.. - `Openstack`_.
..
-.. #. Alternatively, OOM can be deployed on a private set of physical hosts or VMs
-.. (or even a combination of the two). The following guides describe how to
-.. create a Kubernetes cluster with popular tools:
+.. #. Alternatively, OOM can be deployed on a private set of physical hosts or
+.. VMs (or even a combination of the two). The following guides describe how
+.. to create a Kubernetes cluster with popular tools:
..
.. - `Setting up Kubernetes with Rancher`_ (recommended)
.. - `Setting up Kubernetes with Kubeadm`_
@@ -104,4 +104,5 @@ OOM can be deployed on a private set of physical hosts or VMs (or even a
combination of the two). The following guide describe the recommended method to
setup a Kubernetes cluster: :ref:`onap-on-kubernetes-with-rancher`.
-There are alternative deployment methods described on the `Cloud Native Deployment Wiki`_
+There are alternative deployment methods described on the
+`Cloud Native Deployment Wiki`_
diff --git a/docs/oom_developer_guide.rst b/docs/oom_developer_guide.rst
index 3cced83f62..d76bd35b39 100644
--- a/docs/oom_developer_guide.rst
+++ b/docs/oom_developer_guide.rst
@@ -1,6 +1,7 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
.. Links
.. _Helm: https://docs.helm.sh/
@@ -164,7 +165,8 @@ components and in themselves can be quite complex.
You can use either `charts` or `components` folder for your subcomponents.
`charts` folder means that the subcomponent will always been deployed.
-`components` folders means we can choose if we want to deploy the sub component.
+`components` folder means we can choose whether we want to deploy the
+subcomponent.
This choice is done in root `values.yaml`:
@@ -451,10 +453,10 @@ It would render the following Service Resource (for a component named
app.kubernetes.io/instance: my-deployment-name-of-my-component
type: NodePort
-In the deployment or statefulSet file, you needs to set the good labels in order
-for the service to match the pods.
+In the deployment or statefulSet file, you need to set the right labels in
+order for the service to match the pods.
-here's an example to be sure it matchs (for a statefulSet):
+Here's an example to be sure it matches (for a statefulSet):
.. code-block:: yaml
@@ -1010,7 +1012,7 @@ MSB service discovery. The following is a brief description of how this
integration will be done:
A registrator to push the service endpoint info to MSB service
-discovery. 
+discovery.
- The needed service endpoint info is put into the kubernetes yaml file
as annotation, including service name, Protocol,version, visual
diff --git a/docs/oom_hardcoded_certificates.rst b/docs/oom_hardcoded_certificates.rst
index d853244283..9f6aa1ff0e 100644
--- a/docs/oom_hardcoded_certificates.rst
+++ b/docs/oom_hardcoded_certificates.rst
@@ -1,9 +1,10 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
.. Copyright 2018 Amdocs, Bell Canada, 2020 Nokia Solutions and Networks
.. Links
-.. _hardcoded-certiticates-label:
+.. _hardcoded-certificates-label:
ONAP Hardcoded certificates
###########################
@@ -11,68 +12,5 @@ ONAP Hardcoded certificates
ONAP current installation have hardcoded certificates.
Here's the list of these certificates:
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | Project | ONAP Certificate | Own Certificate | MSB Certificate | Path |
- +==================+==================+==================+=================+==========================================================================+
- | AAF | No | Yes | No | aaf/charts/aaf-cert-service/resources/ |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAF | Yes | No | No | aaf/components/aaf-sms/resources/certs/intermediate_root_ca.pem |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAI | Yes | No | No | aai/oom/resources/config/haproxy/aai.pem |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAI | Yes | No | No | aai/oom/resources/config/aai/aai_keystore |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAI/SEARCH-DATA | Yes | No | No | aai/oom/components/aai-search-data/resources/config/auth/tomcat_keystore |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAI/SPARKY-BE | Yes | No | No | aai/oom/components/aai-spary-be/resources/config/auth/org.onap.aai.p12 |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAI/BABEL | No | Yes | No | aai/oom/components/aai-babel/resources/config/auth/tomcat_keystore |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | AAI/MODEL-LOADER | Yes | Yes | No | aai/oom/components/aai-model-loaderresources/config/auth/tomcat_keystore |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | APPC | Yes | No | No | kubernetes/appc/resources/config/certs/org.onap.appc.keyfile |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | APPC | Yes | No | No | kubernetes/appc/resources/config/certs/org.onap.appc.p12 |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | certInitializer | Yes | No | No | kubernetes/common/certInitializer/resources |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | MSB | Yes | No? | Yes | kubernetes/msb/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | MUSIC | Yes | No? | No? | kubernetes/common/music/charts/music/resources/keys/ |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SDC | Yes | No? | No? | kubernetes/sdc/resources/cert |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/BPMN | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/Catalog | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/Monitoring | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/OpenStack | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/RequestDb | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/SDC | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/SDNC | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/VE/VNFM | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/VFC | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/VNFM | Yes | No? | Yes | kubernetes/so/resources/config/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | SO/VNFM | No | Yes? | Yes | kubernetes/so/charts/so-secrets/resources/certs/org.onap.so.trust.jks |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | VID | No | Yes | No | kubernetes/vid/resources/cert |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | OOF/OOF-CMSO | Yes | No | No | kubernetes/oof/charts/oof-cmso/resources/certs |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | OOF/OOF-HAS | Yes | No | No | kubernetes/oof/charts/oof-has/resources/config |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | OOF/OOF-OSDF | Yes | No | No | kubernetes/oof/resources/config |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
- | CLI | No | Yes | No | kubernetes/cli/resources/certificates |
- +------------------+------------------+------------------+-----------------+--------------------------------------------------------------------------+
+.. csv-table::
+ :file: hardcoded_certificates.csv
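A fuller variant of the ``csv-table`` directive, assuming the first CSV row is
a header (the ``:header-rows:`` and ``:widths:`` values below are illustrative
only), would look like::

   .. csv-table::
      :file: hardcoded_certificates.csv
      :header-rows: 1
      :widths: 15, 10, 10, 10, 55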
diff --git a/docs/oom_project_description.rst b/docs/oom_project_description.rst
index 034d0a48c9..f1587b4eeb 100644
--- a/docs/oom_project_description.rst
+++ b/docs/oom_project_description.rst
@@ -1,6 +1,7 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
.. _oom_project_description:
ONAP Operations Manager Project
@@ -23,23 +24,28 @@ In summary OOM provides the following capabilities:
- **Deploy** - with built-in component dependency management
- **Configure** - unified configuration across all ONAP components
-- **Monitor** - real-time health monitoring feeding to a Consul UI and Kubernetes
+- **Monitor** - real-time health monitoring feeding to a Consul UI and
+ Kubernetes
- **Heal**- failed ONAP containers are recreated automatically
- **Scale** - cluster ONAP services to enable seamless scaling
-- **Upgrade** - change-out containers or configuration with little or no service impact
+- **Upgrade** - change-out containers or configuration with little or no
+ service impact
- **Delete** - cleanup individual containers or entire deployments
OOM supports a wide variety of Kubernetes private clouds - built with Rancher,
-Kubeadm or Cloudify - and public cloud infrastructures such as: Microsoft Azure,
-Amazon AWS, Google GCD, VMware VIO, and Openstack.
+Kubeadm or Cloudify - and public cloud infrastructures such as: Microsoft
+Azure, Amazon AWS, Google GCD, VMware VIO, and OpenStack.
-The OOM documentation is broken into four different areas each targeted at a different user:
+The OOM documentation is broken into four different areas each targeted at a
+different user:
- :ref:`quick-start-label` - deploy ONAP on an existing cloud
- :ref:`user-guide-label` - a guide for operators of an ONAP instance
- :ref:`developer-guide-label` - a guide for developers of OOM and ONAP
-- :ref:`cloud-setup-guide-label` - a guide for those setting up cloud environments that ONAP will use
-- :ref:`hardcoded-certiticates-label` - the list of all hardcoded certificates sets in ONAP installation
+- :ref:`cloud-setup-guide-label` - a guide for those setting up cloud
+ environments that ONAP will use
+- :ref:`hardcoded-certificates-label` - the list of all hardcoded certificates
+ set in ONAP installation
The :ref:`release_notes` for OOM describe the incremental features per release.
diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst
index 78af191872..bc4329e0d5 100644
--- a/docs/oom_quickstart_guide.rst
+++ b/docs/oom_quickstart_guide.rst
@@ -1,7 +1,7 @@
.. This work is licensed under a
.. Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2019 Amdocs, Bell Canada
+.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung
.. _oom_quickstart_guide:
.. _quick-start-label:
@@ -11,7 +11,7 @@ OOM Quick Start Guide
.. figure:: oomLogoV2-medium.png
:align: right
-Once a kubernetes environment is available (follow the instructions in
+Once a Kubernetes environment is available (follow the instructions in
:ref:`cloud-setup-guide-label` if you don't have a cloud environment
available), follow the following instructions to deploy ONAP.
@@ -20,7 +20,7 @@ available), follow the following instructions to deploy ONAP.
> git clone -b <BRANCH> http://gerrit.onap.org/r/oom --recurse-submodules
> cd oom/kubernetes
-where <BRANCH> can be an offical release tag, such as
+where <BRANCH> can be an official release tag, such as
* 4.0.0-ONAP for Dublin
* 5.0.1-ONAP for El Alto
@@ -31,9 +31,9 @@ where <BRANCH> can be an offical release tag, such as
> sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm
-**Step 3.** Customize the helm charts like `oom/kubernetes/onap/values.yaml` or an override
-file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml` file to suit your deployment
-with items like the OpenStack tenant information.
+**Step 3.** Customize the Helm charts like `oom/kubernetes/onap/values.yaml` or
+an override file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml`
+to suit your deployment with items like the OpenStack tenant information.
.. note::
Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`) can be found in
@@ -44,15 +44,15 @@ with items like the OpenStack tenant information.
the ``enabled: true/false`` flags.
- b. Encrypt the OpenStack password using the shell tool for robot and put it in
- the robot helm charts or robot section of `openstack.yaml`
+ b. Encrypt the OpenStack password using the shell tool for Robot and put it in
+ the Robot Helm charts or Robot section of `openstack.yaml`
- c. Encrypt the OpenStack password using the java based script for SO helm charts
+ c. Encrypt the OpenStack password using the java based script for SO Helm charts
or SO section of `openstack.yaml`.
- d. Update the OpenStack parameters that will be used by robot, SO and APPC helm
+ d. Update the OpenStack parameters that will be used by Robot, SO and APPC Helm
charts or use an override file to replace them.
e. Add in the command line a value for the global master password (global.masterPassword).
@@ -68,11 +68,11 @@ We have different values file available for different contexts.
b. Generating ROBOT Encrypted Password:
-The ROBOT encrypted Password uses the same encryption.key as SO but an
+The Robot encrypted Password uses the same encryption.key as SO but an
openssl algorithm that works with the python based Robot Framework.
.. note::
- To generate ROBOT ``openStackEncryptedPasswordHere``::
+ To generate Robot ``openStackEncryptedPasswordHere``::
cd so/resources/config/mso/
/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p``
@@ -80,7 +80,7 @@ openssl algorithm that works with the python based Robot Framework.
c. Generating SO Encrypted Password:
The SO Encrypted Password uses a java based encryption utility since the
Java encryption library is not easy to integrate with openssl/python that
-ROBOT uses in Dublin and upper versions.
+Robot uses in Dublin and upper versions.
.. note::
To generate SO ``openStackEncryptedPasswordHere`` and ``openStackSoEncryptedPassword``
@@ -101,32 +101,33 @@ ROBOT uses in Dublin and upper versions.
d. Update the OpenStack parameters:
-There are assumptions in the demonstration VNF heat templates about the networking
-available in the environment. To get the most value out of these templates and the
-automation that can help confirm the setup is correct, please observe the following
-constraints.
+There are assumptions in the demonstration VNF Heat templates about the
+networking available in the environment. To get the most value out of these
+templates and the automation that can help confirm the setup is correct, please
+observe the following constraints.
``openStackPublicNetId:``
- This network should allow heat templates to add interfaces.
- This need not be an external network, floating IPs can be assigned to the ports on
- the VMs that are created by the heat template but its important that neutron allow
- ports to be created on them.
+ This network should allow Heat templates to add interfaces.
+   This need not be an external network; floating IPs can be assigned to the
+   ports on the VMs that are created by the Heat template, but it's important
+   that Neutron allows ports to be created on them.
``openStackPrivateNetCidr: "10.0.0.0/16"``
- This ip address block is used to assign OA&M addresses on VNFs to allow ONAP connectivity.
- The demonstration heat templates assume that 10.0 prefix can be used by the VNFs and the
- demonstration ip addressing plan embodied in the preload template prevent conflicts when
- instantiating the various VNFs. If you need to change this, you will need to modify the preload
- data in the robot helm chart like integration_preload_parametes.py and the demo/heat/preload_data
- in the robot container. The size of the CIDR should be sufficient for ONAP and the VMs you expect
- to create.
+   This IP address block is used to assign OA&M addresses on VNFs to allow
+   ONAP connectivity. The demonstration Heat templates assume that the 10.0
+   prefix can be used by the VNFs and the demonstration IP addressing plan
+   embodied in the preload template prevents conflicts when instantiating the
+   various VNFs. If you need to change this, you will need to modify the
+   preload data in the Robot Helm chart like integration_preload_parameters.py
+   and the demo/heat/preload_data in the Robot container. The size of the CIDR
+   should be sufficient for ONAP and the VMs you expect to create.
``openStackOamNetworkCidrPrefix: "10.0"``
- This ip prefix mush match the openStackPrivateNetCidr and is a helper variable to some of the
- robot scripts for demonstration. A production deployment need not worry about this
- setting but for the demonstration VNFs the ip asssignment strategy assumes 10.0 ip prefix.
-
+   This IP prefix must match the openStackPrivateNetCidr and is a helper
+   variable to some of the Robot scripts for demonstration. A production
+   deployment need not worry about this setting but for the demonstration VNFs
+   the IP assignment strategy assumes the 10.0 IP prefix.
Example Keystone v2.0
@@ -165,7 +166,9 @@ follows::
.. literalinclude:: helm-search.txt
.. note::
- The setup of the Helm repository is a one time activity. If you make changes to your deployment charts or values be sure to use ``make`` to update your local Helm repository.
+  The setup of the Helm repository is a one-time activity. If you make changes
+ to your deployment charts or values be sure to use ``make`` to update your
+ local Helm repository.
**Step 8.** Once the repo is setup, installation of ONAP can be done with a
single command
@@ -189,26 +192,35 @@ To deploy all ONAP applications use this command::
> cd oom/kubernetes
> helm deploy dev local/onap --namespace onap --set global.masterPassword=myAwesomePasswordThatINeedToChange -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/environment.yaml -f onap/resources/overrides/openstack.yaml --timeout 900
-All override files may be customized (or replaced by other overrides) as per needs.
+All override files may be customized (or replaced by other overrides) as
+needed.
`onap-all.yaml`
- Enables the modules in the ONAP deployment. As ONAP is very modular, it is possible to customize ONAP and disable some components through this configuration file.
+ Enables the modules in the ONAP deployment. As ONAP is very modular, it is
+ possible to customize ONAP and disable some components through this
+ configuration file.
`onap-all-ingress-nginx-vhost.yaml`
- Alternative version of the `onap-all.yaml` but with global ingress controller enabled. It requires the cluster configured with the nginx ingress controller and load balancer.
- Please use this file instad `onap-all.yaml` if you want to use experimental ingress controller feature.
+  Alternative version of `onap-all.yaml` but with the global ingress controller
+  enabled. It requires a cluster configured with the NGINX ingress controller
+  and load balancer. Please use this file instead of `onap-all.yaml` if you
+  want to use the experimental ingress controller feature.
`environment.yaml`
Includes configuration values specific to the deployment environment.
- Example: adapt readiness and liveness timers to the level of performance of your infrastructure
+ Example: adapt readiness and liveness timers to the level of performance of
+ your infrastructure
`openstack.yaml`
- Includes all the Openstack related information for the default target tenant you want to use to deploy VNFs from ONAP and/or additional parameters for the embedded tests.
+ Includes all the OpenStack related information for the default target tenant
+ you want to use to deploy VNFs from ONAP and/or additional parameters for the
+ embedded tests.
**Step 9.** Verify ONAP installation
-Use the following to monitor your deployment and determine when ONAP is ready for use::
+Use the following to monitor your deployment and determine when ONAP is ready
+for use::
> kubectl get pods -n onap -o=wide
@@ -219,7 +231,8 @@ Use the following to monitor your deployment and determine when ONAP is ready fo
> ~/oom/kubernetes/robot/ete-k8s.sh onap health
-**Step 10.** Undeploy ONAP::
+**Step 10.** Undeploy ONAP
+::
> helm undeploy dev --purge
diff --git a/docs/oom_setup_ingress_controller.rst b/docs/oom_setup_ingress_controller.rst
index a4abc2b390..c15171c7be 100644
--- a/docs/oom_setup_ingress_controller.rst
+++ b/docs/oom_setup_ingress_controller.rst
@@ -1,4 +1,5 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
.. Copyright 2020, Samsung Electronics
@@ -20,9 +21,10 @@
Ingress controller setup on HA Kubernetes Cluster
#################################################
-This guide provides instruction how to setup experimental ingress controller feature.
-For this, we are hosting our cluster on OpenStack VMs and using the Rancher Kubernetes Engine (RKE)
-to deploy and manage our Kubernetes Cluster and ingress controller
+This guide provides instructions on how to set up the experimental ingress
+controller feature. For this, we are hosting our cluster on OpenStack VMs and
+using the Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes
+cluster and ingress controller.
.. contents::
:depth: 1
@@ -33,127 +35,148 @@ The result at the end of this tutorial will be:
#. Customization of the cluster.yaml file for ingress controller support
-#. Installation and configuration test DNS server for ingress host resolution on testing machines
+#. Installation and configuration of a test DNS server for ingress host
+   resolution on testing machines
-#. Instalation and configuration MLB (Metal Load Balancer) required for exposing ingress service
+#. Installation and configuration of MetalLB (Metal Load Balancer), required
+   for exposing the ingress service
-#. Instalation and configuration NGINX ingress controller
+#. Installation and configuration of the NGINX ingress controller
-#. Additional info howto deploy onap with services exposed via Ingress controller
+#. Additional info on how to deploy ONAP with services exposed via the Ingress
+   controller
Customize cluster.yml file
-===========================
-Before setup cluster for ingress purposes DNS cluster IP and ingress provider should be configured and follwing:
+==========================
+Before setting up the cluster for ingress purposes, the DNS cluster IP and
+ingress provider should be configured as follows:
.. code-block:: yaml
- <...>
- restore:
- restore: false
- snapshot_name: ""
- ingress:
- provider: none
- dns:
- provider: coredns
- upstreamnameservers:
- - <custer_dns_ip>:31555
-
-Where the <cluster_dns_ip> should be set to the same IP as the CONTROLPANE node.
-
-For external load balacer purposes minimum one of the worker node should be configured with external IP
-address accessible outside the cluster. It can be done using the following example node configuration:
+
+ ---
+ <...>
+ restore:
+ restore: false
+ snapshot_name: ""
+ ingress:
+ provider: none
+ dns:
+ provider: coredns
+ upstreamnameservers:
+      - <cluster_dns_ip>:31555
+
+Where the <cluster_dns_ip> should be set to the same IP as the controlplane
+node.
+
+For external load balancer purposes, at least one of the worker nodes should be
+configured with an external IP address accessible outside the cluster. It can
+be done using the following example node configuration:
.. code-block:: yaml
- <...>
- - address: <external_ip>
- internal_address: <internal_ip>
- port: "22"
- role:
- - worker
- hostname_override: "onap-worker-0"
- user: ubuntu
- ssh_key_path: "~/.ssh/id_rsa"
- <...>
-Where the <external_ip> is external worker node IP address, and <internal_ip> is internal node IP address if it is required
+ ---
+ <...>
+ - address: <external_ip>
+ internal_address: <internal_ip>
+ port: "22"
+ role:
+ - worker
+ hostname_override: "onap-worker-0"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/id_rsa"
+ <...>
+Where <external_ip> is the external worker node IP address, and <internal_ip>
+is the internal node IP address, if it is required.
-DNS server configuration and instalation
-========================
-DNS server deployed on the Kubernetes cluster makes it easy to use services exposed through ingress controller because it
-resolves all subdomain related to the onap cluster to the load balancer IP.
-Testing ONAP cluster requires a lot of entries on the target machines in the /etc/hosts.
-Adding many entries into the configuration files on testing machines is quite problematic and error prone.
-The better wait is to create central DNS server with entries for all virtual host pointed to simpledemo.onap.org and add custom DNS server as a target DNS server for testing machines and/or as external DNS for kubernetes cluster.
+DNS server configuration and installation
+=========================================
+A DNS server deployed on the Kubernetes cluster makes it easy to use services
+exposed through the ingress controller because it resolves all subdomains
+related to the ONAP cluster to the load balancer IP. Testing an ONAP cluster
+requires a lot of entries in /etc/hosts on the target machines. Adding many
+entries into the configuration files on testing machines is quite problematic
+and error prone. The better way is to create a central DNS server with entries
+for all virtual hosts pointed to simpledemo.onap.org and to add this custom
+DNS server as a target DNS server for testing machines and/or as external DNS
+for the Kubernetes cluster.
-DNS server has automatic instalation and configuration script, so instalation is quite easy::
+The DNS server has an automatic installation and configuration script, so
+installation is quite easy::
- > cd kubernetes/contrib/dns-server-for-vhost-ingress-testing
+ > cd kubernetes/contrib/dns-server-for-vhost-ingress-testing
- > ./deploy\_dns.sh
+ > ./deploy\_dns.sh
After DNS deploy you need to setup DNS entry on the target testing machine.
Because DNS listen on non standard port configuration require iptables rules
-on the target machine. Please follow the configuation proposed by the deploy scripts
+on the target machine. Please follow the configuration proposed by the deploy
+scripts.
Example output depends on the IP address and example output looks like bellow::
-
- DNS server already deployed:
- 1. You can add the DNS server to the target machine using following commands:
- sudo iptables -t nat -A OUTPUT -p tcp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555
- sudo iptables -t nat -A OUTPUT -p udp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555
- sudo sysctl -w net.ipv4.conf.all.route_localnet=1
- sudo sysctl -w net.ipv4.ip_forward=1
- 2. Update /etc/resolv.conf file with nameserver 192.168.211.211 entry on your target machine
+ DNS server already deployed:
+ 1. You can add the DNS server to the target machine using following commands:
+ sudo iptables -t nat -A OUTPUT -p tcp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555
+ sudo iptables -t nat -A OUTPUT -p udp -d 192.168.211.211 --dport 53 -j DNAT --to-destination 10.10.13.14:31555
+ sudo sysctl -w net.ipv4.conf.all.route_localnet=1
+ sudo sysctl -w net.ipv4.ip_forward=1
+ 2. Update /etc/resolv.conf file with nameserver 192.168.211.211 entry on your target machine
-MetalLB Load Balancer instalation and configuration
+MetalLB Load Balancer installation and configuration
====================================================
-By default pure Kubernetes cluster requires external load balancer if we want to expose
-external port using LoadBalancer settings. For this purpose MetalLB can be used.
-Before installing the MetalLB you need to ensure that at least one worker has assigned IP acessible outside the cluster.
+By default a pure Kubernetes cluster requires an external load balancer if we
+want to expose an external port using the LoadBalancer setting. For this
+purpose MetalLB can be used. Before installing MetalLB you need to ensure that
+at least one worker has an assigned IP accessible outside the cluster.
-MetalLB Load balanancer can be easily installed using automatic install script::
+MetalLB can be easily installed using the automatic install script::
- > cd kubernetes/contrib/metallb-loadbalancer-inst
+ > cd kubernetes/contrib/metallb-loadbalancer-inst
- > ./install-metallb-on-cluster.sh
+ > ./install-metallb-on-cluster.sh
-Configuration NGINX ingress controller
+NGINX ingress controller configuration
=======================================
-After installation DNS server and ingress controller we can install and configure ingress controller.
+After installing the DNS server and the MetalLB load balancer we can install
+and configure the NGINX ingress controller.
It can be done using the following commands::
- > cd kubernetes/contrib/ingress-nginx-post-inst
+ > cd kubernetes/contrib/ingress-nginx-post-inst
- > kubectl apply -f nginx_ingress_cluster_config.yaml
+ > kubectl apply -f nginx_ingress_cluster_config.yaml
- > kubectl apply -f nginx_ingress_enable_optional_load_balacer_service.yaml
+ > kubectl apply -f nginx_ingress_enable_optional_load_balacer_service.yaml
-After deploy NGINX ingress controller you can ensure that the ingress port is exposed as load balancer service
-with external IP address::
+After deploying the NGINX ingress controller you can ensure that the ingress
+port is exposed as a load balancer service with an external IP address::
- > kubectl get svc -n ingress-nginx
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- default-http-backend ClusterIP 10.10.10.10 <none> 80/TCP 25h
- ingress-nginx LoadBalancer 10.10.10.11 10.12.13.14 80:31308/TCP,443:30314/TCP 24h
+ > kubectl get svc -n ingress-nginx
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ default-http-backend ClusterIP 10.10.10.10 <none> 80/TCP 25h
+ ingress-nginx LoadBalancer 10.10.10.11 10.12.13.14 80:31308/TCP,443:30314/TCP 24h
ONAP with ingress exposed services
-=====================================
-If you want to deploy onap with services exposed through ingress controller you can use full onap deploy script::
- > onap/resources/overrides/onap-all-ingress-nginx-vhost.yaml
+==================================
+If you want to deploy ONAP with services exposed through the ingress controller
+you can use the full ONAP deploy override file::
+
+ > onap/resources/overrides/onap-all-ingress-nginx-vhost.yaml
Ingress also can be enabled on any onap setup override using following code:
.. code-block:: yaml
- <...>
- #ingress virtualhost based configuration
- global:
- <...>
- ingress:
- enabled: true
+
+ ---
+ <...>
+ #ingress virtualhost based configuration
+ global:
+ <...>
+ ingress:
+ enabled: true
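As a usage sketch, an ONAP deployment that picks up this override could reuse
the ``helm deploy`` command shown in the quick start guide; the release name
``dev`` and the master password value below are placeholders::

   > cd oom/kubernetes
   > helm deploy dev local/onap --namespace onap \
       --set global.masterPassword=myAwesomePasswordThatINeedToChange \
       -f onap/resources/overrides/onap-all-ingress-nginx-vhost.yaml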
diff --git a/docs/oom_setup_kubernetes_rancher.rst b/docs/oom_setup_kubernetes_rancher.rst
index 428fa59a4e..67ebca544b 100644
--- a/docs/oom_setup_kubernetes_rancher.rst
+++ b/docs/oom_setup_kubernetes_rancher.rst
@@ -1,6 +1,7 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
.. Links
.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements
@@ -19,9 +20,9 @@
ONAP on HA Kubernetes Cluster
#############################
-This guide provides instructions on how to setup a Highly-Available Kubernetes Cluster.
-For this, we are hosting our cluster on OpenStack VMs and using the Rancher Kubernetes Engine (RKE)
-to deploy and manage our Kubernetes Cluster.
+This guide provides instructions on how to setup a Highly-Available Kubernetes
+Cluster. For this, we are hosting our cluster on OpenStack VMs and using the
+Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster.
.. contents::
:depth: 1
@@ -40,12 +41,14 @@ The result at the end of this tutorial will be:
#. Installation and configuration of kubectl
-#. Installation and configuration of helm
+#. Installation and configuration of Helm
#. Creation of an NFS Server to be used by ONAP as shared persistance
-There are many ways one can execute the above steps. Including automation through the use of HEAT to setup the OpenStack VMs.
-To better illustrate the steps involved, we have captured the manual creation of such an environment using the ONAP Wind River Open Lab.
+There are many ways one can execute the above steps, including automation
+through the use of Heat to set up the OpenStack VMs. To better illustrate the
+steps involved, we have captured the manual creation of such an environment
+using the ONAP Wind River Open Lab.
Create Key Pair
===============
@@ -57,10 +60,12 @@ Use an existing key pair, import one or create a new one to assign.
.. image:: images/keys/key_pair_1.png
.. Note::
- If you're creating a new Key Pair, ensure to create a local copy of the Private Key through the use of "Copy Private Key to Clipboard".
+ If you're creating a new Key Pair, ensure to create a local copy of the
+ Private Key through the use of "Copy Private Key to Clipboard".
For the purpose of this guide, we will assume a new local key called "onap-key"
-has been downloaded and is copied into **~/.ssh/**, from which it can be referenced.
+has been downloaded and is copied into **~/.ssh/**, from which it can be
+referenced.
Example::
@@ -175,16 +180,17 @@ Launch Instance
Create Kubernetes Worker VMs
============================
The following instructions describe how to create OpenStack VMs to host the
-Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on these nodes.
+Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on
+these nodes.
Launch new VM instances
-----------------------
-The number and size of Worker VMs is depenedent on the size of the ONAP deployment.
-By default, all ONAP applications are deployed. It's possible to customize the deployment
-and enable a subset of the ONAP applications. For the purpose of this guide, however,
-we will deploy 12 Kubernetes Workers that have been sized to handle the entire ONAP
-application workload.
+The number and size of Worker VMs is dependent on the size of the ONAP
+deployment. By default, all ONAP applications are deployed. It's possible to
+customize the deployment and enable a subset of the ONAP applications. For the
+purpose of this guide, however, we will deploy 12 Kubernetes Workers that have
+been sized to handle the entire ONAP application workload.
.. image:: images/wk_vms/worker_1.png
@@ -223,8 +229,8 @@ Assign the key pair that was created/selected previously (e.g. onap_key).
Apply customization script for Kubernetes VM(s)
-----------------------------------------------
-Click :download:`openstack-k8s-workernode.sh <openstack-k8s-workernode.sh>` to download the
-script.
+Click :download:`openstack-k8s-workernode.sh <openstack-k8s-workernode.sh>` to
+download the script.
.. literalinclude:: openstack-k8s-workernode.sh
:language: bash
@@ -340,8 +346,8 @@ Download and install kubectl. Binaries can be found here for Linux and Mac:
https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/linux/amd64/kubectl
https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/darwin/amd64/kubectl
-You only need to install kubectl where you'll launch kubernetes command. This
-can be any machines of the kubernetes cluster or a machine that has IP access
+You only need to install kubectl where you'll launch Kubernetes commands. This
+can be any machine of the Kubernetes cluster or a machine that has IP access
to the APIs.
Usually, we use the first controller as it has also access to internal
Kubernetes services, which can be convenient.
@@ -451,8 +457,8 @@ Assign the key pair that was created/selected previously (e.g. onap_key).
Apply customization script for NFS Server VM
--------------------------------------------
-Click :download:`openstack-nfs-server.sh <openstack-nfs-server.sh>` to download the
-script.
+Click :download:`openstack-nfs-server.sh <openstack-nfs-server.sh>` to download
+the script.
.. literalinclude:: openstack-nfs-server.sh
:language: bash
@@ -507,7 +513,7 @@ the NFS Master node as input, e.g.::
ONAP Deployment via OOM
=======================
-Now that kubernetes and Helm are installed and configured you can prepare to
+Now that Kubernetes and Helm are installed and configured you can prepare to
deploy ONAP. Follow the instructions in the README.md_ or look at the official
documentation to get started:
diff --git a/docs/oom_user_guide.rst b/docs/oom_user_guide.rst
index 847795dc17..8a7ebfcb25 100644
--- a/docs/oom_user_guide.rst
+++ b/docs/oom_user_guide.rst
@@ -1,6 +1,7 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
.. _oom_user_guide:
.. Links
@@ -36,7 +37,8 @@ The following sections describe the life-cycle operations:
- Monitor_ - real-time health monitoring feeding to a Consul UI and Kubernetes
- Heal_- failed ONAP containers are recreated automatically
- Scale_ - cluster ONAP services to enable seamless scaling
-- Upgrade_ - change-out containers or configuration with little or no service impact
+- Upgrade_ - change-out containers or configuration with little or no service
+ impact
- Delete_ - cleanup individual containers or entire deployments
.. figure:: oomLogoV2-Deploy.png
@@ -365,19 +367,19 @@ Accessing the ONAP Portal using OOM and a Kubernetes Cluster
------------------------------------------------------------
The ONAP deployment created by OOM operates in a private IP network that isn't
-publicly accessible (i.e. Openstack VMs with private internal network) which
+publicly accessible (i.e. OpenStack VMs with private internal network) which
blocks access to the ONAP Portal. To enable direct access to this Portal from a
user's own environment (a laptop etc.) the portal application's port 8989 is
exposed through a `Kubernetes LoadBalancer`_ object.
Typically, to be able to access the Kubernetes nodes publicly a public address
-is assigned. In Openstack this is a floating IP address.
+is assigned. In OpenStack this is a floating IP address.
When the `portal-app` chart is deployed a Kubernetes service is created that
instantiates a load balancer. The LB chooses the private interface of one of
the nodes as in the example below (10.0.0.4 is private to the K8s cluster only).
Then to be able to access the portal on port 8989 from outside the K8s &
-Openstack environment, the user needs to assign/get the floating IP address that
+OpenStack environment, the user needs to assign/get the floating IP address that
corresponds to the private IP as follows::
> kubectl -n onap get services|grep "portal-app"
@@ -386,7 +388,7 @@ corresponds to the private IP as follows::
In this example, use the 10.0.0.4 private address as a key find the
corresponding public address which in this example is 10.12.6.155. If you're
-using OpenStack you'll do the lookup with the horizon GUI or the Openstack CLI
+using OpenStack you'll do the lookup with the horizon GUI or the OpenStack CLI
for your tenant (openstack server list). That IP is then used in your
`/etc/hosts` to map the fixed DNS aliases required by the ONAP Portal as shown
below::
@@ -451,8 +453,8 @@ Monitor
All highly available systems include at least one facility to monitor the
health of components within the system. Such health monitors are often used as
-inputs to distributed coordination systems (such as etcd, zookeeper, or consul)
-and monitoring systems (such as nagios or zabbix). OOM provides two mechanisms
+inputs to distributed coordination systems (such as etcd, Zookeeper, or Consul)
+and monitoring systems (such as Nagios or Zabbix). OOM provides two mechanisms
to monitor the real-time health of an ONAP deployment:
- a Consul GUI for a human operator or downstream monitoring systems and
@@ -609,7 +611,7 @@ Kubernetes and replaced with a new container with the new environment value.
To upgrade a component to a new version with a new configuration file enter::
- > helm deploy onbap onap/so --version 2.0.2 -f environments/demo.yaml
+ > helm deploy onap onap/so --version 2.0.2 -f environments/demo.yaml
To fetch release history enter::
diff --git a/docs/release-notes-dublin.rst b/docs/release-notes-dublin.rst
index 6201f56350..e948af5ebb 100644
--- a/docs/release-notes-dublin.rst
+++ b/docs/release-notes-dublin.rst
@@ -26,10 +26,12 @@ Summary
**Platform Resiliency**
* Documenation of a Highly-Available Kubernetes Cluster Deployment
-* Availability of a Default Storage Class Provisioner for improved Persistent Storage resiliency
+* Availability of a Default Storage Class Provisioner for improved Persistent
+ Storage resiliency
* Availability of a CNI reference integration for Multi-site support
- * applications can take advantage of multi-site by using POD and/or Node (anti)affinity, taints/tolerations, labels per application
+ * applications can take advantage of multi-site by using POD and/or Node
+ (anti)affinity, taints/tolerations, labels per application
**Footprint Optimization**
diff --git a/docs/release-notes-elalto.rst b/docs/release-notes-elalto.rst
index f23751d0ed..435889ef32 100644
--- a/docs/release-notes-elalto.rst
+++ b/docs/release-notes-elalto.rst
@@ -24,7 +24,8 @@ Version 5.0.1 (El Alto Release)
Summary
-------
-The focus of this release was on maintanence and as such no new features were delivered.
+The focus of this release was on maintenance and as such no new features were
+delivered.
A list of issues resolved in this release can be found here: https://jira.onap.org/projects/OOM/versions/10726
**New Features**
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index 5570d4d722..382b49961d 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -133,7 +133,7 @@ Workarounds
version 2.2.2 in global part of override file if the new check is needed.
- `OOM-2421 <https://jira.onap.org/browse/OOM-2421>`_ Workaround is to
undeploy/redeploy NBI.
-- `OOM-2422 <https://jira.onap.org/browse/OOM-2421>`_ Workaround is to create
+- `OOM-2422 <https://jira.onap.org/browse/OOM-2422>`_ Workaround is to create
first portal app service with service type Cluster IP then changing it to
NodePort or LoadBalancer so all the port are available.
diff --git a/kubernetes/aai b/kubernetes/aai
-Subproject f1e329c458c48530da36dd1c6b38b7548116bd6
+Subproject 46961c4794a17f72643bd491af6c159ea7e5338
diff --git a/kubernetes/dcaegen2/components/dcae-cloudify-manager/values.yaml b/kubernetes/dcaegen2/components/dcae-cloudify-manager/values.yaml
index b7ea4c9e6f..f5d1ffe1ed 100644
--- a/kubernetes/dcaegen2/components/dcae-cloudify-manager/values.yaml
+++ b/kubernetes/dcaegen2/components/dcae-cloudify-manager/values.yaml
@@ -46,7 +46,7 @@ config:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.cm-container:3.0.0
+image: onap/org.onap.dcaegen2.deployments.cm-container:3.1.0
pullPolicy: Always
# name of shared ConfigMap with kubeconfig for multiple clusters
diff --git a/kubernetes/dcaegen2/components/dcae-healthcheck/values.yaml b/kubernetes/dcaegen2/components/dcae-healthcheck/values.yaml
index ca9486f715..a818435a03 100644
--- a/kubernetes/dcaegen2/components/dcae-healthcheck/values.yaml
+++ b/kubernetes/dcaegen2/components/dcae-healthcheck/values.yaml
@@ -2,6 +2,7 @@
#=================================================================================
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
# Modifications Copyright © 2018 Amdocs, Bell Canada
+# Modifications Copyright © 2020 Nokia
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -45,7 +46,7 @@ readiness:
periodSeconds: 10
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.healthcheck-container:2.0.0
+image: onap/org.onap.dcaegen2.deployments.healthcheck-container:2.1.0
# Resource Limit flavor -By Default using small
flavor: small
diff --git a/kubernetes/dmaap/components/dmaap-bc/templates/dmaap-provisioning-job.yaml b/kubernetes/dmaap/components/dmaap-bc/templates/dmaap-provisioning-job.yaml
index cde35af14c..f8ef8d1452 100644
--- a/kubernetes/dmaap/components/dmaap-bc/templates/dmaap-provisioning-job.yaml
+++ b/kubernetes/dmaap/components/dmaap-bc/templates/dmaap-provisioning-job.yaml
@@ -5,7 +5,7 @@ metadata:
namespace: {{ include "common.namespace" . }}
labels: {{- include "common.labels" . | nindent 4 }}
spec:
- backoffLimit: 5
+ backoffLimit: 20
template:
metadata: {{- include "common.templateMetadata" . | nindent 6 }}
spec:
diff --git a/kubernetes/nbi/templates/deployment.yaml b/kubernetes/nbi/templates/deployment.yaml
index 22dd4a1ded..dc2f5dbb81 100644
--- a/kubernetes/nbi/templates/deployment.yaml
+++ b/kubernetes/nbi/templates/deployment.yaml
@@ -1,5 +1,6 @@
# Copyright © 2018 Orange
# Modifications Copyright © 2018 Amdocs, Bell Canada
+# Modifications Copyright © 2020 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "common.fullname" . }}
@@ -24,6 +25,9 @@ metadata:
release: {{ include "common.release" . }}
heritage: {{ .Release.Service }}
spec:
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
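Background for this hunk: in ``apps/v1`` the Deployment ``spec.selector`` field
is required and must match the pod template labels (it is also immutable after
creation), which is why the selector is added alongside the apiVersion bump. A
minimal stand-alone sketch of that shape, using illustrative names rather than
the chart's templated values::

   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: nbi
   spec:
     replicas: 1
     selector:
       matchLabels:
         app: nbi              # must match the labels on the pod template below
     template:
       metadata:
         labels:
           app: nbi
       spec:
         containers:
           - name: nbi
             image: example/nbi:latest   # placeholder image, not the chart's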
diff --git a/kubernetes/portal/components/portal-mariadb/templates/deployment.yaml b/kubernetes/portal/components/portal-mariadb/templates/deployment.yaml
index bcd223c7e6..ec6cc50634 100644
--- a/kubernetes/portal/components/portal-mariadb/templates/deployment.yaml
+++ b/kubernetes/portal/components/portal-mariadb/templates/deployment.yaml
@@ -34,6 +34,13 @@ spec:
app: {{ include "common.name" . }}
release: {{ include "common.release" . }}
spec:
+ initContainers:
+ - name: volume-permissions
+ image: "{{ .Values.global.busyboxRepository }}/{{ .Values.global.busyboxImage }}"
+ command: ['sh', '-c', 'chmod -R 777 /var/lib/mysql']
+ volumeMounts:
+ - mountPath: /var/lib/mysql
+ name: mariadb-data
containers:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
diff --git a/kubernetes/portal/components/portal-mariadb/values.yaml b/kubernetes/portal/components/portal-mariadb/values.yaml
index 08157f7b92..40b1775ae9 100644
--- a/kubernetes/portal/components/portal-mariadb/values.yaml
+++ b/kubernetes/portal/components/portal-mariadb/values.yaml
@@ -21,7 +21,8 @@ global: # global defaults
persistence: {}
readinessRepository: oomk8s
readinessImage: readiness-check:2.0.0
-
+ busyBoxImage: busybox:1.30
+ busyBoxRepository: docker.io
# application image
repository: nexus3.onap.org:10001
diff --git a/kubernetes/robot b/kubernetes/robot
-Subproject 1ab576be1d226a586738658943253f3e721e5a6
+Subproject b093c77b4faa2c4f0bfc67e481f724b6d67c722