Diffstat (limited to 'docs/archived')
-rw-r--r--  docs/archived/certs/hardcoded_certificates.csv  18
-rw-r--r--  docs/archived/images/consul/consulHealth.png  bin 0 -> 301016 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_1.png  bin 0 -> 318506 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_2.png  bin 0 -> 247673 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_3.png  bin 0 -> 246800 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_4.png  bin 0 -> 91840 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_5.png  bin 0 -> 206942 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_6.png  bin 0 -> 246780 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_7.png  bin 0 -> 145992 bytes
-rw-r--r--  docs/archived/images/cp_vms/control_plane_8.png  bin 0 -> 120550 bytes
-rw-r--r--  docs/archived/images/floating_ips/floating_1.png  bin 0 -> 30028 bytes
-rw-r--r--  docs/archived/images/floating_ips/floating_2.png  bin 0 -> 85017 bytes
-rw-r--r--  docs/archived/images/k8s/kubernetes_objects.png  bin 0 -> 41593 bytes
-rw-r--r--  docs/archived/images/keys/key_pair_1.png  bin 0 -> 296083 bytes
-rw-r--r--  docs/archived/images/keys/key_pair_2.png  bin 0 -> 310423 bytes
-rw-r--r--  docs/archived/images/keys/key_pair_3.png  bin 0 -> 271397 bytes
-rw-r--r--  docs/archived/images/keys/key_pair_4.png  bin 0 -> 155219 bytes
-rw-r--r--  docs/archived/images/msb/MSB-OOM-Diagram.png  bin 0 -> 77338 bytes
-rw-r--r--  docs/archived/images/network/network_1.png  bin 0 -> 127009 bytes
-rw-r--r--  docs/archived/images/network/network_2.png  bin 0 -> 133842 bytes
-rw-r--r--  docs/archived/images/network/network_3.png  bin 0 -> 105085 bytes
-rw-r--r--  docs/archived/images/network/network_4.png  bin 0 -> 97405 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_1.png  bin 0 -> 164108 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_10.png  bin 0 -> 20699 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_2.png  bin 0 -> 318506 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_3.png  bin 0 -> 247673 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_4.png  bin 0 -> 246800 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_5.png  bin 0 -> 91840 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_6.png  bin 0 -> 206942 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_7.png  bin 0 -> 246780 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_8.png  bin 0 -> 74954 bytes
-rw-r--r--  docs/archived/images/nfs_server/nfs_server_9.png  bin 0 -> 26251 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Configure.png  bin 0 -> 50668 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Delete.png  bin 0 -> 48038 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Deploy.png  bin 0 -> 48500 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Heal.png  bin 0 -> 48092 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Monitor.png  bin 0 -> 50051 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Scale.png  bin 0 -> 49430 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-Upgrade.png  bin 0 -> 49259 bytes
-rw-r--r--  docs/archived/images/oom_logo/oomLogoV2-medium.png  bin 0 -> 31090 bytes
-rw-r--r--  docs/archived/images/rke/rke_1.png  bin 0 -> 143906 bytes
-rw-r--r--  docs/archived/images/sg/sg_1.png  bin 0 -> 108229 bytes
-rw-r--r--  docs/archived/images/sg/sg_2.png  bin 0 -> 153078 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_1.png  bin 0 -> 126955 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_2.png  bin 0 -> 247673 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_3.png  bin 0 -> 96987 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_4.png  bin 0 -> 91840 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_5.png  bin 0 -> 206942 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_6.png  bin 0 -> 143838 bytes
-rw-r--r--  docs/archived/images/wk_vms/worker_7.png  bin 0 -> 222941 bytes
-rw-r--r--  docs/archived/oom_developer_guide.rst  1149
-rw-r--r--  docs/archived/oom_hardcoded_certificates.rst  18
-rw-r--r--  docs/archived/oom_quickstart_guide.rst  284
-rw-r--r--  docs/archived/oom_setup_kubernetes_rancher.rst  531
-rw-r--r--  docs/archived/oom_setup_paas.rst  144
-rw-r--r--  docs/archived/oom_user_guide.rst  798
-rw-r--r--  docs/archived/shell/master_nfs_node.sh  32
-rw-r--r--  docs/archived/shell/openstack-k8s-controlnode.sh  36
-rw-r--r--  docs/archived/shell/openstack-k8s-workernode.sh  34
-rw-r--r--  docs/archived/shell/openstack-nfs-server.sh  18
-rw-r--r--  docs/archived/shell/slave_nfs_node.sh  25
-rw-r--r--  docs/archived/yaml/cluster.yml  156
-rw-r--r--  docs/archived/yaml/example-integration-override-v3.yaml  69
-rw-r--r--  docs/archived/yaml/example-integration-override.yaml  56
64 files changed, 3368 insertions, 0 deletions
diff --git a/docs/archived/certs/hardcoded_certificates.csv b/docs/archived/certs/hardcoded_certificates.csv
new file mode 100644
index 0000000000..fbc7db3e11
--- /dev/null
+++ b/docs/archived/certs/hardcoded_certificates.csv
@@ -0,0 +1,18 @@
+Project,ONAP Certificate,Own Certificate,MSB Certificate,Path
+AAF,No,Yes,No,aaf/charts/aaf-cert-service/resources/
+AAF,Yes,No,No,aaf/components/aaf-sms/resources/certs/intermediate_root_ca.pem
+AAI,Yes,No,No,aai/oom/resources/config/aai/aai_keystore
+AAI/SEARCH-DATA,Yes,No,No,aai/oom/components/aai-search-data/resources/config/auth/tomcat_keystore
+AAI/SPARKY-BE,Yes,No,No,aai/oom/components/aai-sparky-be/resources/config/auth/org.onap.aai.p12
+AAI/BABEL,No,Yes,No,aai/oom/components/aai-babel/resources/config/auth/tomcat_keystore
+AAI/MODEL-LOADER,Yes,Yes,No,aai/oom/components/aai-model-loader/resources/config/auth/tomcat_keystore
+APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.keyfile
+APPC,Yes,No,No,kubernetes/appc/resources/config/certs/org.onap.appc.p12
+certInitializer,Yes,No,No,kubernetes/common/certInitializer/resources
+DMaaP/MR,Yes,No,No,Hardcoded in container
+HOLMES,No,Yes,No,Hardcoded in container
+MULTICLOUD,No,Yes,No,Hardcoded in container
+Robot,Yes,No,No,kubernetes/robot/resources/config/lighttpd/ssl/onap-robot.onap.*
+SDC,Yes,No?,No?,kubernetes/sdc/resources/cert
+VID,Yes,No,No,Hardcoded in container
+UUI,No,Yes,No,Hardcoded in container
diff --git a/docs/archived/images/consul/consulHealth.png b/docs/archived/images/consul/consulHealth.png
new file mode 100644
index 0000000000..cd7e730c39
--- /dev/null
+++ b/docs/archived/images/consul/consulHealth.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_1.png b/docs/archived/images/cp_vms/control_plane_1.png
new file mode 100644
index 0000000000..d59b9863b7
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_1.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_2.png b/docs/archived/images/cp_vms/control_plane_2.png
new file mode 100644
index 0000000000..9a7d72f8a5
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_2.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_3.png b/docs/archived/images/cp_vms/control_plane_3.png
new file mode 100644
index 0000000000..da329f20b5
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_3.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_4.png b/docs/archived/images/cp_vms/control_plane_4.png
new file mode 100644
index 0000000000..817355a99e
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_4.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_5.png b/docs/archived/images/cp_vms/control_plane_5.png
new file mode 100644
index 0000000000..33805c50dd
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_5.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_6.png b/docs/archived/images/cp_vms/control_plane_6.png
new file mode 100644
index 0000000000..9e8ab638bc
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_6.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_7.png b/docs/archived/images/cp_vms/control_plane_7.png
new file mode 100644
index 0000000000..f0db6d3f3f
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_7.png
Binary files differ
diff --git a/docs/archived/images/cp_vms/control_plane_8.png b/docs/archived/images/cp_vms/control_plane_8.png
new file mode 100644
index 0000000000..e20f631e60
--- /dev/null
+++ b/docs/archived/images/cp_vms/control_plane_8.png
Binary files differ
diff --git a/docs/archived/images/floating_ips/floating_1.png b/docs/archived/images/floating_ips/floating_1.png
new file mode 100644
index 0000000000..9f413164ab
--- /dev/null
+++ b/docs/archived/images/floating_ips/floating_1.png
Binary files differ
diff --git a/docs/archived/images/floating_ips/floating_2.png b/docs/archived/images/floating_ips/floating_2.png
new file mode 100644
index 0000000000..0001ef068c
--- /dev/null
+++ b/docs/archived/images/floating_ips/floating_2.png
Binary files differ
diff --git a/docs/archived/images/k8s/kubernetes_objects.png b/docs/archived/images/k8s/kubernetes_objects.png
new file mode 100644
index 0000000000..768a3adb99
--- /dev/null
+++ b/docs/archived/images/k8s/kubernetes_objects.png
Binary files differ
diff --git a/docs/archived/images/keys/key_pair_1.png b/docs/archived/images/keys/key_pair_1.png
new file mode 100644
index 0000000000..1135c93320
--- /dev/null
+++ b/docs/archived/images/keys/key_pair_1.png
Binary files differ
diff --git a/docs/archived/images/keys/key_pair_2.png b/docs/archived/images/keys/key_pair_2.png
new file mode 100644
index 0000000000..ac3bfc5ca2
--- /dev/null
+++ b/docs/archived/images/keys/key_pair_2.png
Binary files differ
diff --git a/docs/archived/images/keys/key_pair_3.png b/docs/archived/images/keys/key_pair_3.png
new file mode 100644
index 0000000000..1e0c0200f8
--- /dev/null
+++ b/docs/archived/images/keys/key_pair_3.png
Binary files differ
diff --git a/docs/archived/images/keys/key_pair_4.png b/docs/archived/images/keys/key_pair_4.png
new file mode 100644
index 0000000000..031a9ba785
--- /dev/null
+++ b/docs/archived/images/keys/key_pair_4.png
Binary files differ
diff --git a/docs/archived/images/msb/MSB-OOM-Diagram.png b/docs/archived/images/msb/MSB-OOM-Diagram.png
new file mode 100644
index 0000000000..4ee878d833
--- /dev/null
+++ b/docs/archived/images/msb/MSB-OOM-Diagram.png
Binary files differ
diff --git a/docs/archived/images/network/network_1.png b/docs/archived/images/network/network_1.png
new file mode 100644
index 0000000000..d51cb1280b
--- /dev/null
+++ b/docs/archived/images/network/network_1.png
Binary files differ
diff --git a/docs/archived/images/network/network_2.png b/docs/archived/images/network/network_2.png
new file mode 100644
index 0000000000..9498a460d3
--- /dev/null
+++ b/docs/archived/images/network/network_2.png
Binary files differ
diff --git a/docs/archived/images/network/network_3.png b/docs/archived/images/network/network_3.png
new file mode 100644
index 0000000000..c729405aef
--- /dev/null
+++ b/docs/archived/images/network/network_3.png
Binary files differ
diff --git a/docs/archived/images/network/network_4.png b/docs/archived/images/network/network_4.png
new file mode 100644
index 0000000000..cc8f96fac0
--- /dev/null
+++ b/docs/archived/images/network/network_4.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_1.png b/docs/archived/images/nfs_server/nfs_server_1.png
new file mode 100644
index 0000000000..912a10f055
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_1.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_10.png b/docs/archived/images/nfs_server/nfs_server_10.png
new file mode 100644
index 0000000000..7d87d1ca56
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_10.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_2.png b/docs/archived/images/nfs_server/nfs_server_2.png
new file mode 100644
index 0000000000..d59b9863b7
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_2.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_3.png b/docs/archived/images/nfs_server/nfs_server_3.png
new file mode 100644
index 0000000000..9a7d72f8a5
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_3.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_4.png b/docs/archived/images/nfs_server/nfs_server_4.png
new file mode 100644
index 0000000000..da329f20b5
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_4.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_5.png b/docs/archived/images/nfs_server/nfs_server_5.png
new file mode 100644
index 0000000000..817355a99e
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_5.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_6.png b/docs/archived/images/nfs_server/nfs_server_6.png
new file mode 100644
index 0000000000..33805c50dd
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_6.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_7.png b/docs/archived/images/nfs_server/nfs_server_7.png
new file mode 100644
index 0000000000..9e8ab638bc
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_7.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_8.png b/docs/archived/images/nfs_server/nfs_server_8.png
new file mode 100644
index 0000000000..14103fb9c3
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_8.png
Binary files differ
diff --git a/docs/archived/images/nfs_server/nfs_server_9.png b/docs/archived/images/nfs_server/nfs_server_9.png
new file mode 100644
index 0000000000..aa8bc140e1
--- /dev/null
+++ b/docs/archived/images/nfs_server/nfs_server_9.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Configure.png b/docs/archived/images/oom_logo/oomLogoV2-Configure.png
new file mode 100644
index 0000000000..bdb1ece10c
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Configure.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Delete.png b/docs/archived/images/oom_logo/oomLogoV2-Delete.png
new file mode 100644
index 0000000000..10c43d2fb3
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Delete.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Deploy.png b/docs/archived/images/oom_logo/oomLogoV2-Deploy.png
new file mode 100644
index 0000000000..706097cd6c
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Deploy.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Heal.png b/docs/archived/images/oom_logo/oomLogoV2-Heal.png
new file mode 100644
index 0000000000..97ac58e9ec
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Heal.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Monitor.png b/docs/archived/images/oom_logo/oomLogoV2-Monitor.png
new file mode 100644
index 0000000000..c9a184ac37
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Monitor.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Scale.png b/docs/archived/images/oom_logo/oomLogoV2-Scale.png
new file mode 100644
index 0000000000..140e5ca54f
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Scale.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-Upgrade.png b/docs/archived/images/oom_logo/oomLogoV2-Upgrade.png
new file mode 100644
index 0000000000..d51f6cfcde
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-Upgrade.png
Binary files differ
diff --git a/docs/archived/images/oom_logo/oomLogoV2-medium.png b/docs/archived/images/oom_logo/oomLogoV2-medium.png
new file mode 100644
index 0000000000..20aea693e0
--- /dev/null
+++ b/docs/archived/images/oom_logo/oomLogoV2-medium.png
Binary files differ
diff --git a/docs/archived/images/rke/rke_1.png b/docs/archived/images/rke/rke_1.png
new file mode 100644
index 0000000000..b27fc517df
--- /dev/null
+++ b/docs/archived/images/rke/rke_1.png
Binary files differ
diff --git a/docs/archived/images/sg/sg_1.png b/docs/archived/images/sg/sg_1.png
new file mode 100644
index 0000000000..ff5264d3c4
--- /dev/null
+++ b/docs/archived/images/sg/sg_1.png
Binary files differ
diff --git a/docs/archived/images/sg/sg_2.png b/docs/archived/images/sg/sg_2.png
new file mode 100644
index 0000000000..395057fc97
--- /dev/null
+++ b/docs/archived/images/sg/sg_2.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_1.png b/docs/archived/images/wk_vms/worker_1.png
new file mode 100644
index 0000000000..01314d1557
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_1.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_2.png b/docs/archived/images/wk_vms/worker_2.png
new file mode 100644
index 0000000000..9a7d72f8a5
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_2.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_3.png b/docs/archived/images/wk_vms/worker_3.png
new file mode 100644
index 0000000000..93d5e28cf2
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_3.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_4.png b/docs/archived/images/wk_vms/worker_4.png
new file mode 100644
index 0000000000..817355a99e
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_4.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_5.png b/docs/archived/images/wk_vms/worker_5.png
new file mode 100644
index 0000000000..33805c50dd
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_5.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_6.png b/docs/archived/images/wk_vms/worker_6.png
new file mode 100644
index 0000000000..c71c122217
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_6.png
Binary files differ
diff --git a/docs/archived/images/wk_vms/worker_7.png b/docs/archived/images/wk_vms/worker_7.png
new file mode 100644
index 0000000000..ecb13c1809
--- /dev/null
+++ b/docs/archived/images/wk_vms/worker_7.png
Binary files differ
diff --git a/docs/archived/oom_developer_guide.rst b/docs/archived/oom_developer_guide.rst
new file mode 100644
index 0000000000..f6513bdf83
--- /dev/null
+++ b/docs/archived/oom_developer_guide.rst
@@ -0,0 +1,1149 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
+.. Modification copyright (C) 2022 Nordix Foundation
+
+.. Links
+.. _Helm: https://docs.helm.sh/
+.. _Helm Charts: https://github.com/kubernetes/charts
+.. _Kubernetes: https://Kubernetes.io/
+.. _Docker: https://www.docker.com/
+.. _Nexus: https://nexus.onap.org/
+.. _AWS Elastic Block Store: https://aws.amazon.com/ebs/
+.. _Azure File: https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction
+.. _GCE Persistent Disk: https://cloud.google.com/compute/docs/disks/
+.. _Gluster FS: https://www.gluster.org/
+.. _Kubernetes Storage Class: https://Kubernetes.io/docs/concepts/storage/storage-classes/
+.. _Assigning Pods to Nodes: https://Kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+
+.. _developer-guide-label:
+
+OOM Developer Guide
+###################
+
+.. figure:: ../../resources/images/oom_logo/oomLogoV2-medium.png
+ :align: right
+
+ONAP consists of a large number of components, each of which is a substantial
+project in its own right, resulting in a high degree of complexity in
+deployment and management. To cope with this complexity the ONAP Operations
+Manager (OOM) uses a Helm_ model of ONAP - Helm being the primary management
+system for Kubernetes_ container systems - to drive all user driven life-cycle
+management operations. The Helm model of ONAP is composed of a set of
+hierarchical Helm charts that define the structure of the ONAP components and
+the configuration of these components. These charts are fully parameterized
+such that a single environment file defines all of the parameters needed to
+deploy ONAP. A user of ONAP may maintain several such environment files to
+control the deployment of ONAP in multiple environments such as development,
+pre-production, and production.
+
+The following sections describe how the ONAP Helm charts are constructed.
+
+.. contents::
+ :depth: 3
+ :local:
+..
+
+Container Background
+====================
+Linux containers allow for an application and all of its operating system
+dependencies to be packaged and deployed as a single unit without including a
+guest operating system as done with virtual machines. The most popular
+container solution is Docker_ which provides tools for container management
+like the Docker Host (dockerd) which can create, run, stop, move, or delete a
+container. Docker has a very popular registry of container images that can be
+used by any Docker system; however, in the ONAP context, Docker images are
+built by the standard CI/CD flow and stored in Nexus_ repositories. OOM uses
+the "standard" ONAP docker containers and three new ones specifically created
+for OOM.
+
+Containers are isolated from each other primarily via name spaces within the
+Linux kernel without the need for multiple guest operating systems. As such,
+multiple containers can be deployed with so little overhead that all of ONAP
+can be deployed on a single host. With some optimization of the ONAP components
+(e.g. elimination of redundant database instances) it may be possible to deploy
+ONAP on a single laptop computer.
+
+Helm Charts
+===========
+A Helm chart is a collection of files that describe a related set of Kubernetes
+resources. A simple chart might be used to deploy something simple, like a
+memcached pod, while a complex chart might contain many micro-services arranged
+in a hierarchy as found in the `aai` ONAP component.
+
+Charts are created as files laid out in a particular directory tree, then they
+can be packaged into versioned archives to be deployed. There is a public
+archive of `Helm Charts`_ on GitHub that includes many technologies applicable
+to ONAP. Some of these charts have been used in ONAP and all of the ONAP charts
+have been created following the guidelines provided.
+
+The top level of the ONAP charts is shown below:
+
+.. code-block:: bash
+
+ common
+ ├── cassandra
+ │   ├── Chart.yaml
+ │   ├── resources
+ │   │   ├── config
+ │   │   │   └── docker-entrypoint.sh
+ │   │   ├── exec.py
+ │   │   └── restore.sh
+ │   ├── templates
+ │   │   ├── backup
+ │   │   │   ├── configmap.yaml
+ │   │   │   ├── cronjob.yaml
+ │   │   │   ├── pv.yaml
+ │   │   │   └── pvc.yaml
+ │   │   ├── configmap.yaml
+ │   │   ├── pv.yaml
+ │   │   ├── service.yaml
+ │   │   └── statefulset.yaml
+ │   └── values.yaml
+ ├── common
+ │   ├── Chart.yaml
+ │   ├── templates
+ │   │   ├── _createPassword.tpl
+ │   │   ├── _ingress.tpl
+ │   │   ├── _labels.tpl
+ │   │   ├── _mariadb.tpl
+ │   │   ├── _name.tpl
+ │   │   ├── _namespace.tpl
+ │   │   ├── _repository.tpl
+ │   │   ├── _resources.tpl
+ │   │   ├── _secret.yaml
+ │   │   ├── _service.tpl
+ │   │   ├── _storage.tpl
+ │   │   └── _tplValue.tpl
+ │   └── values.yaml
+ ├── ...
+ └── postgres-legacy
+    ├── Chart.yaml
+ ├── charts
+ └── configs
+
+The common section of charts consists of a set of templates that assist with
+parameter substitution (`_name.tpl`, `_namespace.tpl` and others) and a set of
+charts for components used throughout ONAP. When the common components are used
+by other charts they are either instantiated each time they are used, or a
+shared instance can be deployed and used by several components.
+
+All of the ONAP components have charts that follow the pattern shown below:
+
+.. code-block:: bash
+
+ name-of-my-component
+ ├── Chart.yaml
+ ├── component
+ │   └── subcomponent-folder
+ ├── charts
+ │   └── subchart-folder
+ ├── resources
+ │   ├── folder1
+ │   │   ├── file1
+ │   │   └── file2
+ │   └── folder2
+ │       ├── file3
+ │       └── folder3
+ │           └── file4
+ ├── templates
+ │   ├── NOTES.txt
+ │   ├── configmap.yaml
+ │   ├── deployment.yaml
+ │   ├── ingress.yaml
+ │   ├── job.yaml
+ │   ├── secrets.yaml
+ │   └── service.yaml
+ └── values.yaml
+
+Note that the component charts / components may include a hierarchy of
+sub-components and in themselves can be quite complex.
+
+You can use either the `charts` or the `components` folder for your
+subcomponents. The `charts` folder means that the subcomponent will always be
+deployed.
+
+The `components` folder means you can choose whether to deploy the
+subcomponent.
+
+This choice is made in the root `values.yaml`:
+
+.. code-block:: yaml
+
+  ---
+  global:
+    key: value
+
+  component1:
+    enabled: true
+  component2:
+    enabled: true
+
+Then, in the `Chart.yaml` dependencies section, you'll use these values:
+
+.. code-block:: yaml
+
+  ---
+  dependencies:
+    - name: common
+      version: ~x.y-0
+      repository: '@local'
+    - name: component1
+      version: ~x.y-0
+      repository: 'file://components/component1'
+      condition: component1.enabled
+    - name: component2
+      version: ~x.y-0
+      repository: 'file://components/component2'
+      condition: component2.enabled
+
+Configuration of the components varies somewhat from component to component but
+generally follows the pattern of one or more `configmap.yaml` files which can
+directly provide configuration to the containers in addition to processing
+configuration files stored in the `config` directory. It is the responsibility
+of each ONAP component team to update these configuration files when changes
+are made to the project containers that impact configuration.
+
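+As an illustrative sketch only (not lifted from a specific chart; the
+`common.*` helpers are the ones described later in this guide), a minimal
+`configmap.yaml` template that loads every file found under `resources/config`
+could look like this:
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    name: {{ include "common.fullname" . }}-config
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  data:
+  # pull every file found under resources/config into the ConfigMap verbatim
+  {{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
+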
+The following section describes how the hierarchical ONAP configuration system
+is key to management of such a large system.
+
+Configuration Management
+========================
+
+ONAP is a large system composed of many components - each of which are complex
+systems in themselves - that needs to be deployed in a number of different
+ways. For example, within a single operator's network there may be R&D
+deployments under active development, pre-production versions undergoing system
+testing and production systems that are operating live networks. Each of these
+deployments will differ in significant ways, such as the version of the
+software images deployed. In addition, there may be a number of application
+specific configuration differences, such as operating system environment
+variables. The following describes how the Helm configuration management
+system is used within the OOM project to manage both ONAP infrastructure
+configuration as well as ONAP components configuration.
+
+One of the artifacts that OOM/Kubernetes uses to deploy ONAP components is the
+deployment specification, yet another yaml file. Within these deployment specs
+are a number of parameters as shown in the following example:
+
+.. code-block:: yaml
+
+  apiVersion: apps/v1
+  kind: StatefulSet
+  metadata:
+    labels:
+      app.kubernetes.io/name: zookeeper
+      helm.sh/chart: zookeeper
+      app.kubernetes.io/component: server
+      app.kubernetes.io/managed-by: Tiller
+      app.kubernetes.io/instance: onap-oof
+    name: onap-oof-zookeeper
+    namespace: onap
+  spec:
+    <...>
+    replicas: 3
+    selector:
+      matchLabels:
+        app.kubernetes.io/name: zookeeper
+        app.kubernetes.io/component: server
+        app.kubernetes.io/instance: onap-oof
+    serviceName: onap-oof-zookeeper-headless
+    template:
+      metadata:
+        labels:
+          app.kubernetes.io/name: zookeeper
+          helm.sh/chart: zookeeper
+          app.kubernetes.io/component: server
+          app.kubernetes.io/managed-by: Tiller
+          app.kubernetes.io/instance: onap-oof
+      spec:
+        <...>
+        affinity:
+        containers:
+          - name: zookeeper
+            <...>
+            image: gcr.io/google_samples/k8szk:v3
+            imagePullPolicy: Always
+            <...>
+            ports:
+              - containerPort: 2181
+                name: client
+                protocol: TCP
+              - containerPort: 3888
+                name: election
+                protocol: TCP
+              - containerPort: 2888
+                name: server
+                protocol: TCP
+    <...>
+
+Note that within the statefulset specification, one of the container arguments
+is the key/value pair ``image: gcr.io/google_samples/k8szk:v3``, which
+specifies the version of the zookeeper software to deploy. Although the
+statefulset specifications greatly simplify deployment, maintenance of the
+statefulset specifications themselves becomes problematic as software versions
+change over time or as different versions are required for different
+statefulsets. For example, if the R&D team needs to deploy a newer version of
+mariadb than what is currently used in the production environment, they would
+need to clone the statefulset specification and change this value. Fortunately,
+this problem has been solved with the templating capabilities of Helm.
+
+The following example shows how the statefulset specifications are modified to
+incorporate Helm templates such that key/value pairs can be defined outside of
+the statefulset specifications and passed during instantiation of the component.
+
+.. code-block:: yaml
+
+  apiVersion: apps/v1
+  kind: StatefulSet
+  metadata:
+    name: {{ include "common.fullname" . }}
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  spec:
+    replicas: {{ .Values.replicaCount }}
+    selector:
+      matchLabels: {{- include "common.matchLabels" . | nindent 6 }}
+    # serviceName is only needed for StatefulSet
+    # put the postfix part only if you have added a postfix to the service name
+    serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }}
+    <...>
+    template:
+      metadata:
+        labels: {{- include "common.labels" . | nindent 8 }}
+        annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+        name: {{ include "common.name" . }}
+      spec:
+        <...>
+        containers:
+          - name: {{ include "common.name" . }}
+            image: {{ .Values.image }}
+            imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+            ports:
+              {{- range $index, $port := .Values.service.ports }}
+              - containerPort: {{ $port.port }}
+                name: {{ $port.name }}
+              {{- end }}
+              {{- range $index, $port := .Values.service.headlessPorts }}
+              - containerPort: {{ $port.port }}
+                name: {{ $port.name }}
+              {{- end }}
+            <...>
+
+This version of the statefulset specification has gone through the process of
+templating values that are likely to change between statefulsets. Note that the
+image is now specified as ``image: {{ .Values.image }}`` instead of the
+hard-coded string used previously. During deployment, Helm (actually the Helm
+sub-component Tiller) substitutes the ``{{ .. }}`` entries with the variables
+defined in a values.yaml file. The content of this file is as follows:
+
+.. code-block:: yaml
+
+ <...>
+ image: gcr.io/google_samples/k8szk:v3
+ replicaCount: 3
+ <...>
+
+
+Within the values.yaml file there is an image key with the value
+`gcr.io/google_samples/k8szk:v3` which is the same value used in
+the non-templated version. Once all of the substitutions are complete, the
+resulting statefulset specification is ready to be used by Kubernetes.
+
+When creating a template consider the use of default values if appropriate.
+Helm templating has built-in support for default values; here is
+an example:
+
+.. code-block:: yaml
+
+ imagePullSecrets:
+ - name: "{{ .Values.nsPrefix | default "onap" }}-docker-registry-key"
+
+The pipeline operator ("|") used here hints at the power of Helm templates:
+much like on an operating system command line, the pipeline operator allows
+over 60 Helm functions to be embedded directly into the template (note that the
+Helm template language is a superset of the Go template language). These
+functions include simple string operations like ``upper`` and more complex flow
+control operations like if/else.
+
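+As a purely illustrative sketch (the value names ``logLevel`` and
+``tls.enabled`` are made up for this example), pipelines and if/else blocks
+can be combined in a template as follows:
+
+.. code-block:: yaml
+
+  env:
+    # default the log level to "info" and upper-case it for the container
+    - name: LOG_LEVEL
+      value: "{{ .Values.logLevel | default "info" | upper }}"
+  {{- if .Values.tls.enabled }}
+    - name: SCHEME
+      value: "https"
+  {{- else }}
+    - name: SCHEME
+      value: "http"
+  {{- end }}
+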
+OOM is mainly Helm templating. In order to have consistent deployment of the
+different components of ONAP, some rules must be followed.
+
+Templates are provided in order to create Kubernetes resources (Secrets,
+Ingress, Services, ...) or part of Kubernetes resources (names, labels,
+resources requests and limits, ...).
+
+A full list with a short description of each is available in
+`kubernetes/common/common/documentation.rst`.
+
+Service template
+----------------
+
+In order to create a Service for a component, you have to create a file (with
+`service` in the name).
+For a normal service, just put the following line:
+
+.. code-block:: yaml
+
+ {{ include "common.service" . }}
+
+For a headless service, the line to put is the following:
+
+.. code-block:: yaml
+
+ {{ include "common.headlessService" . }}
+
+The configuration of the service is done in the component's `values.yaml`:
+
+.. code-block:: yaml
+
+  service:
+    name: NAME-OF-THE-SERVICE
+    postfix: MY-POSTFIX
+    type: NodePort
+    annotations:
+      someAnnotationsKey: value
+    ports:
+      - name: tcp-MyPort
+        port: 5432
+        nodePort: 88
+      - name: http-api
+        port: 8080
+        nodePort: 89
+      - name: https-api
+        port: 9443
+        nodePort: 90
+
+The `annotations` and `postfix` keys are optional.
+If `service.type` is `NodePort`, then you have to give a `nodePort` value for
+your service ports (which forms the final digits of the computed nodePort, see
+the example below).
+
+It would render the following Service Resource (for a component named
+`name-of-my-component`, with version `x.y.z`, helm deployment name
+`my-deployment` and `global.nodePortPrefix` `302`):
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: Service
+  metadata:
+    annotations:
+      someAnnotationsKey: value
+    name: NAME-OF-THE-SERVICE-MY-POSTFIX
+    labels:
+      app.kubernetes.io/name: name-of-my-component
+      helm.sh/chart: name-of-my-component-x.y.z
+      app.kubernetes.io/instance: my-deployment-name-of-my-component
+      app.kubernetes.io/managed-by: Tiller
+  spec:
+    ports:
+      - port: 5432
+        targetPort: tcp-MyPort
+        nodePort: 30288
+      - port: 8080
+        targetPort: http-api
+        nodePort: 30289
+      - port: 9443
+        targetPort: https-api
+        nodePort: 30290
+    selector:
+      app.kubernetes.io/name: name-of-my-component
+      app.kubernetes.io/instance: my-deployment-name-of-my-component
+    type: NodePort
+
+In the deployment or statefulSet file, you need to set the right labels in
+order for the service to match the pods.
+
+Here's an example to be sure it matches (for a statefulSet):
+
+.. code-block:: yaml
+
+  apiVersion: apps/v1
+  kind: StatefulSet
+  metadata:
+    name: {{ include "common.fullname" . }}
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  spec:
+    selector:
+      matchLabels: {{- include "common.matchLabels" . | nindent 6 }}
+    # serviceName is only needed for StatefulSet
+    # put the postfix part only if you have added a postfix to the service name
+    serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }}
+    <...>
+    template:
+      metadata:
+        labels: {{- include "common.labels" . | nindent 8 }}
+        annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+        name: {{ include "common.name" . }}
+      spec:
+        <...>
+        containers:
+          - name: {{ include "common.name" . }}
+            ports:
+              {{- range $index, $port := .Values.service.ports }}
+              - containerPort: {{ $port.port }}
+                name: {{ $port.name }}
+              {{- end }}
+              {{- range $index, $port := .Values.service.headlessPorts }}
+              - containerPort: {{ $port.port }}
+                name: {{ $port.name }}
+              {{- end }}
+            <...>
+
+The configuration of the headless service is also done in the component's
+`values.yaml`:
+
+.. code-block:: yaml
+
+  service:
+    name: NAME-OF-THE-SERVICE
+    headless:
+      postfix: NONE
+      annotations:
+        anotherAnnotationsKey: value
+      publishNotReadyAddresses: true
+    headlessPorts:
+      - name: tcp-MyPort
+        port: 5432
+      - name: http-api
+        port: 8080
+      - name: https-api
+        port: 9443
+
+The `headless.annotations`, `headless.postfix` and
+`headless.publishNotReadyAddresses` keys are optional.
+
+If `headless.postfix` is not set, then `-headless` is appended to the
+service name.
+
+If it is set to `NONE`, no postfix is added.
+
+If it is set to any other value, `-<value>` is appended to the service
+name.
+
+It would render the following Service Resource (for a component named
+`name-of-my-component`, with version `x.y.z`, helm deployment name
+`my-deployment` and `global.nodePortPrefix` `302`):
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: Service
+  metadata:
+    annotations:
+      anotherAnnotationsKey: value
+    name: NAME-OF-THE-SERVICE
+    labels:
+      app.kubernetes.io/name: name-of-my-component
+      helm.sh/chart: name-of-my-component-x.y.z
+      app.kubernetes.io/instance: my-deployment-name-of-my-component
+      app.kubernetes.io/managed-by: Tiller
+  spec:
+    clusterIP: None
+    ports:
+      - port: 5432
+        targetPort: tcp-MyPort
+      - port: 8080
+        targetPort: http-api
+      - port: 9443
+        targetPort: https-api
+    publishNotReadyAddresses: true
+    selector:
+      app.kubernetes.io/name: name-of-my-component
+      app.kubernetes.io/instance: my-deployment-name-of-my-component
+    type: ClusterIP
+
+The previous StatefulSet example would also match (except for the `postfix`
+part, obviously).
+
+Creating Deployment or StatefulSet
+----------------------------------
+
+Deployment and StatefulSet should use the `apps/v1` API version (which
+appeared in Kubernetes v1.9).
+As seen in the service part, the following parts are mandatory:
+
+.. code-block:: yaml
+
+  apiVersion: apps/v1
+  kind: StatefulSet
+  metadata:
+    name: {{ include "common.fullname" . }}
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  spec:
+    selector:
+      matchLabels: {{- include "common.matchLabels" . | nindent 6 }}
+    # serviceName is only needed for StatefulSet
+    # put the postfix part only if you have added a postfix to the service name
+    serviceName: {{ include "common.servicename" . }}-{{ .Values.service.postfix }}
+    <...>
+    template:
+      metadata:
+        labels: {{- include "common.labels" . | nindent 8 }}
+        annotations: {{- include "common.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
+        name: {{ include "common.name" . }}
+      spec:
+        <...>
+        containers:
+          - name: {{ include "common.name" . }}
+
+ONAP Application Configuration
+------------------------------
+
+Dependency Management
+---------------------
+These Helm charts describe the desired state
+of an ONAP deployment and instruct the Kubernetes container manager as to how
+to maintain the deployment in this state. These dependencies dictate the order
+in which the containers are started for the first time such that these
+dependencies are always met without arbitrary sleep times between container
+startups. For example, the SDC back-end container requires the Elastic-Search,
+Cassandra and Kibana containers within SDC to be ready and is also dependent on
+DMaaP (or the message-router) to be ready - where ready implies the built-in
+"readiness" probes succeeded - before becoming fully operational. When an
+initial deployment of ONAP is requested the current state of the system is NULL
+so ONAP is deployed by the Kubernetes manager as a set of Docker containers on
+one or more predetermined hosts. The hosts could be physical machines or
+virtual machines. When deploying on virtual machines the resulting system will
+be very similar to "Heat" based deployments, i.e. Docker containers running
+within a set of VMs, the primary difference being that the allocation of
+containers to VMs is done dynamically with OOM and statically with "Heat".
+The example SO deployment descriptor file below shows SO's dependency on its
+mariadb database component:
+
+SO deployment specification excerpt:
+
+.. code-block:: yaml
+
+  apiVersion: apps/v1
+  kind: Deployment
+  metadata:
+    name: {{ include "common.fullname" . }}
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  spec:
+    replicas: {{ .Values.replicaCount }}
+    selector:
+      matchLabels: {{- include "common.matchLabels" . | nindent 6 }}
+    template:
+      metadata:
+        labels:
+          app: {{ include "common.name" . }}
+          release: {{ .Release.Name }}
+      spec:
+        initContainers:
+          - command:
+              - /app/ready.py
+            args:
+              - --container-name
+              - so-mariadb
+            env:
+            ...
+
+Kubernetes Container Orchestration
+==================================
+The ONAP components are managed by the Kubernetes_ container management system
+which maintains the desired state of the container system as described by one
+or more deployment descriptors - similar in concept to OpenStack HEAT
+Orchestration Templates. The following sections describe the fundamental
+objects managed by Kubernetes, the network these components use to communicate
+with each other and other entities outside of ONAP and the templates that
+describe the configuration and desired state of the ONAP components.
+
+Name Spaces
+-----------
+Within the namespaces are Kubernetes services that provide external
+connectivity to pods that host Docker containers.
+
+ONAP Components to Kubernetes Object Relationships
+--------------------------------------------------
+Kubernetes deployments consist of multiple objects:
+
+- **nodes** - a worker machine - either physical or virtual - that hosts
+ multiple containers managed by Kubernetes.
+- **services** - an abstraction of a logical set of pods that provide a
+ micro-service.
+- **pods** - one or more (but typically one) container(s) that provide specific
+ application functionality.
+- **persistent volumes** - One or more permanent volumes need to be established
+ to hold non-ephemeral configuration and state data.
+
+The relationship between these objects is shown in the following figure:
+
+.. .. uml::
+..
+.. @startuml
+.. node PH {
+.. component Service {
+.. component Pod0
+.. component Pod1
+.. }
+.. }
+..
+.. database PV
+.. @enduml
+
+.. figure:: ../../resources/images/k8s/kubernetes_objects.png
+
+OOM uses these Kubernetes objects as described in the following sections.
+
+Nodes
+~~~~~
+OOM works with both physical and virtual worker machines.
+
+* Virtual Machine Deployments - If ONAP is to be deployed onto a set of virtual
+ machines, the creation of the VMs is outside of the scope of OOM and could be
+ done in many ways, such as
+
+ * manually, for example by a user using the OpenStack Horizon dashboard or
+ AWS EC2, or
+ * automatically, for example with the use of an OpenStack Heat Orchestration
+ Template which builds an ONAP stack, Azure ARM template, AWS CloudFormation
+ Template, or
+ * orchestrated, for example with Cloudify creating the VMs from a TOSCA
+ template and controlling their life cycle for the life of the ONAP
+ deployment.
+
+* Physical Machine Deployments - If ONAP is to be deployed onto physical
+ machines there are several options but the recommendation is to use Rancher
+ along with Helm to associate hosts with a Kubernetes cluster.
+
+Pods
+~~~~
+A group of containers with shared storage and networking can be grouped
+together into a Kubernetes pod. All of the containers within a pod are
+co-located and co-scheduled so they operate as a single unit. Within ONAP
+Amsterdam release, pods are mapped one-to-one to docker containers although
+this may change in the future. As explained in the Services section below the
+use of Pods within each ONAP component is abstracted from other ONAP
+components.
+
+Services
+~~~~~~~~
+OOM uses the Kubernetes service abstraction to provide a consistent access
+point for each of the ONAP components independent of the pod or container
+architecture of that component. For example, the SDNC component may introduce
+OpenDaylight clustering as some point and change the number of pods in this
+component to three or more but this change will be isolated from the other ONAP
+components by the service abstraction. A service can include a load balancer
+on its ingress to distribute traffic between the pods and even react to dynamic
+changes in the number of pods if they are part of a replica set.
+
+Persistent Volumes
+~~~~~~~~~~~~~~~~~~
+To enable ONAP to be deployed into a wide variety of cloud infrastructures a
+flexible persistent storage architecture, built on Kubernetes persistent
+volumes, provides the ability to define the physical storage in a central
+location and have all ONAP components securely store their data.
+
+When deploying ONAP into a public cloud, available storage services such as
+`AWS Elastic Block Store`_, `Azure File`_, or `GCE Persistent Disk`_ are
+options. Alternatively, when deploying into a private cloud the storage
+architecture might consist of Fibre Channel, `Gluster FS`_, or iSCSI. Many
+other storage options exist; refer to the `Kubernetes Storage Class`_
+documentation for a full list of the options. The storage architecture may vary
+from deployment to deployment but in all cases a reliable, redundant storage
+system must be provided to ONAP with which the state information of all ONAP
+components will be securely stored. The Storage Class for a given deployment is
+a single parameter listed in the ONAP values.yaml file and therefore is easily
+customized. Operation of this storage system is outside the scope of the OOM.
+
+.. code-block:: yaml
+
+ Insert values.yaml code block with storage block here
+
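+For illustration only (the parameter names below are assumptions, not
+necessarily the exact layout used by every chart), such a storage block in a
+component values.yaml could look like:
+
+.. code-block:: yaml
+
+  persistence:
+    enabled: true
+    # "-" is commonly used to mean "use the cluster default StorageClass"
+    storageClass: "-"
+    accessMode: ReadWriteOnce
+    size: 2Gi
+    mountPath: /dockerdata-nfs
+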
+Once the storage class is selected and the physical storage is provided, the
+ONAP deployment step creates a pool of persistent volumes within the given
+physical storage that is used by all of the ONAP components. ONAP components
+simply make a claim on these persistent volumes (PV), with a persistent volume
+claim (PVC), to gain access to their storage.
+
+The following figure illustrates the relationships between the persistent
+volume claims, the persistent volumes, the storage class, and the physical
+storage.
+
+.. graphviz::
+
+ digraph PV {
+ label = "Persistent Volume Claim to Physical Storage Mapping"
+ {
+ node [shape=cylinder]
+ D0 [label="Drive0"]
+ D1 [label="Drive1"]
+ Dx [label="Drivex"]
+ }
+ {
+ node [shape=Mrecord label="StorageClass:ceph"]
+ sc
+ }
+ {
+ node [shape=point]
+ p0 p1 p2
+ p3 p4 p5
+ }
+ subgraph clusterSDC {
+ label="SDC"
+ PVC0
+ PVC1
+ }
+ subgraph clusterSDNC {
+ label="SDNC"
+ PVC2
+ }
+ subgraph clusterSO {
+ label="SO"
+ PVCn
+ }
+ PV0 -> sc
+ PV1 -> sc
+ PV2 -> sc
+ PVn -> sc
+
+ sc -> {D0 D1 Dx}
+ PVC0 -> PV0
+ PVC1 -> PV1
+ PVC2 -> PV2
+ PVCn -> PVn
+
+ # force all of these nodes to the same line in the given order
+ subgraph {
+ rank = same; PV0;PV1;PV2;PVn;p0;p1;p2
+ PV0->PV1->PV2->p0->p1->p2->PVn [style=invis]
+ }
+
+ subgraph {
+ rank = same; D0;D1;Dx;p3;p4;p5
+ D0->D1->p3->p4->p5->Dx [style=invis]
+ }
+
+ }
+
+In order for an ONAP component to use a persistent volume it must make a claim
+against a specific persistent volume defined in the ONAP common charts. Note
+that there is a one-to-one relationship between a PVC and PV. The following is
+an excerpt from a component chart that defines a PVC:
+
+.. code-block:: yaml
+
+ Insert PVC example here
+
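+As a minimal sketch (assuming the `persistence` values shown earlier and the
+`common.*` helpers used throughout this guide), such a PVC template could look
+like:
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: PersistentVolumeClaim
+  metadata:
+    name: {{ include "common.fullname" . }}-data
+    namespace: {{ include "common.namespace" . }}
+    labels: {{- include "common.labels" . | nindent 4 }}
+  spec:
+    accessModes:
+      - {{ .Values.persistence.accessMode }}
+    storageClassName: {{ .Values.persistence.storageClass }}
+    resources:
+      requests:
+        storage: {{ .Values.persistence.size }}
+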
+OOM Networking with Kubernetes
+------------------------------
+
+- DNS
+- Ports - Flattening the containers also exposes port conflicts between the
+  containers, which need to be resolved.
+
+Node Ports
+~~~~~~~~~~
+
+Pod Placement Rules
+-------------------
+OOM will use the rich set of Kubernetes node and pod affinity /
+anti-affinity rules to minimize the chance of a single failure resulting in a
+loss of ONAP service. Node affinity / anti-affinity is used to guide the
+Kubernetes orchestrator in the placement of pods on nodes (physical or virtual
+machines). For example:
+
+- if a container used Intel DPDK technology the pod may state that it has
+  affinity to an Intel processor based node, or
+- geographical based node labels (such as the Kubernetes standard zone or
+ region labels) may be used to ensure placement of a DCAE complex close to the
+ VNFs generating high volumes of traffic thus minimizing networking cost.
+ Specifically, if nodes were pre-assigned labels East and West, the pod
+ deployment spec to distribute pods to these nodes would be:
+
+.. code-block:: yaml
+
+  nodeSelector:
+    failure-domain.beta.kubernetes.io/region: {{ .Values.location }}
+
+- "location: West" is specified in the `values.yaml` file used to deploy
+ one DCAE cluster and "location: East" is specified in a second `values.yaml`
+ file (see OOM Configuration Management for more information about
+ configuration files like the `values.yaml` file).
+
+Node affinity can also be used to achieve geographic redundancy if pods are
+assigned to multiple failure domains. For more information refer to `Assigning
+Pods to Nodes`_.
+
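+As a sketch only (the zone values are placeholders), a pod spec restricted to
+two failure domains with node affinity could look like:
+
+.. code-block:: yaml
+
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: topology.kubernetes.io/zone
+                operator: In
+                values:
+                  - zone-east
+                  - zone-west
+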
+.. note::
+ One could use Pod to Node assignment to totally constrain Kubernetes when
+ doing initial container assignment to replicate the Amsterdam release
+ OpenStack Heat based deployment. Should one wish to do this, each VM would
+ need a unique node name which would be used to specify a node constraint
+ for every component. These assignments could be specified in an environment
+ specific values.yaml file. Constraining Kubernetes in this way is not
+ recommended.
+
+Kubernetes has a comprehensive system called Taints and Tolerations that can be
+used to force the container orchestrator to repel pods from nodes based on
+static events (an administrator assigning a taint to a node) or dynamic events
+(such as a node becoming unreachable or running out of disk space). There are
+no plans to use taints or tolerations in the ONAP Beijing release. Pod
+affinity / anti-affinity is the concept of creating a spatial relationship
+between pods when the Kubernetes orchestrator does assignment (both initially
+and in operation) to nodes as explained in Inter-pod affinity and anti-affinity.
+For example, one might choose to co-locate all of the ONAP SDC containers on a
+single node as they are not critical runtime components and co-location
+minimizes overhead. On the other hand, one might choose to ensure that all of
+the containers in an ODL cluster (SDNC and APPC) are placed on separate nodes
+such that a node failure has minimal impact to the operation of the cluster.
+An example of pod affinity / anti-affinity is shown below:
+
+Pod Affinity / Anti-Affinity
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    name: with-pod-affinity
+  spec:
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: security
+                  operator: In
+                  values:
+                    - S1
+            topologyKey: failure-domain.beta.kubernetes.io/zone
+      podAntiAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                  - key: security
+                    operator: In
+                    values:
+                      - S2
+              topologyKey: kubernetes.io/hostname
+    containers:
+      - name: with-pod-affinity
+        image: gcr.io/google_containers/pause:2.0
+
+This example contains both podAffinity and podAntiAffinity rules: the first
+rule is a must (requiredDuringSchedulingIgnoredDuringExecution) while the
+second will be met pending other considerations
+(preferredDuringSchedulingIgnoredDuringExecution).
+
+Preemption: another feature
+that may assist in achieving a repeatable deployment in the presence of faults
+that may have reduced the capacity of the cloud is assigning priority to the
+containers such that mission critical components have the ability to evict less
+critical components. Kubernetes provides this capability with Pod Priority and
+Preemption. Prior to having more advanced production grade features available,
+the ability to at least be able to re-deploy ONAP (or a subset of) reliably
+provides a level of confidence that should an outage occur the system can be
+brought back on-line predictably.
+
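+As an illustrative sketch (the class name and value are arbitrary), a
+PriorityClass and a pod spec referencing it could look like:
+
+.. code-block:: yaml
+
+  apiVersion: scheduling.k8s.io/v1
+  kind: PriorityClass
+  metadata:
+    name: onap-critical        # illustrative name
+  value: 1000000
+  globalDefault: false
+  description: "Mission critical components that may preempt less critical pods"
+  ---
+  # pods then opt in through their spec
+  spec:
+    priorityClassName: onap-critical
+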
+Health Checks
+-------------
+
+Monitoring of ONAP components is configured in the Consul agents within JSON
+files stored in gerrit under consul-agent-config; here is an example from the
+AAI model loader (aai-model-loader-health.json):
+
+.. code-block:: json
+
+  {
+    "service": {
+      "name": "A&AI Model Loader",
+      "checks": [
+        {
+          "id": "model-loader-process",
+          "name": "Model Loader Presence",
+          "script": "/consul/config/scripts/model-loader-script.sh",
+          "interval": "15s",
+          "timeout": "1s"
+        }
+      ]
+    }
+  }
+
+Liveness Probes
+---------------
+
+These liveness probes can simply check that a port is available, that a
+built-in health check is reporting good health, or that the Consul health check
+is positive. For example, the following liveness probe used to monitor the SDNC
+component can be found in the SDNC DB deployment specification:
+
+.. code-block:: yaml
+
+  # sdnc db liveness probe
+  livenessProbe:
+    exec:
+      command: ["mysqladmin", "ping"]
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 5
+
+The 'initialDelaySeconds' controls the period of time between the readiness
+probe succeeding and the liveness probe starting. 'periodSeconds' and
+'timeoutSeconds' control the actual operation of the probe. Note that
+containers are inherently ephemeral so the healing action destroys failed
+containers and any state information within it. To avoid a loss of state, a
+persistent volume should be used to store all data that needs to be persisted
+over the re-creation of a container. Persistent volumes have been created for
+the database components of each of the projects and the same technique can be
+used for all persistent state information.
+
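+As a short sketch of that technique (names are illustrative), a database
+container would mount its state directory from a PVC so the data survives
+container re-creation:
+
+.. code-block:: yaml
+
+  containers:
+    - name: mariadb
+      volumeMounts:
+        - name: data
+          mountPath: /var/lib/mysql
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: {{ include "common.fullname" . }}-data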
+
+
+Environment Files
+~~~~~~~~~~~~~~~~~
+
+MSB Integration
+===============
+
+The \ `Microservices Bus
+Project <https://wiki.onap.org/pages/viewpage.action?pageId=3246982>`__ provides
+facilities to integrate micro-services into ONAP and therefore needs to
+integrate into OOM - primarily through Consul which is the backend of
+MSB service discovery. The following is a brief description of how this
+integration will be done:
+
+A registrator is used to push the service endpoint info to MSB service
+discovery.
+
+- The needed service endpoint info is put into the kubernetes yaml file
+  as an annotation, including service name, protocol, version, visual
+  range, LB method, IP, port, etc.
+
+- OOM deploys/starts/restarts/scales in/scales out/upgrades ONAP components
+
+- The registrator watches kubernetes events
+
+- When an ONAP component instance has been started/destroyed by OOM,
+  the registrator gets the notification from kubernetes
+
+- The registrator parses the service endpoint info from the annotation and
+  registers/updates/unregisters it with MSB service discovery
+
+- MSB API Gateway uses the service endpoint info for service routing
+ and load balancing.
+
+Details of the registration service API can be found at \ `Microservice
+Bus API
+Documentation <https://wiki.onap.org/display/DW/Microservice+Bus+API+Documentation>`__.
+
+ONAP Component Registration to MSB
+----------------------------------
+The charts of all ONAP components intending to register against MSB must have
+an annotation in their service(s) template. A `sdc` example follows:
+
+.. code-block:: yaml
+
+  apiVersion: v1
+  kind: Service
+  metadata:
+    labels:
+      app: sdc-be
+    name: sdc-be
+    namespace: "{{ .Values.nsPrefix }}"
+    annotations:
+      msb.onap.org/service-info: '[
+        {
+          "serviceName": "sdc",
+          "version": "v1",
+          "url": "/sdc/v1",
+          "protocol": "REST",
+          "port": "8080",
+          "visualRange":"1"
+        },
+        {
+          "serviceName": "sdc-deprecated",
+          "version": "v1",
+          "url": "/sdc/v1",
+          "protocol": "REST",
+          "port": "8080",
+          "visualRange":"1",
+          "path":"/sdc/v1"
+        }
+        ]'
+  ...
+
+
+MSB Integration with OOM
+------------------------
+A preliminary view of the OOM-MSB integration is as follows:
+
+.. figure:: ../../resources/images/msb/MSB-OOM-Diagram.png
+
+A message sequence chart of the registration process:
+
+.. uml::
+
+ participant "OOM" as oom
+ participant "ONAP Component" as onap
+ participant "Service Discovery" as sd
+ participant "External API Gateway" as eagw
+ participant "Router (Internal API Gateway)" as iagw
+
+ box "MSB" #LightBlue
+ participant sd
+ participant eagw
+ participant iagw
+ end box
+
+ == Deploy Service ==
+
+ oom -> onap: Deploy
+ oom -> sd: Register service endpoints
+ sd -> eagw: Services exposed to external system
+ sd -> iagw: Services for internal use
+
+ == Component Life-cycle Management ==
+
+ oom -> onap: Start/Stop/Scale/Migrate/Upgrade
+ oom -> sd: Update service info
+ sd -> eagw: Update service info
+ sd -> iagw: Update service info
+
+ == Service Health Check ==
+
+ sd -> onap: Check the health of service
+ sd -> eagw: Update service status
+ sd -> iagw: Update service status
+
+
+MSB Deployment Instructions
+---------------------------
+MSB is a Helm-installable ONAP component which is often automatically deployed.
+To install it individually, enter::
+
+ > helm install <repo-name>/msb
+
+.. note::
+ TBD: Validate if the following procedure is still required.
+
+Please note that the Kubernetes authentication token must be set in
+*kubernetes/kube2msb/values.yaml* so the kube2msb registrator can get
+access to watch the kubernetes events and get service annotations via the
+Kubernetes APIs. The token can be found in the kubectl configuration file
+*~/.kube/config*
+
+More details can be found here `MSB installation <https://docs.onap.org/projects/onap-msb-apigateway/en/latest/platform/installation.html>`_.
+
+.. MISC
+.. ====
+.. Note that although OOM uses Kubernetes facilities to minimize the effort
+.. required of the ONAP component owners to implement a successful rolling
+.. upgrade strategy there are other considerations that must be taken into
+.. consideration.
+.. For example, external APIs - both internal and external to ONAP - should be
+.. designed to gracefully accept transactions from a peer at a different
+.. software version to avoid deadlock situations. Embedded version codes in
+.. messages may facilitate such capabilities.
+..
+.. Within each of the projects a new configuration repository contains all of
+.. the project specific configuration artifacts. As changes are made within
+.. the project, it's the responsibility of the project team to make appropriate
+.. changes to the configuration data.
diff --git a/docs/archived/oom_hardcoded_certificates.rst b/docs/archived/oom_hardcoded_certificates.rst
new file mode 100644
index 0000000000..326cd3980f
--- /dev/null
+++ b/docs/archived/oom_hardcoded_certificates.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018 Amdocs, Bell Canada, 2020 Nokia Solutions and Networks
+
+:orphan:
+
+.. Links
+.. _hardcoded-certificates-label:
+
+ONAP Hardcoded certificates
+###########################
+
+The current ONAP installation has hardcoded certificates.
+Here is the list of these certificates:
+
+.. csv-table::
+ :file: certs/hardcoded_certificates.csv
diff --git a/docs/archived/oom_quickstart_guide.rst b/docs/archived/oom_quickstart_guide.rst
new file mode 100644
index 0000000000..b7749b1056
--- /dev/null
+++ b/docs/archived/oom_quickstart_guide.rst
@@ -0,0 +1,284 @@
+.. This work is licensed under a
+.. Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2019-2020 Amdocs, Bell Canada, Orange, Samsung
+.. _oom_quickstart_guide:
+.. _quick-start-label:
+
+OOM Quick Start Guide
+#####################
+
+.. figure:: images/oom_logo/oomLogoV2-medium.png
+ :align: right
+
+Once a Kubernetes environment is available (follow the instructions in
+:ref:`cloud-setup-guide-label` if you don't have a cloud environment
+available), follow the instructions below to deploy ONAP.
+
+**Step 1.** Clone the OOM repository from ONAP gerrit::
+
+ > git clone -b <BRANCH> http://gerrit.onap.org/r/oom --recurse-submodules
+ > cd oom/kubernetes
+
+where <BRANCH> can be an official release tag, such as
+
+* 4.0.0-ONAP for Dublin
+* 5.0.1-ONAP for El Alto
+* 6.0.0 for Frankfurt
+* 7.0.0 for Guilin
+* 8.0.0 for Honolulu
+* 9.0.0 for Istanbul
+* 10.0.0 for Jakarta
+* 11.0.0 for Kohn
+
+**Step 2.** Install Helm Plugins required to deploy ONAP::
+
+ > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins
+ > helm plugin install https://github.com/chartmuseum/helm-push.git \
+ --version 0.9.0
+
+.. note::
+  The ``--version 0.9.0`` flag is required because newer versions of helm
+  (3.7.0 and up) use ``push`` directly, and helm-push uses ``cm-push``
+  starting with version ``0.10.0``.
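+
+To confirm that the plugins are visible to Helm, you can list them::
+
+  > helm plugin list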
+
+**Step 3.** Install Chartmuseum::
+
+ > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum
+ > chmod +x ./chartmuseum
+ > mv ./chartmuseum /usr/local/bin
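+
+As a simple check that the binary is installed and on your PATH, print its
+version::
+
+  > chartmuseum --version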
+
+**Step 4.** Install Cert-Manager::
+
+ > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml
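+
+To verify that the Cert-Manager pods have started (a quick sanity check, the
+namespace is created by the manifest above)::
+
+  > kubectl get pods --namespace cert-manager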
+
+More details can be found :doc:`here <oom_setup_paas>`.
+
+**Step 4.1** Install Strimzi Kafka Operator:
+
+- Add the helm repo::
+
+ > helm repo add strimzi https://strimzi.io/charts/
+
+- Install the operator::
+
+ > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version 0.28.0 --set watchAnyNamespace=true --create-namespace
+
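+To verify that the operator has started, you can list the pods in the namespace
+created above (a quick sanity check, not a full validation)::
+
+  > kubectl get pods -n strimzi-system
+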
+More details can be found :doc:`here <oom_setup_paas>`.
+
+**Step 5.** Customize the Helm charts like `oom/kubernetes/onap/values.yaml` or
+an override file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml`
+to suit your deployment with items like the OpenStack tenant information.
+
+.. note::
+ Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`)
+ can be found in the `oom/kubernetes/onap/resources/overrides/` directory.
+
+
+ a. You may want to selectively enable or disable ONAP components by changing
+ the ``enabled: true/false`` flags.
+
+
+ b. Encrypt the OpenStack password using the shell tool for Robot and put it in
+ the Robot Helm charts or Robot section of `openstack.yaml`
+
+
+ c. Encrypt the OpenStack password using the java based script for SO Helm
+ charts or SO section of `openstack.yaml`.
+
+
+ d. Update the OpenStack parameters that will be used by Robot, SO and APPC Helm
+ charts or use an override file to replace them.
+
+ e. Add in the command line a value for the global master password
+ (global.masterPassword).
+
+
+
+a. Enabling/Disabling Components:
+Here is an example of the nominal entries that need to be provided.
+We have different values files available for different contexts.
+
+.. literalinclude:: ../kubernetes/onap/values.yaml
+ :language: yaml
+
+
+b. Generating ROBOT Encrypted Password:
+The Robot encrypted password uses the same encryption.key as SO but an
+openssl algorithm that works with the Python-based Robot Framework.
+
+.. note::
+ To generate Robot ``openStackEncryptedPasswordHere``::
+
+    cd oom/kubernetes/so/resources/config/mso/
+    echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p
+
+c. Generating SO Encrypted Password:
+The SO encrypted password uses a Java-based encryption utility since the
+Java encryption library is not easy to integrate with the openssl/python
+tooling that Robot uses in Dublin and later versions.
+
+.. note::
+ To generate SO ``openStackEncryptedPasswordHere`` and ``openStackSoEncryptedPassword``
+ ensure `default-jdk` is installed::
+
+ apt-get update; apt-get install default-jdk
+
+ Then execute::
+
+ SO_ENCRYPTION_KEY=`cat ~/oom/kubernetes/so/resources/config/mso/encryption.key`
+ OS_PASSWORD=XXXX_OS_CLEARTESTPASSWORD_XXXX
+
+ git clone http://gerrit.onap.org/r/integration
+ cd integration/deployment/heat/onap-rke/scripts
+
+ javac Crypto.java
+ java Crypto "$OS_PASSWORD" "$SO_ENCRYPTION_KEY"
+
+d. Update the OpenStack parameters:
+
+There are assumptions in the demonstration VNF Heat templates about the
+networking available in the environment. To get the most value out of these
+templates and the automation that can help confirm the setup is correct, please
+observe the following constraints.
+
+
+``openStackPublicNetId:``
+ This network should allow Heat templates to add interfaces.
+  This need not be an external network; floating IPs can be assigned to the
+  ports on the VMs that are created by the heat template, but it is important
+  that neutron allows ports to be created on them.
+
+``openStackPrivateNetCidr: "10.0.0.0/16"``
+  This IP address block is used to assign OA&M addresses on VNFs to allow ONAP
+  connectivity. The demonstration Heat templates assume that the 10.0 prefix
+  can be used by the VNFs and that the demonstration IP addressing plan
+  embodied in the preload template prevents conflicts when instantiating the
+  various VNFs. If
+ you need to change this, you will need to modify the preload data in the
+ Robot Helm chart like integration_preload_parameters.py and the
+ demo/heat/preload_data in the Robot container. The size of the CIDR should
+ be sufficient for ONAP and the VMs you expect to create.
+
+``openStackOamNetworkCidrPrefix: "10.0"``
+  This IP prefix must match the openStackPrivateNetCidr and is a helper
+  variable to some of the Robot scripts for demonstration. A production
+  deployment need not worry about this setting, but for the demonstration VNFs
+  the IP assignment strategy assumes the 10.0 IP prefix.
+
+Example Keystone v2.0
+
+.. literalinclude:: yaml/example-integration-override.yaml
+ :language: yaml
+
+Example Keystone v3 (required for Rocky and later releases)
+
+.. literalinclude:: yaml/example-integration-override-v3.yaml
+ :language: yaml
+
+
+**Step 6.** To set up a local Helm server to serve the ONAP charts::
+
+ > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 &
+
+Note the port number that is listed and use it in the Helm repo add as
+follows::
+
+ > helm repo add local http://127.0.0.1:8879
+
+**Step 7.** Verify your Helm repository setup with::
+
+ > helm repo list
+ NAME URL
+ local http://127.0.0.1:8879
+
+**Step 8.** Build a local Helm repository (from the kubernetes directory)::
+
+ > make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] all ; make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] onap
+
+`HELM_BIN`
+  Sets the helm binary to be used. The default value uses helm from PATH.
+
+
+**Step 9.** Display the ONAP charts that are available to be deployed::
+
+ > helm repo update
+ > helm search repo onap
+
+.. literalinclude:: helm/helm-search.txt
+
+.. note::
+ The setup of the Helm repository is a one time activity. If you make changes
+ to your deployment charts or values be sure to use ``make`` to update your
+ local Helm repository.
+
+**Step 10.** Once the repo is set up, installation of ONAP can be done with a
+single command
+
+.. note::
+  The ``--timeout 900s`` option is currently required in Dublin and later
+  versions to address long-running initialization tasks for DMaaP
+ and SO. Without this timeout value both applications may fail to
+ deploy.
+
+.. danger::
+ We've added the master password on the command line.
+  You shouldn't put it in a file for safety reasons;
+  please don't forget to change the value to something random.
+
+ A space is also added in front of the command so "history" doesn't catch it.
+ This masterPassword is very sensitive, please be careful!
+
+
+To deploy all ONAP applications use this command::
+
+ > cd oom/kubernetes
+ > helm deploy dev local/onap --namespace onap --create-namespace --set global.masterPassword=myAwesomePasswordThatINeedToChange -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/environment.yaml -f onap/resources/overrides/openstack.yaml --timeout 900s
+
+All override files may be customized (or replaced by other overrides) as per
+needs.
+
+`onap-all.yaml`
+ Enables the modules in the ONAP deployment. As ONAP is very modular, it is
+ possible to customize ONAP and disable some components through this
+ configuration file.
+
+`onap-all-ingress-nginx-vhost.yaml`
+  Alternative version of `onap-all.yaml` but with the global ingress controller
+  enabled. It requires the cluster to be configured with the nginx ingress
+  controller and load balancer. Please use this file instead of `onap-all.yaml`
+  if you want to use the experimental ingress controller feature.
+
+`environment.yaml`
+ Includes configuration values specific to the deployment environment.
+
+ Example: adapt readiness and liveness timers to the level of performance of
+ your infrastructure
+
+`openstack.yaml`
+ Includes all the OpenStack related information for the default target tenant
+ you want to use to deploy VNFs from ONAP and/or additional parameters for the
+ embedded tests.
+
+**Step 11.** Verify ONAP installation
+
+Use the following to monitor your deployment and determine when ONAP is ready
+for use::
+
+ > kubectl get pods -n onap -o=wide
+
+.. note::
+ While all pods may be in a Running state, it is not a guarantee that all
+ components are running fine.
+
+ Launch the healthcheck tests using Robot to verify that the components are
+ healthy::
+
+ > ~/oom/kubernetes/robot/ete-k8s.sh onap health
+
+**Step 12.** Undeploy ONAP
+::
+
+ > helm undeploy dev
+
+More examples of using the deploy and undeploy plugins can be found here:
+https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins
diff --git a/docs/archived/oom_setup_kubernetes_rancher.rst b/docs/archived/oom_setup_kubernetes_rancher.rst
new file mode 100644
index 0000000000..767b93925e
--- /dev/null
+++ b/docs/archived/oom_setup_kubernetes_rancher.rst
@@ -0,0 +1,531 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018-2020 Amdocs, Bell Canada, Orange, Samsung
+
+.. Links
+.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements
+.. _kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
+.. _Kubernetes documentation for emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
+.. _Docker DevOps: https://wiki.onap.org/display/DW/Docker+DevOps#DockerDevOps-DockerBuild
+.. _http://cd.onap.info:30223/mso/logging/debug: http://cd.onap.info:30223/mso/logging/debug
+.. _Onboarding and Distributing a Vendor Software Product: https://wiki.onap.org/pages/viewpage.action?pageId=1018474
+.. _README.md: https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/README.md
+
+.. figure:: images/oom_logo/oomLogoV2-medium.png
+ :align: right
+
+.. _onap-on-kubernetes-with-rancher:
+
+ONAP on HA Kubernetes Cluster
+#############################
+
+This guide provides instructions on how to setup a Highly-Available Kubernetes
+Cluster. For this, we are hosting our cluster on OpenStack VMs and using the
+Rancher Kubernetes Engine (RKE) to deploy and manage our Kubernetes Cluster.
+
+.. contents::
+ :depth: 1
+ :local:
+..
+
+The result at the end of this tutorial will be:
+
+#. Creation of a Key Pair to use with Open Stack and RKE
+
+#. Creation of OpenStack VMs to host Kubernetes Control Plane
+
+#. Creation of OpenStack VMs to host Kubernetes Workers
+
+#. Installation and configuration of RKE to setup an HA Kubernetes
+
+#. Installation and configuration of kubectl
+
+#. Installation and configuration of Helm
+
+#. Creation of an NFS Server to be used by ONAP as shared persistence
+
+There are many ways one can execute the above steps, including automation
+through the use of HEAT to set up the OpenStack VMs. To better illustrate the
+steps involved, we have captured the manual creation of such an environment
+using the ONAP Wind River Open Lab.
+
+Create Key Pair
+===============
+A Key Pair is required to access the created OpenStack VMs and will be used by
+RKE to configure the VMs for Kubernetes.
+
+Use an existing key pair, import one or create a new one to assign.
+
+.. image:: images/keys/key_pair_1.png
+
+.. Note::
+ If you're creating a new Key Pair, ensure to create a local copy of the
+ Private Key through the use of "Copy Private Key to Clipboard".
+
+For the purpose of this guide, we will assume a new local key called "onap-key"
+has been downloaded and is copied into **~/.ssh/**, from which it can be
+referenced.
+
+Example::
+
+ > mv onap-key ~/.ssh
+
+ > chmod 600 ~/.ssh/onap-key
+
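+Once a VM is up later in this guide, the key can be used to log in. The user
+name and address below are illustrative and depend on your image and floating
+IP assignment::
+
+  > ssh -i ~/.ssh/onap-key ubuntu@<vm-floating-ip>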
+
+Create Network
+==============
+
+An internal network is required in order to deploy our VMs that will host
+Kubernetes.
+
+.. image:: images/network/network_1.png
+
+.. image:: images/network/network_2.png
+
+.. image:: images/network/network_3.png
+
+.. Note::
+ It's better to have one network per deployment and obviously the name of this
+ network should be unique.
+
+Now we need to create a router to attach this network to the outside:
+
+.. image:: images/network/network_4.png
+
+Create Security Group
+=====================
+
+A specific security group is also required
+
+.. image:: images/sg/sg_1.png
+
+Then click on `manage rules` of the newly created security group.
+Finally, click on `Add Rule` and create the following one:
+
+.. image:: images/sg/sg_2.png
+
+.. Note::
+  The security configuration shown here is clearly not good; a proper SG will
+  be proposed in a future version.
+
+Create Kubernetes Control Plane VMs
+===================================
+
+The following instructions describe how to create 3 OpenStack VMs to host the
+Highly-Available Kubernetes Control Plane.
+ONAP workloads will not be scheduled on these Control Plane nodes.
+
+Launch new VM instances
+-----------------------
+
+.. image:: images/cp_vms/control_plane_1.png
+
+Select Ubuntu 18.04 as base image
+---------------------------------
+Select "No" for "Create New Volume"
+
+.. image:: images/cp_vms/control_plane_2.png
+
+Select Flavor
+-------------
+The recommended flavor is at least 4 vCPUs and 8 GB of RAM.
+
+.. image:: images/cp_vms/control_plane_3.png
+
+Networking
+----------
+
+Use the created network:
+
+.. image:: images/cp_vms/control_plane_4.png
+
+Security Groups
+---------------
+
+Use the created security group:
+
+.. image:: images/cp_vms/control_plane_5.png
+
+Key Pair
+--------
+Assign the key pair that was created/selected previously (e.g. onap_key).
+
+.. image:: images/cp_vms/control_plane_6.png
+
+Apply customization script for Control Plane VMs
+------------------------------------------------
+
+Click :download:`openstack-k8s-controlnode.sh <shell/openstack-k8s-controlnode.sh>`
+to download the script.
+
+.. literalinclude:: shell/openstack-k8s-controlnode.sh
+ :language: bash
+
+This customization script will:
+
+* update ubuntu
+* install docker
+
+.. image:: images/cp_vms/control_plane_7.png
+
+Launch Instance
+---------------
+
+.. image:: images/cp_vms/control_plane_8.png
+
+
+
+Create Kubernetes Worker VMs
+============================
+The following instructions describe how to create OpenStack VMs to host the
+Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on
+these nodes.
+
+Launch new VM instances
+-----------------------
+
+The number and size of Worker VMs is dependent on the size of the ONAP
+deployment. By default, all ONAP applications are deployed. It's possible to
+customize the deployment and enable a subset of the ONAP applications. For the
+purpose of this guide, however, we will deploy 12 Kubernetes Workers that have
+been sized to handle the entire ONAP application workload.
+
+.. image:: images/wk_vms/worker_1.png
+
+Select Ubuntu 18.04 as base image
+---------------------------------
+Select "No" on "Create New Volume"
+
+.. image:: images/wk_vms/worker_2.png
+
+Select Flavor
+-------------
+The size of the Kubernetes hosts depends on the size of the ONAP deployment
+being installed.
+
+If a small subset of ONAP applications is being deployed
+(e.g. for testing purposes), then 16 GB or 32 GB may be sufficient.
+
+.. image:: images/wk_vms/worker_3.png
+
+Networking
+-----------
+
+.. image:: images/wk_vms/worker_4.png
+
+Security Group
+---------------
+
+.. image:: images/wk_vms/worker_5.png
+
+Key Pair
+--------
+Assign the key pair that was created/selected previously (e.g. onap_key).
+
+.. image:: images/wk_vms/worker_6.png
+
+Apply customization script for Kubernetes VM(s)
+-----------------------------------------------
+
+Click :download:`openstack-k8s-workernode.sh <shell/openstack-k8s-workernode.sh>` to
+download the script.
+
+.. literalinclude:: shell/openstack-k8s-workernode.sh
+ :language: bash
+
+This customization script will:
+
+* update ubuntu
+* install docker
+* install nfs common
+
+
+Launch Instance
+---------------
+
+.. image:: images/wk_vms/worker_7.png
+
+
+
+
+Assign Floating IP addresses
+----------------------------
+Assign Floating IPs to all Control Plane and Worker VMs.
+These addresses provide external access to the VMs and will be used by RKE
+to configure kubernetes on the VMs.
+
+Repeat the following for each VM previously created:
+
+.. image:: images/floating_ips/floating_1.png
+
+The resulting floating IP assignments in this example:
+
+.. image:: images/floating_ips/floating_2.png
+
+
+
+
+Configure Rancher Kubernetes Engine (RKE)
+=========================================
+
+Install RKE
+-----------
+Download and install RKE on a VM, desktop or laptop.
+Binaries can be found here for Linux and Mac: https://github.com/rancher/rke/releases/tag/v1.0.6
+
+.. note::
+ There are several ways to install RKE. Further parts of this documentation
+  assume that you have the rke command available.
+  If you don't know how to install RKE you may follow the steps below:
+
+  * chmod +x ./rke_linux-amd64
+  * sudo mv ./rke_linux-amd64 /usr/local/bin/rke
+
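+As a quick sanity check that the binary is available on your PATH, print its
+version::
+
+  > rke --version
+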
+RKE requires a *cluster.yml* as input. An example file is shown below that
+describes a Kubernetes cluster that will be mapped onto the OpenStack VMs
+created earlier in this guide.
+
+Click :download:`cluster.yml <yaml/cluster.yml>` to download the
+configuration file.
+
+.. literalinclude:: yaml/cluster.yml
+ :language: yaml
+
+Prepare cluster.yml
+-------------------
+Before this configuration file can be used the external **address**
+and the **internal_address** must be mapped for each control and worker node
+in this file.
+
+Run RKE
+-------
+From within the same directory as the cluster.yml file, simply execute::
+
+ > rke up
+
+The output will look something like::
+
+ INFO[0000] Initiating Kubernetes cluster
+ INFO[0000] [certificates] Generating admin certificates and kubeconfig
+ INFO[0000] Successfully Deployed state file at [./cluster.rkestate]
+ INFO[0000] Building Kubernetes cluster
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.82]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.249]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.74]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.85]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.238]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.89]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.5.11]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.90]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.244]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.5.165]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.126]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.111]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.5.160]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.5.191]
+ INFO[0000] [dialer] Setup tunnel for host [10.12.6.195]
+ INFO[0002] [network] Deploying port listener containers
+ INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.85]
+ INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89]
+ INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.90]
+ INFO[0011] [network] Successfully pulled image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89]
+ . . . .
+ INFO[0309] [addons] Setting up Metrics Server
+ INFO[0309] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes
+ INFO[0309] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes
+ INFO[0309] [addons] Executing deploy job rke-metrics-addon
+ INFO[0315] [addons] Metrics Server deployed successfully
+ INFO[0315] [ingress] Setting up nginx ingress controller
+ INFO[0315] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes
+ INFO[0316] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes
+ INFO[0316] [addons] Executing deploy job rke-ingress-controller
+ INFO[0322] [ingress] ingress controller nginx deployed successfully
+ INFO[0322] [addons] Setting up user addons
+ INFO[0322] [addons] no user addons defined
+ INFO[0322] Finished building Kubernetes cluster successfully
+
+Install Kubectl
+===============
+
+Download and install kubectl. Binaries can be found here for Linux and Mac:
+
+https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/linux/amd64/kubectl
+https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/darwin/amd64/kubectl
+
+You only need to install kubectl where you'll launch Kubernetes commands. This
+can be any machine of the Kubernetes cluster or a machine that has IP access
+to the APIs.
+Usually, we use the first controller as it also has access to internal
+Kubernetes services, which can be convenient.
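+
+A minimal install on Linux, assuming the kubectl binary has been downloaded to
+the current directory, could look like::
+
+  > chmod +x ./kubectl
+
+  > sudo mv ./kubectl /usr/local/bin/kubectl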
+
+Validate deployment
+-------------------
+
+::
+
+ > mkdir -p ~/.kube
+
+ > cp kube_config_cluster.yml ~/.kube/config.onap
+
+ > export KUBECONFIG=~/.kube/config.onap
+
+ > kubectl config use-context onap
+
+ > kubectl get nodes -o=wide
+
+::
+
+ NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ onap-control-1 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.8 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-control-2 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.11 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-control-3 Ready controlplane,etcd 3h53m v1.15.2 10.0.0.12 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-1 Ready worker 3h53m v1.15.2 10.0.0.14 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-10 Ready worker 3h53m v1.15.2 10.0.0.16 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-11 Ready worker 3h53m v1.15.2 10.0.0.18 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-12 Ready worker 3h53m v1.15.2 10.0.0.7 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-2 Ready worker 3h53m v1.15.2 10.0.0.26 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-3 Ready worker 3h53m v1.15.2 10.0.0.5 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-4 Ready worker 3h53m v1.15.2 10.0.0.6 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-5 Ready worker 3h53m v1.15.2 10.0.0.9 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-6 Ready worker 3h53m v1.15.2 10.0.0.17 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-7 Ready worker 3h53m v1.15.2 10.0.0.20 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-8 Ready worker 3h53m v1.15.2 10.0.0.10 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+ onap-k8s-9 Ready worker 3h53m v1.15.2 10.0.0.4 <none> Ubuntu 18.04 LTS 4.15.0-22-generic docker://18.9.5
+
+
+Install Helm
+============
+
+Example Helm client install on Linux::
+
+ > wget https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz
+
+ > tar -zxvf helm-v2.16.6-linux-amd64.tar.gz
+
+ > sudo mv linux-amd64/helm /usr/local/bin/helm
+
+Initialize Kubernetes Cluster for use by Helm
+---------------------------------------------
+
+::
+
+ > kubectl -n kube-system create serviceaccount tiller
+
+ > kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+
+ > helm init --service-account tiller
+
+  > kubectl -n kube-system rollout status deploy/tiller-deploy
+
+
+
+Setting up an NFS share for Multinode Kubernetes Clusters
+=========================================================
+Deploying applications to a Kubernetes cluster requires Kubernetes nodes to
+share a common, distributed filesystem. In this tutorial, we will set up an
+NFS Master, and configure all Worker nodes of the Kubernetes cluster to play
+the role of NFS slaves.
+
+It is recommended that a separate VM, outside of the kubernetes
+cluster, be used. This is to ensure that the NFS Master does not compete for
+resources with the Kubernetes Control Plane or Worker Nodes.
+
+
+Launch new NFS Server VM instance
+---------------------------------
+.. image:: images/nfs_server/nfs_server_1.png
+
+Select Ubuntu 18.04 as base image
+---------------------------------
+Select "No" on "Create New Volume"
+
+.. image:: images/nfs_server/nfs_server_2.png
+
+Select Flavor
+-------------
+
+.. image:: images/nfs_server/nfs_server_3.png
+
+Networking
+-----------
+
+.. image:: images/nfs_server/nfs_server_4.png
+
+Security Group
+---------------
+
+.. image:: images/nfs_server/nfs_server_5.png
+
+Key Pair
+--------
+Assign the key pair that was created/selected previously (e.g. onap_key).
+
+.. image:: images/nfs_server/nfs_server_6.png
+
+Apply customization script for NFS Server VM
+--------------------------------------------
+
+Click :download:`openstack-nfs-server.sh <shell/openstack-nfs-server.sh>` to download
+the script.
+
+.. literalinclude:: shell/openstack-nfs-server.sh
+ :language: bash
+
+This customization script will:
+
+* update ubuntu
+* install nfs server
+
+
+Launch Instance
+---------------
+
+.. image:: images/nfs_server/nfs_server_7.png
+
+
+
+Assign Floating IP addresses
+----------------------------
+
+.. image:: images/nfs_server/nfs_server_8.png
+
+The resulting floating IP assignments in this example:
+
+.. image:: images/nfs_server/nfs_server_9.png
+
+
+To properly set up an NFS share on Master and Slave nodes, the user can run the
+scripts below.
+
+Click :download:`master_nfs_node.sh <shell/master_nfs_node.sh>` to download the
+script.
+
+.. literalinclude:: shell/master_nfs_node.sh
+ :language: bash
+
+Click :download:`slave_nfs_node.sh <shell/slave_nfs_node.sh>` to download the script.
+
+.. literalinclude:: shell/slave_nfs_node.sh
+ :language: bash
+
+The master_nfs_node.sh script runs on the NFS Master node and needs the list of
+NFS Slave nodes as input, e.g.::
+
+ > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+
+The slave_nfs_node.sh script runs on each NFS Slave node and needs the IP of
+the NFS Master node as input, e.g.::
+
+ > sudo ./slave_nfs_node.sh master_node_ip
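+
+Afterwards, a quick way to confirm the share is mounted on a slave node is to
+look for the NFS mount (the `/dockerdata-nfs` path is the directory
+conventionally used by OOM and may differ in your setup)::
+
+  > mount | grep dockerdata-nfs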
+
+
+ONAP Deployment via OOM
+=======================
+Now that Kubernetes and Helm are installed and configured you can prepare to
+deploy ONAP. Follow the instructions in the README.md_ or look at the official
+documentation to get started:
+
+- :ref:`quick-start-label` - deploy ONAP on an existing cloud
+- :ref:`user-guide-label` - a guide for operators of an ONAP instance
diff --git a/docs/archived/oom_setup_paas.rst b/docs/archived/oom_setup_paas.rst
new file mode 100644
index 0000000000..2dabcb1aea
--- /dev/null
+++ b/docs/archived/oom_setup_paas.rst
@@ -0,0 +1,144 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2021 Nokia
+
+.. Links
+.. _Cert-Manager Installation documentation: https://cert-manager.io/docs/installation/kubernetes/
+.. _Cert-Manager kubectl plugin documentation: https://cert-manager.io/docs/usage/kubectl-plugin/
+.. _Strimzi Apache Kafka Operator helm Installation documentation: https://strimzi.io/docs/operators/in-development/deploying.html#deploying-cluster-operator-helm-chart-str
+
+.. _oom_setup_paas:
+
+ONAP PaaS set-up
+################
+
+Starting from the Honolulu release, Cert-Manager and Prometheus Stack are part
+of the k8s PaaS for ONAP operations and can be installed to provide
+additional functionality for ONAP engineers.
+Starting from the Jakarta release, Strimzi Apache Kafka is deployed to provide
+Apache Kafka as the default messaging bus for ONAP.
+
+The versions of PaaS components that are supported by OOM are as follows:
+
+.. table:: ONAP PaaS components
+
+ ============== ============= ================= =======
+  Release        Cert-Manager  Prometheus Stack  Strimzi
+  ============== ============= ================= =======
+  honolulu       1.2.0         13.x
+  istanbul       1.5.4         19.x
+  jakarta                                        0.28.0
+ ============== ============= ================= =======
+
+This guide provides instructions on how to install the PaaS
+components for ONAP.
+
+.. contents::
+ :depth: 1
+ :local:
+..
+
+Strimzi Apache Kafka Operator
+=============================
+
+Strimzi provides a way to run an Apache Kafka cluster on Kubernetes
+in various deployment configurations by using kubernetes operators.
+Operators are a method of packaging, deploying, and managing a
+Kubernetes application.
+Strimzi Operators extend Kubernetes functionality, automating common
+and complex tasks related to a Kafka deployment. By implementing
+knowledge of Kafka operations in code, Kafka administration
+tasks are simplified and require less manual intervention.
+
+Installation steps
+------------------
+
+The recommended version of Strimzi for Kubernetes 1.19 is v0.28.0.
+The Strimzi cluster operator is deployed using helm to install the parent chart
+containing all of the required custom resource definitions. This should be done
+by a kubernetes administrator to allow for deployment of custom resources into
+any kubernetes namespace within the cluster.
+
+Full installation instructions can be found in the
+`Strimzi Apache Kafka Operator helm Installation documentation`_.
+
+Installation can be as simple as:
+
+- Add the helm repo::
+
+ > helm repo add strimzi https://strimzi.io/charts/
+
+- Install the operator::
+
+ > helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator --namespace strimzi-system --version 0.28.0 --set watchAnyNamespace=true --create-namespace
+
+Cert-Manager
+============
+
+Cert-Manager is a native Kubernetes certificate management controller.
+It can help with issuing certificates from a variety of sources, such as
+Let’s Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, self
+signed or external issuers. It ensures certificates are valid and up to
+date, and attempts to renew certificates at a configured time before expiry.
+
+Installation steps
+------------------
+
+The recommended version of Cert-Manager for Kubernetes 1.19 is v1.5.4.
+Cert-Manager is deployed using regular YAML manifests which include all
+the needed resources (the CustomResourceDefinitions, cert-manager,
+namespace, and the webhook component).
+
+Full installation instructions, including details on how to configure extra
+functionality in Cert-Manager can be found in the
+`Cert-Manager Installation documentation`_.
+
+There is also a kubectl plugin (kubectl cert-manager) that can help you
+to manage cert-manager resources inside your cluster. For installation
+steps, please refer to `Cert-Manager kubectl plugin documentation`_.
+
+Installation can be as simple as::
+
+ > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
+
+Prometheus Stack (optional)
+===========================
+
+Prometheus is an open-source systems monitoring and alerting toolkit with
+an active ecosystem.
+
+Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana
+dashboards, and Prometheus rules combined with documentation and scripts to
+provide easy to operate end-to-end Kubernetes cluster monitoring with
+Prometheus using the Prometheus Operator. As it includes both Prometheus
+Operator and Grafana dashboards, there is no need to set them up separately.
+
+Installation steps
+------------------
+
+The recommended version of kube-prometheus-stack chart for
+Kubernetes 1.19 is 19.x (which is currently the latest major chart version),
+for example 19.0.2.
+
+In order to install Prometheus Stack, you must follow these steps:
+
+- Create the namespace for Prometheus Stack::
+
+ > kubectl create namespace prometheus
+
+- Add the prometheus-community Helm repository::
+
+ > helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+
+- Update your local Helm chart repository cache::
+
+ > helm repo update
+
+- To install the kube-prometheus-stack Helm chart in latest version::
+
+ > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus
+
+ To install the kube-prometheus-stack Helm chart in specific version, for example 19.0.2::
+
+ > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus --version=19.0.2
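+
+- To verify that the stack has come up, list the pods in the namespace created
+  earlier (a simple sanity check)::
+
+  > kubectl get pods --namespace prometheus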
diff --git a/docs/archived/oom_user_guide.rst b/docs/archived/oom_user_guide.rst
new file mode 100644
index 0000000000..2ff74b5898
--- /dev/null
+++ b/docs/archived/oom_user_guide.rst
@@ -0,0 +1,798 @@
+.. This work is licensed under a Creative Commons Attribution 4.0
+.. International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018-2021 Amdocs, Bell Canada, Orange, Samsung, Nordix Foundation
+.. _oom_user_guide:
+
+.. Links
+.. _Curated applications for Kubernetes: https://github.com/kubernetes/charts
+.. _Services: https://kubernetes.io/docs/concepts/services-networking/service/
+.. _ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+.. _StatefulSet: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
+.. _Helm Documentation: https://docs.helm.sh/helm/
+.. _Helm: https://docs.helm.sh/
+.. _Kubernetes: https://Kubernetes.io/
+.. _Kubernetes LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+.. _user-guide-label:
+
+OOM User Guide
+##############
+
+The ONAP Operations Manager (OOM) provides the ability to manage the entire
+life-cycle of an ONAP installation, from the initial deployment to final
+decommissioning. This guide provides instructions for users of ONAP to
+use the Kubernetes_/Helm_ system as a complete ONAP management system.
+
+This guide provides many examples of Helm command line operations. For a
+complete description of these commands please refer to the `Helm
+Documentation`_.
+
+.. figure:: images/oom_logo/oomLogoV2-medium.png
+ :align: right
+
+The following sections describe the life-cycle operations:
+
+- Deploy_ - with built-in component dependency management
+- Configure_ - unified configuration across all ONAP components
+- Monitor_ - real-time health monitoring feeding to a Consul UI and Kubernetes
+- Heal_ - failed ONAP containers are recreated automatically
+- Scale_ - cluster ONAP services to enable seamless scaling
+- Upgrade_ - change-out containers or configuration with little or no service
+ impact
+- Delete_ - cleanup individual containers or entire deployments
+
+.. figure:: images/oom_logo/oomLogoV2-Deploy.png
+ :align: right
+
+Deploy
+======
+
+The OOM team, with assistance from the ONAP project teams, has built a
+comprehensive set of Helm charts (yaml files very similar to TOSCA files) that
+describe the composition of each of the ONAP components and the relationships
+within and between components. Using this model, Helm is able to deploy all of
+ONAP with a few simple commands.
+
+Pre-requisites
+--------------
+Your environment must have the Kubernetes `kubectl` client together with
+Strimzi Apache Kafka, Cert-Manager and Helm set up as a one time activity.
+
+Install Kubectl
+~~~~~~~~~~~~~~~
+Enter the following to install kubectl (on Ubuntu, there are slight differences
+on other O/Ss), the Kubernetes command line interface used to manage a
+Kubernetes cluster::
+
+ > curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.11/bin/linux/amd64/kubectl
+ > chmod +x ./kubectl
+ > sudo mv ./kubectl /usr/local/bin/kubectl
+ > mkdir ~/.kube
+
+Paste kubectl config from Rancher (see the :ref:`cloud-setup-guide-label` for
+alternative Kubernetes environment setups) into the `~/.kube/config` file.
+
+Verify that the Kubernetes config is correct::
+
+ > kubectl get pods --all-namespaces
+
+At this point you should see Kubernetes pods running.
+
+Install Helm
+~~~~~~~~~~~~
+Helm is used by OOM for package and configuration management. To install Helm,
+enter the following::
+
+ > wget https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz
+ > tar -zxvf helm-v3.6.3-linux-amd64.tar.gz
+ > sudo mv linux-amd64/helm /usr/local/bin/helm
+
+Verify the Helm version with::
+
+ > helm version
+
+Install Strimzi Apache Kafka Operator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Details on how to install Strimzi Apache Kafka can be found
+:doc:`here <oom_setup_paas>`.
+
+Install Cert-Manager
+~~~~~~~~~~~~~~~~~~~~
+Details on how to install Cert-Manager can be found
+:doc:`here <oom_setup_paas>`.
+
+Install the Helm Repo
+---------------------
+Once kubectl and Helm are set up, one needs to set up a local Helm server to
+serve the ONAP charts::
+
+ > helm install osn/onap
+
+.. note::
+ The osn repo is not currently available so creation of a local repository is
+ required.
+
+Helm is able to use charts served up from a repository and comes setup with a
+default CNCF provided `Curated applications for Kubernetes`_ repository called
+stable which should be removed to avoid confusion::
+
+ > helm repo remove stable
+
+.. To setup the Open Source Networking Nexus repository for helm enter::
+.. > helm repo add osn 'https://nexus3.onap.org:10001/helm/helm-repo-in-nexus/master/'
+
+To prepare your system for an installation of ONAP, you'll need to::
+
+ > git clone -b kohn --recurse-submodules -j2 http://gerrit.onap.org/r/oom
+ > cd oom/kubernetes
+
+
+To install a local Helm server::
+
+ > curl -LO https://s3.amazonaws.com/chartmuseum/release/latest/bin/linux/amd64/chartmuseum
+ > chmod +x ./chartmuseum
+ > mv ./chartmuseum /usr/local/bin
+
+To set up a local Helm server to serve the ONAP charts::
+
+ > mkdir -p ~/helm3-storage
+ > chartmuseum --storage local --storage-local-rootdir ~/helm3-storage -port 8879 &
+
+Note the port number that is listed and use it in the Helm repo add as
+follows::
+
+ > helm repo add local http://127.0.0.1:8879
+
+To get a list of all of the available Helm chart repositories::
+
+ > helm repo list
+ NAME URL
+ local http://127.0.0.1:8879
+
+Then build your local Helm repository::
+
+ > make SKIP_LINT=TRUE [HELM_BIN=<HELM_PATH>] all
+
+`HELM_BIN`
+  Sets the helm binary to be used. The default value uses helm from PATH.
+
+The Helm search command reads through all of the repositories configured on the
+system, and looks for matches::
+
+ > helm search repo local
+ NAME VERSION DESCRIPTION
+ local/appc 11.0.0 Application Controller
+ local/clamp 11.0.0 ONAP Clamp
+ local/common 11.0.0 Common templates for inclusion in other charts
+ local/onap 11.0.0 Open Network Automation Platform (ONAP)
+ local/robot 11.0.0 A helm Chart for kubernetes-ONAP Robot
+ local/so 11.0.0 ONAP Service Orchestrator
+
+In any case, setup of the Helm repository is a one time activity.
+
+Next, install Helm Plugins required to deploy the ONAP release::
+
+ > cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins
+
+Once the repo is setup, installation of ONAP can be done with a single
+command::
+
+ > helm deploy development local/onap --namespace onap --set global.masterPassword=password
+
+This will install ONAP from a local repository in a 'development' Helm release.
+As described below, to override the default configuration values provided by
+OOM, an environment file can be provided on the command line as follows::
+
+ > helm deploy development local/onap --namespace onap -f overrides.yaml --set global.masterPassword=password
+
+.. note::
+ Refer the Configure_ section on how to update overrides.yaml and values.yaml
+
+To get a summary of the status of all of the pods (containers) running in your
+deployment::
+
+ > kubectl get pods --namespace onap -o=wide
+
+.. note::
+ The Kubernetes namespace concept allows for multiple instances of a component
+ (such as all of ONAP) to co-exist with other components in the same
+ Kubernetes cluster by isolating them entirely. Namespaces share only the
+ hosts that form the cluster thus providing isolation between production and
+ development systems as an example.
+
+.. note::
+ The Helm `--name` option refers to a release name and not a Kubernetes namespace.
+
+
+To install a specific version of a single ONAP component (`so` in this example)
+with the given release name enter::
+
+ > helm deploy so onap/so --version 11.0.0 --set global.masterPassword=password --set global.flavor=unlimited --namespace onap
+
+.. note::
+  The dependent components should be installed for the component being
+  installed.
+
+
+To display details of a specific resource or group of resources type::
+
+ > kubectl describe pod so-1071802958-6twbl
+
+where the pod identifier refers to the auto-generated pod identifier.
+
+.. figure:: images/oom_logo/oomLogoV2-Configure.png
+ :align: right
+
+Configure
+=========
+
+Each project within ONAP has its own configuration data generally consisting
+of: environment variables, configuration files, and database initial values.
+Many technologies are used across the projects resulting in significant
+operational complexity and an inability to apply global parameters across the
+entire ONAP deployment. OOM solves this problem by introducing a common
+configuration technology, Helm charts, that provide a hierarchical
+configuration with the ability to override values with higher
+level charts or command line options.
+
+The structure of the configuration of ONAP is shown in the following diagram.
+Note that key/value pairs of a parent will always take precedence over those
+of a child. Also note that values set on the command line have the highest
+precedence of all.
+
+.. graphviz::
+
+ digraph config {
+ {
+ node [shape=folder]
+ oValues [label="values.yaml"]
+ demo [label="onap-demo.yaml"]
+ prod [label="onap-production.yaml"]
+ oReq [label="Chart.yaml"]
+ soValues [label="values.yaml"]
+ soReq [label="Chart.yaml"]
+ mdValues [label="values.yaml"]
+ }
+ {
+ oResources [label="resources"]
+ }
+ onap -> oResources
+ onap -> oValues
+ oResources -> environments
+ oResources -> oReq
+ oReq -> so
+ environments -> demo
+ environments -> prod
+ so -> soValues
+ so -> soReq
+ so -> charts
+ charts -> mariadb
+ mariadb -> mdValues
+
+ }
+
+The top level onap/values.yaml file contains the values required to be set
+before deploying ONAP. Here is the contents of this file:
+
+.. include:: ../kubernetes/onap/values.yaml
+ :code: yaml
+
+One may wish to create a value file that is specific to a given deployment such
+that it can be differentiated from other deployments. For example, a
+onap-development.yaml file may create a minimal environment for development
+while onap-production.yaml might describe a production deployment that operates
+independently of the developer version.
+
+For example, if the production OpenStack instance was different from a
+developer's instance, the onap-production.yaml file may contain a different
+value for the vnfDeployment/openstack/oam_network_cidr key as shown below.
+
+.. code-block:: yaml
+
+ nsPrefix: onap
+ nodePortPrefix: 302
+ apps: consul msb mso message-router sdnc vid robot portal policy appc aai
+ sdc dcaegen2 log cli multicloud clamp vnfsdk aaf kube2msb
+ dataRootDir: /dockerdata-nfs
+
+ # docker repositories
+ repository:
+ onap: nexus3.onap.org:10001
+ oom: oomk8s
+ aai: aaionap
+ filebeat: docker.elastic.co
+
+ image:
+ pullPolicy: Never
+
+ # vnf deployment environment
+ vnfDeployment:
+ openstack:
+ ubuntu_14_image: "Ubuntu_14.04.5_LTS"
+ public_net_id: "e8f51956-00dd-4425-af36-045716781ffc"
+ oam_network_id: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
+ oam_subnet_id: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
+ oam_network_cidr: "192.168.30.0/24"
+ <...>
+
+
+To deploy ONAP with this environment file, enter::
+
+ > helm deploy local/onap -n onap -f onap/resources/environments/onap-production.yaml --set global.masterPassword=password
+
+.. include:: yaml/environments_onap_demo.yaml
+ :code: yaml
+
+When deploying all of ONAP, the dependencies section of the Chart.yaml file
+controls which and what version of the ONAP components are included.
+Here is an excerpt of this file:
+
+.. code-block:: yaml
+
+ dependencies:
+ <...>
+ - name: so
+ version: ~11.0.0
+ repository: '@local'
+ condition: so.enabled
+ <...>
+
+The ~ operator in the `so` version value indicates that the latest "11.X.X"
+version of `so` shall be used, thus allowing the chart to allow for minor
+upgrades that don't impact the so API; hence, version 11.0.1 will be installed
+in this case.
+
+The onap/resources/environment/dev.yaml (see the excerpt below) enables
+for fine grained control on what components are included as part of this
+deployment. By changing this `so` line to `enabled: false` the `so` component
+will not be deployed. If this change is part of an upgrade the existing `so`
+component will be shut down. Other `so` parameters and even `so` child values
+can be modified, for example the `so`'s `liveness` probe could be disabled
+(which is not recommended as this change would disable auto-healing of `so`).
+
+.. code-block:: yaml
+
+ #################################################################
+ # Global configuration overrides.
+ #
+ # These overrides will affect all helm charts (ie. applications)
+ # that are listed below and are 'enabled'.
+ #################################################################
+ global:
+ <...>
+
+ #################################################################
+ # Enable/disable and configure helm charts (ie. applications)
+ # to customize the ONAP deployment.
+ #################################################################
+ aaf:
+ enabled: false
+ <...>
+ so: # Service Orchestrator
+ enabled: true
+
+ replicaCount: 1
+
+ liveness:
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+ <...>
+
+Accessing the ONAP Portal using OOM and a Kubernetes Cluster
+------------------------------------------------------------
+
+The ONAP deployment created by OOM operates in a private IP network that isn't
+publicly accessible (i.e. OpenStack VMs with private internal network) which
+blocks access to the ONAP Portal. To enable direct access to this Portal from a
+user's own environment (a laptop etc.) the portal application's port 8989 is
+exposed through a `Kubernetes LoadBalancer`_ object.
+
+Typically, to be able to access the Kubernetes nodes publicly a public address
+is assigned. In OpenStack this is a floating IP address.
+
+When the `portal-app` chart is deployed a Kubernetes service is created that
+instantiates a load balancer. The LB chooses the private interface of one of
+the nodes as in the example below (10.0.0.4 is private to the K8s cluster only).
+Then to be able to access the portal on port 8989 from outside the K8s &
+OpenStack environment, the user needs to assign/get the floating IP address that
+corresponds to the private IP as follows::
+
+ > kubectl -n onap get services|grep "portal-app"
+ portal-app LoadBalancer 10.43.142.201 10.0.0.4 8989:30215/TCP,8006:30213/TCP,8010:30214/TCP 1d app=portal-app,release=dev
+
+
+In this example, use the 10.0.0.4 private address as a key to find the
+corresponding public address, which in this example is 10.12.6.155. If you're
+using OpenStack you'll do the lookup with the horizon GUI or the OpenStack CLI
+for your tenant (openstack server list). That IP is then used in your
+`/etc/hosts` to map the fixed DNS aliases required by the ONAP Portal as shown
+below::
+
+ 10.12.6.155 portal.api.simpledemo.onap.org
+ 10.12.6.155 vid.api.simpledemo.onap.org
+ 10.12.6.155 sdc.api.fe.simpledemo.onap.org
+ 10.12.6.155 sdc.workflow.plugin.simpledemo.onap.org
+ 10.12.6.155 sdc.dcae.plugin.simpledemo.onap.org
+ 10.12.6.155 portal-sdk.simpledemo.onap.org
+ 10.12.6.155 policy.api.simpledemo.onap.org
+ 10.12.6.155 aai.api.sparky.simpledemo.onap.org
+ 10.12.6.155 cli.api.simpledemo.onap.org
+ 10.12.6.155 msb.api.discovery.simpledemo.onap.org
+ 10.12.6.155 msb.api.simpledemo.onap.org
+ 10.12.6.155 clamp.api.simpledemo.onap.org
+ 10.12.6.155 so.api.simpledemo.onap.org
+
+Ensure you've disabled any proxy settings in the browser you are using to
+access the portal and then simply access the new ssl-encrypted URL:
+``https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm``
+
+.. note::
+  When using the HTTPS based Portal URL, the browser needs to be configured to
+  accept unsecure credentials.
+  Additionally, when opening an application inside the Portal, the browser
+  might block the content, which requires disabling the blocking and reloading
+  the page.
+
+.. note::
+  Besides the ONAP Portal, the components can deliver additional user
+  interfaces; please check the component-specific documentation.
+
+.. note::
+
+ | Alternatives Considered:
+
+ - Kubernetes port forwarding was considered but discarded as it would
+ require the end user to run a script that opens up port forwarding tunnels
+ to each of the pods that provides a portal application widget.
+
+ - Reverting to a VNC server similar to what was deployed in the Amsterdam
+ release was also considered but there were many issues with resolution,
+ lack of volume mount, /etc/hosts dynamic update, file upload that were
+ a tall order to solve in time for the Beijing release.
+
+ Observations:
+
+ - If you are not using floating IPs in your Kubernetes deployment and
+ directly attaching a public IP address (i.e. by using your public provider
+ network) to your K8S Node VMs' network interface, then the output of
+ 'kubectl -n onap get services | grep "portal-app"'
+ will show your public IP instead of the private network's IP. Therefore,
+ you can grab this public IP directly (as compared to trying to find the
+ floating IP first) and map this IP in /etc/hosts.
+
+.. figure:: images/oom_logo/oomLogoV2-Monitor.png
+ :align: right
+
+Monitor
+=======
+
+All highly available systems include at least one facility to monitor the
+health of components within the system. Such health monitors are often used as
+inputs to distributed coordination systems (such as etcd, Zookeeper, or Consul)
+and monitoring systems (such as Nagios or Zabbix). OOM provides two mechanisms
+to monitor the real-time health of an ONAP deployment:
+
+- a Consul GUI for a human operator or downstream monitoring systems, and
+- a set of Kubernetes liveness probes which feed into the Kubernetes manager
+  and enable automatic healing of failed containers, as described in the Heal
+  section.
+
+Within ONAP, Consul is the monitoring system of choice and deployed by OOM in
+two parts:
+
+- a three-way, centralized Consul server cluster is deployed as a highly
+ available monitor of all of the ONAP components, and
+- a number of Consul agents.
+
+The Consul server provides a user interface that allows a user to graphically
+view the current health status of all of the ONAP components for which agents
+have been created - a sample from the ONAP Integration labs follows:
+
+.. figure:: images/consul/consulHealth.png
+ :align: center
+
+To see the real-time health of a deployment go to: ``http://<kubernetes IP>:30270/ui/``
+where a GUI much like the following will be found:
+
+.. note::
+  If the Consul GUI is not accessible, you can refer to this
+  `kubectl port-forward <https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/>`_ method to access an application.
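+
+As a sketch, such a port-forward to the Consul server UI could look like the
+following, where the service name is a placeholder that must be replaced with
+the actual Consul UI service in your deployment::
+
+  > kubectl -n onap port-forward svc/<consul-server-ui-service> 8500:8500
+
+after which the GUI would be reachable at ``http://localhost:8500/ui/``.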
+
+.. figure:: images/oom_logo/oomLogoV2-Heal.png
+ :align: right
+
+Heal
+====
+
+The ONAP deployment is defined by Helm charts as mentioned earlier. These Helm
+charts are also used to implement automatic recoverability of ONAP components
+when individual components fail. Once ONAP is deployed, a "liveness" probe
+starts checking the health of the components after a specified startup time.
+
+Should a liveness probe indicate a failed container it will be terminated and a
+replacement will be started in its place - containers are ephemeral. Should the
+deployment specification indicate that there are one or more dependencies to
+this container or component (for example a dependency on a database) the
+dependency will be satisfied before the replacement container/component is
+started. This mechanism ensures that, after a failure, all of the ONAP
+components restart successfully.
+
+To test healing, the following command can be used to delete a pod::
+
+ > kubectl delete pod [pod name] -n [pod namespace]
+
+One could then use the following command to monitor the pods and observe the
+pod being terminated and the service being automatically healed with the
+creation of a replacement pod::
+
+ > kubectl get pods --all-namespaces -o=wide
+
+.. figure:: images/oom_logo/oomLogoV2-Scale.png
+ :align: right
+
+Scale
+=====
+
+Many of the ONAP components are horizontally scalable which allows them to
+adapt to expected offered load. During the Beijing release scaling is static,
+that is, during deployment or upgrade a cluster size is defined and this
+cluster size will be maintained even in the presence of faults. The parameter
+that controls
+the cluster size of a given component is found in the values.yaml file for that
+component. Here is an excerpt that shows this parameter:
+
+.. code-block:: yaml
+
+ # default number of instances
+ replicaCount: 1
+
+In order to change the size of a cluster, an operator could use a helm upgrade
+(described in detail in the next section) as follows::
+
+ > helm upgrade [RELEASE] [CHART] [flags]
+
+The RELEASE argument can be obtained from the following command::
+
+ > helm list
+
+Below is the example for the same::
+
+ > helm list
+ NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
+ dev 1 Wed Oct 14 13:49:52 2020 DEPLOYED onap-11.0.0 Kohn onap
+ dev-cassandra 5 Thu Oct 15 14:45:34 2020 DEPLOYED cassandra-11.0.0 onap
+ dev-contrib 1 Wed Oct 14 13:52:53 2020 DEPLOYED contrib-11.0.0 onap
+ dev-mariadb-galera 1 Wed Oct 14 13:55:56 2020 DEPLOYED mariadb-galera-11.0.0 onap
+
+Here the NAME column shows the release name. In our case we want to try the
+scale operation on cassandra, so the release name would be dev-cassandra.
+
+Now we need to obtain the chart name for cassandra. Use the following
+command to get the chart name::
+
+ > helm search cassandra
+
+For example::
+
+ > helm search cassandra
+ NAME                    CHART VERSION  APP VERSION  DESCRIPTION
+ local/cassandra         11.0.0                      ONAP cassandra
+ local/portal-cassandra  11.0.0                      Portal cassandra
+ local/aaf-cass          11.0.0                      ONAP AAF cassandra
+ local/sdc-cs            11.0.0                      ONAP Service Design and Creation Cassandra
+
+Here the NAME column shows the chart name. As we want to try the scale
+operation for cassandra, the corresponding chart name is local/cassandra.
+
+
+Now that we have both of the command's arguments, we can perform the
+scale operation for cassandra as follows::
+
+ > helm upgrade dev-cassandra local/cassandra --set replicaCount=3
+
+With this command we can scale the cassandra database instances up or down.
+
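+A quick way to confirm the result is to list the cassandra pods and check that
+the expected number of replicas is running (the label selector is an assumption
+and may differ between charts)::
+
+ > kubectl get pods -n onap -l app=cassandra -o wide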
+
+The ONAP components use Kubernetes provided facilities to build clustered,
+highly available systems including: Services_ with load-balancers, ReplicaSet_,
+and StatefulSet_. Some of the open-source projects used by the ONAP components
+directly support clustered configurations, for example ODL and MariaDB Galera.
+
+The Kubernetes Services_ abstraction provides a consistent access point for
+each of the ONAP components, independent of the pod or container architecture
+of that component. For example, SDN-C uses OpenDaylight clustering with a
+default cluster size of three, but a Kubernetes service abstracts this cluster
+from the other ONAP components such that the number of pods in the cluster can
+change while the change remains isolated from the other ONAP components by the
+load-balancer implemented in the ODL service abstraction.
+
+A ReplicaSet_ is a construct that is used to describe the desired state of the
+cluster. For example 'replicas: 3' indicates to Kubernetes that a cluster of 3
+instances is the desired state. Should one of the members of the cluster fail,
+a new member will be automatically started to replace it.
+
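+As a minimal sketch, this desired state could appear in a deployment template
+roughly as follows (the component name is hypothetical):
+
+.. code-block:: yaml
+
+ spec:
+   replicas: 3          # desired cluster size; a failed pod is replaced automatically
+   selector:
+     matchLabels:
+       app: example-component
+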
+Some of the ONAP components may need a more deterministic deployment; for
+example to enable intra-cluster communication. For these applications the
+component can be deployed as a Kubernetes StatefulSet_ which will maintain a
+persistent identifier for the pods and thus a stable network id for the pods.
+For example, the pod names might be web-0, web-1, ..., web-{N-1} for N 'web' pods
+with corresponding DNS entries such that intra service communication is simple
+even if the pods are physically distributed across multiple nodes. An example
+of how these capabilities can be used is described in the Running Consul on
+Kubernetes tutorial.
+
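+An abbreviated sketch of such a StatefulSet (names are hypothetical and the
+pod template is omitted) might look like:
+
+.. code-block:: yaml
+
+ apiVersion: apps/v1
+ kind: StatefulSet
+ metadata:
+   name: web
+ spec:
+   serviceName: web     # headless Service providing the stable per-pod DNS entries
+   replicas: 3          # creates pods web-0, web-1 and web-2
+   selector:
+     matchLabels:
+       app: web
+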
+.. figure:: images/oom_logo/oomLogoV2-Upgrade.png
+ :align: right
+
+Upgrade
+=======
+
+Helm has built-in capabilities to enable the upgrade of pods without causing a
+loss of the service being provided by that pod or pods (if configured as a
+cluster). As described in the OOM Developer's Guide, ONAP components provide
+an abstracted 'service' end point with the pods or containers providing this
+service hidden from other ONAP components by a load balancer. This capability
+is used during upgrades to allow a pod with a new image to be added to the
+service before removing the pod with the old image. This 'make before break'
+capability ensures minimal downtime.
+
+Prior to doing an upgrade, determine the status of the deployed charts::
+
+ > helm list
+ NAME  REVISION  UPDATED                  STATUS    CHART      NAMESPACE
+ so    1         Mon Feb 5 10:05:22 2020  DEPLOYED  so-11.0.0  onap
+
+When upgrading a cluster a parameter controls the minimum size of the cluster
+during the upgrade while another parameter controls the maximum number of nodes
+in the cluster. For example, SDNC configured as a 3-way ODL cluster might
+require that during the upgrade no fewer than 2 pods are available at all times
+to provide service while no more than 5 pods are ever deployed across the two
+versions at any one time to avoid depleting the cluster of resources. In this
+scenario, the SDNC cluster would start with 3 old pods then Kubernetes may add
+a new pod (3 old, 1 new), delete one old (2 old, 1 new), add two new pods (2
+old, 3 new) and finally delete the 2 old pods (3 new). During this sequence
+the constraints of the minimum of two pods and maximum of five would be
+maintained while providing service the whole time.
+
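+For components deployed as Kubernetes Deployments these two constraints
+correspond to the rolling update parameters; a sketch matching the example
+above (values are illustrative) might be:
+
+.. code-block:: yaml
+
+ spec:
+   replicas: 3
+   strategy:
+     type: RollingUpdate
+     rollingUpdate:
+       maxUnavailable: 1   # never fewer than 2 pods in service
+       maxSurge: 2         # never more than 5 pods across both versions
+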
+Initiation of an upgrade is triggered by changes in the Helm charts. For
+example, if the image specified for one of the pods in the SDNC deployment
+specification were to change (i.e. point to a new Docker image in the nexus3
+repository - commonly through the change of a deployment variable), the
+sequence of events described in the previous paragraph would be initiated.
+
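+A hypothetical command-line equivalent of such an image change (the value key
+and image tag are illustrative and depend on the chart) would be::
+
+ > helm upgrade dev-sdnc onap/sdnc --set image=onap/sdnc-image:2.4.2
+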
+For example, to upgrade a container by changing configuration, specifically an
+environment value::
+
+ > helm upgrade so onap/so --version 11.0.1 --set enableDebug=true
+
+Issuing this command will result in the appropriate container being stopped by
+Kubernetes and replaced with a new container with the new environment value.
+
+To upgrade a component to a new version with a new configuration file enter::
+
+ > helm upgrade so onap/so --version 11.0.1 -f environments/demo.yaml
+
+To fetch release history enter::
+
+ > helm history so
+ REVISION  UPDATED                  STATUS      CHART      DESCRIPTION
+ 1         Mon Jul 5 10:05:22 2022  SUPERSEDED  so-11.0.0  Install complete
+ 2         Mon Jul 5 10:10:55 2022  DEPLOYED    so-11.0.1  Upgrade complete
+
+Unfortunately, not all upgrades are successful. In recognition of this the
+lineup of pods within an ONAP deployment is tagged such that an administrator
+may force the ONAP deployment back to the previously tagged configuration or to
+a specific configuration, say to jump back two steps if an incompatibility
+between two ONAP components is discovered after the two individual upgrades
+succeeded.
+
+This rollback functionality gives the administrator confidence that in the
+unfortunate circumstance of a failed upgrade the system can be rapidly brought
+back to a known good state. This process of rolling upgrades while under
+service is illustrated in this short YouTube video showing a Zero Downtime
+Upgrade of a web application while under a 10 million transaction per second
+load.
+
+For example, to roll back to the previous system revision enter::
+
+ > helm rollback so 1
+
+ > helm history so
+ REVISION  UPDATED                  STATUS      CHART      DESCRIPTION
+ 1         Mon Jul 5 10:05:22 2022  SUPERSEDED  so-11.0.0  Install complete
+ 2         Mon Jul 5 10:10:55 2022  SUPERSEDED  so-11.0.1  Upgrade complete
+ 3         Mon Jul 5 10:14:32 2022  DEPLOYED    so-11.0.0  Rollback to 1
+
+.. note::
+
+ The description field can be overridden to document actions taken or include
+ tracking numbers.
+
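+For example, assuming a recent Helm 3 client, which supports the
+``--description`` flag on upgrade and rollback (the tracking number below is
+illustrative)::
+
+ > helm upgrade so onap/so --version 11.0.1 --description "SO-1234: enable debug"
+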
+Many of the ONAP components contain their own databases which are used to
+record configuration or state information. The schemas of these databases may
+change from version to version in such a way that data stored within the
+database needs to be migrated between versions. If such a migration script is
+available it can be invoked during the upgrade (or rollback) by Container
+Lifecycle Hooks. Two such hooks are available, PostStart and PreStop, which
+containers can access by registering a handler against one or both. Note that
+it is the responsibility of the ONAP component owners to implement the hook
+handlers - which could be a shell script or a call to a specific container HTTP
+endpoint - following the guidelines listed on the Kubernetes site. Lifecycle
+hooks are not restricted to database migration or even upgrades but can be used
+anywhere specific operations need to be taken during lifecycle operations.
+
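+A sketch of how a component might register such handlers in its pod template
+follows; the image, script path and endpoint are purely illustrative:
+
+.. code-block:: yaml
+
+ containers:
+   - name: example-component
+     image: nexus3.onap.org:10001/onap/example-component:2.0.0
+     lifecycle:
+       postStart:
+         exec:
+           command: ["/bin/sh", "-c", "/opt/app/bin/migrate-db.sh"]
+       preStop:
+         httpGet:
+           path: /shutdown/prepare
+           port: 8080
+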
+OOM uses the Helm Kubernetes package manager to deploy ONAP components. Each
+component is arranged in a packaging format called a chart - a collection of
+files that describe a set of Kubernetes resources. Helm allows for rolling
+upgrades of the deployed ONAP components. To upgrade a component's Helm
+release you will need an updated Helm chart; the chart might have modified,
+deleted or added values, deployment yamls, and more. To get the release name
+use::
+
+ > helm ls
+
+To easily upgrade the release use::
+
+ > helm upgrade [RELEASE] [CHART]
+
+To roll back to a previous release version use::
+
+ > helm rollback [flags] [RELEASE] [REVISION]
+
+For example, to upgrade the onap-so helm release to the latest SO container
+release v1.1.2:
+
+- Edit the so values.yaml file, which is part of the chart
+- Change "so: nexus3.onap.org:10001/openecomp/so:v1.1.1" to
+ "so: nexus3.onap.org:10001/openecomp/so:v1.1.2"
+- From the chart location run::
+
+ > helm upgrade onap-so .
+
+The previous so pod will be terminated and a new so pod with an updated so
+container will be created.
+
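+The progress of such an upgrade can be followed with standard Kubernetes
+commands, for example (resource names are assumptions)::
+
+ > kubectl rollout status deployment/onap-so -n onap
+ > kubectl get pods -n onap -o wide | grep so
+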
+.. figure:: images/oom_logo/oomLogoV2-Delete.png
+ :align: right
+
+Delete
+======
+
+Existing deployments can be partially or fully removed once they are no longer
+needed. To minimize errors it is recommended that before deleting components
+from a running deployment the operator perform a 'dry-run' to display exactly
+what will happen with a given command prior to actually deleting anything.
+For example::
+
+ > helm undeploy onap --dry-run
+
+will display the outcome of deleting the 'onap' release from the
+deployment.
+To completely delete a release and remove it from the internal store enter::
+
+ > helm undeploy onap
+
+Once the undeploy is complete, delete the namespace as well
+using the following command::
+
+ > kubectl delete namespace <name of namespace>
+
+.. note::
+ You need to provide the namespace name which you used during deployment;
+ for example::
+
+ > kubectl delete namespace onap
+
+One can also remove individual components from a deployment. For example, to
+remove `so` from a running deployment enter::
+
+ > helm undeploy onap-so
+
+This removes `so`, as the configuration indicates it is no longer part of the
+deployment. This might be useful if one wanted to replace just `so` by
+installing a custom version.
diff --git a/docs/archived/shell/master_nfs_node.sh b/docs/archived/shell/master_nfs_node.sh
new file mode 100644
index 0000000000..32574c9f29
--- /dev/null
+++ b/docs/archived/shell/master_nfs_node.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+usage () {
+ echo "Usage:"
+ echo " ./$(basename $0) node1_ip node2_ip ... nodeN_ip"
+ exit 1
+}
+
+if [ "$#" -lt 1 ]; then
+ echo "Missing NFS slave nodes"
+ usage
+fi
+
+#Install the NFS kernel server
+sudo apt-get update
+sudo apt-get install -y nfs-kernel-server
+
+#Create /dockerdata-nfs and set permissions
+sudo mkdir -p /dockerdata-nfs
+sudo chmod 777 -R /dockerdata-nfs
+sudo chown nobody:nogroup /dockerdata-nfs/
+
+#Update the /etc/exports
+NFS_EXP=""
+for i in "$@"; do
+ NFS_EXP="${NFS_EXP}$i(rw,sync,no_root_squash,no_subtree_check) "
+done
+echo "/dockerdata-nfs $NFS_EXP" | sudo tee -a /etc/exports
+
+#Restart the NFS service
+sudo exportfs -a
+sudo systemctl restart nfs-kernel-server
diff --git a/docs/archived/shell/openstack-k8s-controlnode.sh b/docs/archived/shell/openstack-k8s-controlnode.sh
new file mode 100644
index 0000000000..d1515a7e5f
--- /dev/null
+++ b/docs/archived/shell/openstack-k8s-controlnode.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+DOCKER_VERSION=18.09.5
+
+apt-get update
+
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+
+sudo usermod -aG docker ubuntu
+
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+# Determine this node's IP address (assumes the interface name starts with "ens")
+IP_ADDR=$(ip address | grep ens | grep inet | awk '{print $2}' | awk -F / '{print $1}')
+HOST_NAME=$(hostname)
+
+echo "$IP_ADDR $HOST_NAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+#nfs server
+sudo apt-get install nfs-kernel-server -y
+sudo mkdir -p /dockerdata-nfs
+sudo chown nobody:nogroup /dockerdata-nfs/
+
+
+exit 0
diff --git a/docs/archived/shell/openstack-k8s-workernode.sh b/docs/archived/shell/openstack-k8s-workernode.sh
new file mode 100644
index 0000000000..8b1b9e41ee
--- /dev/null
+++ b/docs/archived/shell/openstack-k8s-workernode.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+DOCKER_VERSION=18.09.5
+
+apt-get update
+
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+
+sudo usermod -aG docker ubuntu
+
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+# Determine this node's IP address (assumes the interface name starts with "ens")
+IP_ADDR=$(ip address | grep ens | grep inet | awk '{print $2}' | awk -F / '{print $1}')
+HOST_NAME=$(hostname)
+
+echo "$IP_ADDR $HOST_NAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+# install nfs
+sudo apt-get install nfs-common -y
+
+
+exit 0
diff --git a/docs/archived/shell/openstack-nfs-server.sh b/docs/archived/shell/openstack-nfs-server.sh
new file mode 100644
index 0000000000..395d04f27c
--- /dev/null
+++ b/docs/archived/shell/openstack-nfs-server.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+apt-get update
+
+# Determine this node's IP address (assumes the interface name starts with "ens")
+IP_ADDR=$(ip address | grep ens | grep inet | awk '{print $2}' | awk -F / '{print $1}')
+HOST_NAME=$(hostname)
+
+echo "$IP_ADDR $HOST_NAME" >> /etc/hosts
+
+sudo apt-get install make -y
+
+# nfs server
+sudo apt-get install nfs-kernel-server -y
+
+sudo mkdir -p /nfs_share
+sudo chown nobody:nogroup /nfs_share/
+
+exit 0
diff --git a/docs/archived/shell/slave_nfs_node.sh b/docs/archived/shell/slave_nfs_node.sh
new file mode 100644
index 0000000000..1035ff5ad6
--- /dev/null
+++ b/docs/archived/shell/slave_nfs_node.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+usage () {
+ echo "Usage:"
+ echo " ./$(basename $0) nfs_master_ip"
+ exit 1
+}
+
+if [ "$#" -ne 1 ]; then
+ echo "Missing NFS master node"
+ usage
+fi
+
+MASTER_IP=$1
+
+#Install NFS common
+sudo apt-get update
+sudo apt-get install -y nfs-common
+
+#Create NFS directory
+sudo mkdir -p /dockerdata-nfs
+
+#Mount the remote NFS directory to the local one
+sudo mount $MASTER_IP:/dockerdata-nfs /dockerdata-nfs/
+echo "$MASTER_IP:/dockerdata-nfs /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" | sudo tee -a /etc/fstab
diff --git a/docs/archived/yaml/cluster.yml b/docs/archived/yaml/cluster.yml
new file mode 100644
index 0000000000..0757e15a28
--- /dev/null
+++ b/docs/archived/yaml/cluster.yml
@@ -0,0 +1,156 @@
+# An example of an HA Kubernetes cluster for ONAP
+nodes:
+- address: 10.12.6.85
+ port: "22"
+ internal_address: 10.0.0.8
+ role:
+ - controlplane
+ - etcd
+ hostname_override: "onap-control-1"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.90
+ port: "22"
+ internal_address: 10.0.0.11
+ role:
+ - controlplane
+ - etcd
+ hostname_override: "onap-control-2"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.89
+ port: "22"
+ internal_address: 10.0.0.12
+ role:
+ - controlplane
+ - etcd
+ hostname_override: "onap-control-3"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.165
+ port: "22"
+ internal_address: 10.0.0.14
+ role:
+ - worker
+ hostname_override: "onap-k8s-1"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.238
+ port: "22"
+ internal_address: 10.0.0.26
+ role:
+ - worker
+ hostname_override: "onap-k8s-2"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.126
+ port: "22"
+ internal_address: 10.0.0.5
+ role:
+ - worker
+ hostname_override: "onap-k8s-3"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.11
+ port: "22"
+ internal_address: 10.0.0.6
+ role:
+ - worker
+ hostname_override: "onap-k8s-4"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.244
+ port: "22"
+ internal_address: 10.0.0.9
+ role:
+ - worker
+ hostname_override: "onap-k8s-5"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.249
+ port: "22"
+ internal_address: 10.0.0.17
+ role:
+ - worker
+ hostname_override: "onap-k8s-6"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.191
+ port: "22"
+ internal_address: 10.0.0.20
+ role:
+ - worker
+ hostname_override: "onap-k8s-7"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.111
+ port: "22"
+ internal_address: 10.0.0.10
+ role:
+ - worker
+ hostname_override: "onap-k8s-8"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.195
+ port: "22"
+ internal_address: 10.0.0.4
+ role:
+ - worker
+ hostname_override: "onap-k8s-9"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.160
+ port: "22"
+ internal_address: 10.0.0.16
+ role:
+ - worker
+ hostname_override: "onap-k8s-10"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.74
+ port: "22"
+ internal_address: 10.0.0.18
+ role:
+ - worker
+ hostname_override: "onap-k8s-11"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.82
+ port: "22"
+ internal_address: 10.0.0.7
+ role:
+ - worker
+ hostname_override: "onap-k8s-12"
+ user: ubuntu
+ ssh_key_path: "~/.ssh/onap-key"
+services:
+ kube-api:
+ service_cluster_ip_range: 10.43.0.0/16
+ pod_security_policy: false
+ always_pull_images: false
+ kube-controller:
+ cluster_cidr: 10.42.0.0/16
+ service_cluster_ip_range: 10.43.0.0/16
+ kubelet:
+ cluster_domain: cluster.local
+ cluster_dns_server: 10.43.0.10
+ fail_swap_on: false
+network:
+ plugin: canal
+authentication:
+ strategy: x509
+ssh_key_path: "~/.ssh/onap-key"
+ssh_agent_auth: false
+authorization:
+ mode: rbac
+ignore_docker_version: false
+kubernetes_version: "v1.15.11-rancher1-2"
+private_registries:
+- url: nexus3.onap.org:10001
+ user: docker
+ password: docker
+ is_default: true
+cluster_name: "onap"
+restore:
+ restore: false
+ snapshot_name: ""
diff --git a/docs/archived/yaml/example-integration-override-v3.yaml b/docs/archived/yaml/example-integration-override-v3.yaml
new file mode 100644
index 0000000000..a55b1c08fc
--- /dev/null
+++ b/docs/archived/yaml/example-integration-override-v3.yaml
@@ -0,0 +1,69 @@
+#################################################################
+# This override file configures openstack parameters for ONAP
+#################################################################
+robot:
+ enabled: true
+ flavor: large
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
+ # KEYSTONE Version 3 Required for Rocky and beyond
+ openStackKeystoneAPIVersion: "v3"
+ # OS_AUTH_URL without the /v3 from the openstack .RC file
+ openStackKeyStoneUrl: "http://10.12.25.2:5000"
+ # tenantID=`openstack project show $tenantName | grep -w id | awk '{print $4}'`
+ # where "tenantName" is OS_PROJECT_NAME from openstack .RC file
+ openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
+ # OS_USERNAME from the openstack .RC file
+ openStackUserName: "OS_USERNAME_HERE"
+ # OS_PROJECT_DOMAIN_ID from the openstack .RC file
+ # in some environments it is a string but in other environments it may be numeric
+ openStackDomainId: "default"
+ # OS_USER_DOMAIN_NAME from the openstack .RC file
+ openStackUserDomain: "Default"
+ openStackProjectName: "OPENSTACK_PROJECT_NAME_HERE"
+ ubuntu14Image: "ubuntu-14-04-cloud-amd64"
+ ubuntu16Image: "ubuntu-16-04-cloud-amd64"
+ # From openstack network list output
+ openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
+ # From openstack network list output
+ openStackPrivateNetId: "83c84b68-80be-4990-8d7f-0220e3c6e5c8"
+ # From openstack network list output
+ openStackPrivateSubnetId: "e571c1d1-8ac0-4744-9b40-c3218d0a53a0"
+ openStackPrivateNetCidr: "10.0.0.0/16"
+ openStackOamNetworkCidrPrefix: "10.0"
+ # From openstack security group list output
+ openStackSecurityGroup: "bbe028dc-b64f-4f11-a10f-5c6d8d26dc89"
+ dcaeCollectorIp: "10.12.6.109"
+ # SSH public key
+ vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+ demoArtifactsVersion: "1.4.0"
+ demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+ scriptVersion: "1.4.0"
+ # rancher node IP where RKE is configured
+ rancherIpAddress: "10.12.6.160"
+ config:
+ # use the python utility to encrypt the OS_PASSWORD for the OS_USERNAME
+ openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PYTHON_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
+ openStackSoEncryptedPassword: "YYYYYYYYYYYYYYYYYYYYYYYY_OPENSTACK_JAVA_PASSWORD_HERE_YYYYYYYYYYYYYYYY"
+so:
+ enabled: true
+ so-catalog-db-adapter:
+ config:
+ openStackUserName: "OS_USERNAME_HERE"
+ # OS_AUTH_URL (keep the /v3) from the openstack .RC file
+ openStackKeyStoneUrl: "http://10.12.25.2:5000/v3"
+ # use the SO Java utility to encrypt the OS_PASSWORD for the OS_USERNAME
+ openStackEncryptedPasswordHere: "YYYYYYYYYYYYYYYYYYYYYYYY_OPENSTACK_JAVA_PASSWORD_HERE_YYYYYYYYYYYYYYYY"
+appc:
+ enabled: true
+ replicaCount: 3
+ config:
+ enableClustering: true
+ openStackType: "OpenStackProvider"
+ openStackName: "OpenStack"
+ # OS_AUTH_URL from the openstack .RC file
+ openStackKeyStoneUrl: "http://10.12.25.2:5000/v3"
+ openStackServiceTenantName: "OPENSTACK_PROJECT_NAME_HERE"
+ openStackDomain: "OPEN_STACK_DOMAIN_NAME_HERE"
+ openStackUserName: "OS_USER_NAME_HERE"
+ openStackEncryptedPassword: "OPENSTACK_CLEAR_TEXT_PASSWORD_HERE"
diff --git a/docs/archived/yaml/example-integration-override.yaml b/docs/archived/yaml/example-integration-override.yaml
new file mode 100644
index 0000000000..5eeee5e2f5
--- /dev/null
+++ b/docs/archived/yaml/example-integration-override.yaml
@@ -0,0 +1,56 @@
+#################################################################
+# This override file configures openstack parameters for ONAP
+#################################################################
+appc:
+ config:
+ enableClustering: false
+ openStackType: "OpenStackProvider"
+ openStackName: "OpenStack"
+ # OS_AUTH_URL from the openstack .RC file
+ openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+ openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE"
+ # OS_USER_DOMAIN_NAME from the openstack .RC file
+ openStackDomain: "Default"
+ openStackUserName: "OPENSTACK_USERNAME_HERE"
+ openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
+robot:
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
+ # OS_AUTH_URL without the /v2.0 from the openstack .RC file
+ openStackKeyStoneUrl: "http://10.12.25.2:5000"
+ # From openstack network list output
+ openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
+ # tenantID=`openstack project show $tenantName | grep -w id | awk '{print $4}'`
+ # where "tenantName" is OS_PROJECT_NAME from openstack .RC file
+ openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
+ openStackUserName: "OPENSTACK_USERNAME_HERE"
+ ubuntu14Image: "ubuntu-14-04-cloud-amd64"
+ ubuntu16Image: "ubuntu-16-04-cloud-amd64"
+ # From openstack network list output
+ openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313"
+ # From openstack network list output
+ openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3"
+ openStackPrivateNetCidr: "10.0.0.0/16"
+ # From openstack security group list output
+ openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0"
+ openStackOamNetworkCidrPrefix: "10.0"
+ # Control node IP
+ dcaeCollectorIp: "10.12.6.88"
+ # SSH public key
+ vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+ demoArtifactsVersion: "1.4.0-SNAPSHOT"
+ demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+ scriptVersion: "1.4.0-SNAPSHOT"
+ # rancher node IP where RKE is configured
+ rancherIpAddress: "10.12.5.127"
+ config:
+ # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
+ openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
+so:
+ # so server configuration
+ so-catalog-db-adapter:
+ config:
+ openStackUserName: "OPENSTACK_USERNAME_HERE"
+ # OS_AUTH_URL from the openstack .RC file
+ openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+ openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"