summaryrefslogtreecommitdiffstats
path: root/docs
diff options
context:
space:
mode:
Diffstat (limited to 'docs')
-rw-r--r--docs/helm-search.txt82
-rw-r--r--docs/oom_cloud_setup_guide.rst13
-rw-r--r--docs/oom_developer_guide.rst7
-rw-r--r--docs/oom_quickstart_guide.rst29
-rw-r--r--docs/oom_setup_ingress_controller.rst4
-rw-r--r--docs/oom_setup_kubernetes_rancher.rst5
-rw-r--r--docs/oom_setup_paas.rst15
-rw-r--r--docs/oom_user_guide.rst57
-rw-r--r--docs/release-notes-beijing.rst10
-rw-r--r--docs/release-notes-casablanca.rst2
-rw-r--r--docs/release-notes-dublin.rst2
-rw-r--r--docs/release-notes-elalto.rst2
-rw-r--r--docs/release-notes-honolulu.rst2
-rw-r--r--docs/requirements-docs.txt16
-rw-r--r--docs/spelling_wordlist.txt10
15 files changed, 133 insertions, 123 deletions
diff --git a/docs/helm-search.txt b/docs/helm-search.txt
index 7fa7621e30..c17cd25207 100644
--- a/docs/helm-search.txt
+++ b/docs/helm-search.txt
@@ -1,41 +1,41 @@
-NAME CHART VERSION APP VERSION DESCRIPTION
-local/onap 8.0.0 Honolulu Open Network Automation Platform (ONAP)
-local/aaf 8.0.0 ONAP Application Authorization Framework
-local/aai 8.0.0 ONAP Active and Available Inventory
-local/appc 8.0.0 Application Controller
-local/cassandra 8.0.0 ONAP cassandra
-local/cds 8.0.0 ONAP Controller Design Studio (CDS)
-local/clamp 8.0.0 ONAP Clamp
-local/cli 8.0.0 ONAP Command Line Interface
-local/common 8.0.0 Common templates for inclusion in other charts
-local/consul 8.0.0 ONAP Consul Agent
-local/contrib 8.0.0 ONAP optional tools
-local/cps 8.0.0 ONAP Configuration Persistene Service (CPS)
-local/dcaegen2 8.0.0 ONAP DCAE Gen2
-local/dgbuilder 8.0.0 D.G. Builder application
-local/dmaap 8.0.0 ONAP DMaaP components
-local/log 8.0.0 ONAP Logging ElasticStack
-local/mariadb-galera 8.0.0 Chart for MariaDB Galera cluster
-local/mongo 8.0.0 MongoDB Server
-local/msb 8.0.0 ONAP MicroServices Bus
-local/multicloud 8.0.0 ONAP multicloud broker
-local/music 8.0.0 MUSIC - Multi-site State Coordination Service
-local/mysql 8.0.0 MySQL Server
-local/nbi 8.0.0 ONAP Northbound Interface
-local/network-name-gen 8.0.0 Name Generation Micro Service
-local/nfs-provisioner 8.0.0 NFS provisioner
-local/oof 8.0.0 ONAP Optimization Framework
-local/policy 8.0.0 ONAP Policy Administration Point
-local/pomba 8.0.0 ONAP Post Orchestration Model Based Audit
-local/portal 8.0.0 ONAP Web Portal
-local/postgres 8.0.0 ONAP Postgres Server
-local/robot 8.0.0 A helm Chart for kubernetes-ONAP Robot
-local/sdc 8.0.0 Service Design and Creation Umbrella Helm charts
-local/sdnc 8.0.0 SDN Controller
-local/sdnc-prom 8.0.0 ONAP SDNC Policy Driven Ownership Management
-local/sniro-emulator 8.0.0 ONAP Mock Sniro Emulator
-local/so 8.0.0 ONAP Service Orchestrator
-local/uui 8.0.0 ONAP uui
-local/vfc 8.0.0 ONAP Virtual Function Controller (VF-C)
-local/vid 8.0.0 ONAP Virtual Infrastructure Deployment
-local/vnfsdk 8.0.0 ONAP VNF SDK
+NAME CHART VERSION APP VERSION DESCRIPTION
+local/onap 9.0.0 Istanbul Open Network Automation Platform (ONAP)
+local/aaf 9.0.0 ONAP Application Authorization Framework
+local/aai 9.0.0 ONAP Active and Available Inventory
+local/appc 9.0.0 Application Controller
+local/cassandra 9.0.0 ONAP cassandra
+local/cds 9.0.0 ONAP Controller Design Studio (CDS)
+local/clamp 9.0.0 ONAP Clamp
+local/cli 9.0.0 ONAP Command Line Interface
+local/common 9.0.0 Common templates for inclusion in other charts
+local/consul 9.0.0 ONAP Consul Agent
+local/contrib 9.0.0 ONAP optional tools
+local/cps 9.0.0 ONAP Configuration Persistence Service (CPS)
+local/dcaegen2 9.0.0 ONAP DCAE Gen2
+local/dgbuilder 9.0.0 D.G. Builder application
+local/dmaap 9.0.0 ONAP DMaaP components
+local/log 9.0.0 ONAP Logging ElasticStack
+local/mariadb-galera 9.0.0 Chart for MariaDB Galera cluster
+local/mongo 9.0.0 MongoDB Server
+local/msb 9.0.0 ONAP MicroServices Bus
+local/multicloud 9.0.0 ONAP multicloud broker
+local/music 9.0.0 MUSIC - Multi-site State Coordination Service
+local/mysql 9.0.0 MySQL Server
+local/nbi 9.0.0 ONAP Northbound Interface
+local/network-name-gen 9.0.0 Name Generation Micro Service
+local/nfs-provisioner 9.0.0 NFS provisioner
+local/oof 9.0.0 ONAP Optimization Framework
+local/policy 9.0.0 ONAP Policy Administration Point
+local/pomba 9.0.0 ONAP Post Orchestration Model Based Audit
+local/portal 9.0.0 ONAP Web Portal
+local/postgres 9.0.0 ONAP Postgres Server
+local/robot 9.0.0 A helm Chart for kubernetes-ONAP Robot
+local/sdc 9.0.0 Service Design and Creation Umbrella Helm charts
+local/sdnc 9.0.0 SDN Controller
+local/sdnc-prom 9.0.0 ONAP SDNC Policy Driven Ownership Management
+local/sniro-emulator 9.0.0 ONAP Mock Sniro Emulator
+local/so 9.0.0 ONAP Service Orchestrator
+local/uui 9.0.0 ONAP uui
+local/vfc 9.0.0 ONAP Virtual Function Controller (VF-C)
+local/vid 9.0.0 ONAP Virtual Infrastructure Deployment
+local/vnfsdk 9.0.0 ONAP VNF SDK
diff --git a/docs/oom_cloud_setup_guide.rst b/docs/oom_cloud_setup_guide.rst
index 033ba43fe4..70f5190e8a 100644
--- a/docs/oom_cloud_setup_guide.rst
+++ b/docs/oom_cloud_setup_guide.rst
@@ -56,8 +56,8 @@ The versions of Kubernetes that are supported by OOM are as follows:
el alto 1.15.2 2.14.2 1.15.2 18.09.x
frankfurt 1.15.9 2.16.6 1.15.11 18.09.x
guilin 1.15.11 2.16.10 1.15.11 18.09.x
- Honolulu 1.19.9 3.5.2 1.19.9 19.03.x
- Istanbul 1.2.0
+ honolulu 1.19.9 3.5.2 1.19.9 19.03.x 1.2.0
+ Istanbul 1.19.11 3.6.3 1.19.11 19.03.x 1.5.4
============== =========== ======= ======== ======== ============
.. note::
@@ -80,10 +80,11 @@ components that are needed will drastically reduce the requirements.
===== ===== ====== ====================
.. note::
- Kubernetes supports a maximum of 110 pods per node - configurable in the --max-pods=n setting off the
- "additional kubelet flags" box in the kubernetes template window described in 'ONAP Development - 110 pod limit Wiki'
- - this limit does not need to be modified . The use of many small
- nodes is preferred over a few larger nodes (for example 14x16GB - 8 vCores each).
+ Kubernetes supports a maximum of 110 pods per node - configurable in the
+ --max-pods=n setting off the "additional kubelet flags" box in the kubernetes
+ template window described in 'ONAP Development - 110 pod limit Wiki'
+ - this limit does not need to be modified . The use of many small nodes is
+ preferred over a few larger nodes (for example 14x16GB - 8 vCores each).
Subsets of ONAP may still be deployed on a single node.
Cloud Installation
diff --git a/docs/oom_developer_guide.rst b/docs/oom_developer_guide.rst
index 3d8cdb1128..552aea25a7 100644
--- a/docs/oom_developer_guide.rst
+++ b/docs/oom_developer_guide.rst
@@ -126,9 +126,10 @@ The top level of the ONAP charts is shown below:
└── configs
The common section of charts consists of a set of templates that assist with
-parameter substitution (`_name.tpl`, `_namespace.tpl` and others) and a set of charts
-for components used throughout ONAP. When the common components are used by other charts they
-are instantiated each time or we can deploy a shared instances for several components.
+parameter substitution (`_name.tpl`, `_namespace.tpl` and others) and a set of
+charts for components used throughout ONAP. When the common components are used
+by other charts they are instantiated each time or we can deploy a shared
+instance for several components.
All of the ONAP components have charts that follow the pattern shown below:
diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst
index d573c94bb0..6b91b2ca42 100644
--- a/docs/oom_quickstart_guide.rst
+++ b/docs/oom_quickstart_guide.rst
@@ -27,11 +27,18 @@ where <BRANCH> can be an official release tag, such as
* 6.0.0 for Frankfurt
* 7.0.0 for Guilin
* 8.0.0 for Honolulu
+* 9.0.0 for Istanbul
**Step 2.** Install Helm Plugins required to deploy ONAP::
> cp -R ~/oom/kubernetes/helm/plugins/ ~/.local/share/helm/plugins
- > helm plugin install https://github.com/chartmuseum/helm-push.git
+ > helm plugin install https://github.com/chartmuseum/helm-push.git \
+ --version 0.9.0
+
+.. note::
+ The ``--version 0.9.0`` is required as new version of helm (3.7.0 and up) is
+ now using ``push`` directly and helm-push is using ``cm-push`` starting
+ version ``0.10.0`` and up.
**Step 3.** Install Chartmuseum::
@@ -50,8 +57,8 @@ an override file like `onap-all.yaml`, `onap-vfw.yaml` or `openstack.yaml` file
to suit your deployment with items like the OpenStack tenant information.
.. note::
- Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`) can be found in
- the `oom/kubernetes/onap/resources/overrides/` directory.
+ Standard and example override files (e.g. `onap-all.yaml`, `openstack.yaml`)
+ can be found in the `oom/kubernetes/onap/resources/overrides/` directory.
a. You may want to selectively enable or disable ONAP components by changing
@@ -62,14 +69,15 @@ to suit your deployment with items like the OpenStack tenant information.
the Robot Helm charts or Robot section of `openstack.yaml`
- c. Encrypt the OpenStack password using the java based script for SO Helm charts
- or SO section of `openstack.yaml`.
+ c. Encrypt the OpenStack password using the java based script for SO Helm
+ charts or SO section of `openstack.yaml`.
d. Update the OpenStack parameters that will be used by Robot, SO and APPC Helm
charts or use an override file to replace them.
- e. Add in the command line a value for the global master password (global.masterPassword).
+ e. Add in the command line a value for the global master password
+ (global.masterPassword).
@@ -245,9 +253,11 @@ for use::
> kubectl get pods -n onap -o=wide
.. note::
- While all pods may be in a Running state, it is not a guarantee that all components are running fine.
+ While all pods may be in a Running state, it is not a guarantee that all
+ components are running fine.
- Launch the healthcheck tests using Robot to verify that the components are healthy::
+ Launch the healthcheck tests using Robot to verify that the components are
+ healthy::
> ~/oom/kubernetes/robot/ete-k8s.sh onap health
@@ -256,4 +266,5 @@ for use::
> helm undeploy dev
-More examples of using the deploy and undeploy plugins can be found here: https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins
+More examples of using the deploy and undeploy plugins can be found here:
+https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins
diff --git a/docs/oom_setup_ingress_controller.rst b/docs/oom_setup_ingress_controller.rst
index e49c72a2e4..cb385da141 100644
--- a/docs/oom_setup_ingress_controller.rst
+++ b/docs/oom_setup_ingress_controller.rst
@@ -139,8 +139,8 @@ MetalLB Load balancer can be easily installed using automatic install script::
> ./install-metallb-on-cluster.sh
-Configuration Ngninx ingress controller
-=======================================
+Configuration Nginx ingress controller
+======================================
After installation DNS server and ingress controller we can install and
configure ingress controller.
diff --git a/docs/oom_setup_kubernetes_rancher.rst b/docs/oom_setup_kubernetes_rancher.rst
index eea46c0e51..6272f83238 100644
--- a/docs/oom_setup_kubernetes_rancher.rst
+++ b/docs/oom_setup_kubernetes_rancher.rst
@@ -276,7 +276,8 @@ Download and install RKE on a VM, desktop or laptop.
Binaries can be found here for Linux and Mac: https://github.com/rancher/rke/releases/tag/v1.0.6
.. note::
- There are several ways to install RKE. Further parts of this documentation assumes that you have rke command available.
+ There are several ways to install RKE. Further parts of this documentation
+ assume that you have the rke command available.
If you don't know how to install RKE you may follow the below steps:
* chmod +x ./rke_linux-amd64
@@ -427,7 +428,7 @@ share a common, distributed filesystem. In this tutorial, we will setup an
NFS Master, and configure all Worker nodes a Kubernetes cluster to play
the role of NFS slaves.
-It is recommneded that a separate VM, outside of the kubernetes
+It is recommended that a separate VM, outside of the kubernetes
cluster, be used. This is to ensure that the NFS Master does not compete for
resources with Kubernetes Control Plane or Worker Nodes.
diff --git a/docs/oom_setup_paas.rst b/docs/oom_setup_paas.rst
index 845fd473e0..ed632dac36 100644
--- a/docs/oom_setup_paas.rst
+++ b/docs/oom_setup_paas.rst
@@ -16,7 +16,7 @@ Starting from Honolulu release, Cert-Manager and Prometheus Stack are a part
of k8s PaaS for ONAP operations and can be installed to provide
additional functionality for ONAP engineers.
-The versions of PaaS compoents that are supported by OOM are as follows:
+The versions of PaaS components that are supported by OOM are as follows:
.. table:: ONAP PaaS components
@@ -24,6 +24,7 @@ The versions of PaaS compoents that are supported by OOM are as follows:
Release Cert-Manager Prometheus Stack
============== ============= =================
honolulu 1.2.0 13.x
+ istanbul 1.5.4 19.x
============== ============= =================
This guide provides instructions on how to install the PaaS
@@ -46,7 +47,7 @@ date, and attempt to renew certificates at a configured time before expiry.
Installation steps
------------------
-The recommended version of Cert-Manager for Kubernetes 1.19 is v1.2.0.
+The recommended version of Cert-Manager for Kubernetes 1.19 is v1.5.4.
Cert-Manager is deployed using regular YAML manifests which include all
the needed resources (the CustomResourceDefinitions, cert-manager,
namespace, and the webhook component).
@@ -61,7 +62,7 @@ steps, please refer to `Cert-Manager kubectl plugin documentation`_.
Installation can be as simple as::
- > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml
+ > kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
Prometheus Stack (optional)
===========================
@@ -79,8 +80,8 @@ Installation steps
------------------
The recommended version of kube-prometheus-stack chart for
-Kubernetes 1.19 is 13.x (which is currently the latest major chart version),
-for example 13.3.1.
+Kubernetes 1.19 is 19.x (which is currently the latest major chart version),
+for example 19.0.2.
In order to install Prometheus Stack, you must follow these steps:
@@ -100,6 +101,6 @@ In order to install Prometheus Stack, you must follow these steps:
> helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus
- To install the kube-prometheus-stack Helm chart in specific version, for example 13.3.1::
+ To install the kube-prometheus-stack Helm chart in specific version, for example 19.0.2::
- > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus --version=13.3.1
+ > helm install prometheus prometheus-community/kube-prometheus-stack --namespace=prometheus --version=19.0.2
diff --git a/docs/oom_user_guide.rst b/docs/oom_user_guide.rst
index 3a707e25ea..2059251928 100644
--- a/docs/oom_user_guide.rst
+++ b/docs/oom_user_guide.rst
@@ -204,7 +204,7 @@ deployment::
To install a specific version of a single ONAP component (`so` in this example)
with the given release name enter::
- > helm deploy so onap/so --version 8.0.0 --set global.masterPassword=password --set global.flavor=unlimited --namespace onap
+ > helm deploy so onap/so --version 9.0.0 --set global.masterPassword=password --set global.flavor=unlimited --namespace onap
.. note::
The dependent components should be installed for component being installed
@@ -332,7 +332,7 @@ file:
dependencies:
<...>
- name: so
- version: ~8.0.0
+ version: ~9.0.0
repository: '@local'
condition: so.enabled
<...>
@@ -444,23 +444,24 @@ the portal and then simply access now the new ssl-encrypted URL:
| Alternatives Considered:
- - Kubernetes port forwarding was considered but discarded as it would require
- the end user to run a script that opens up port forwarding tunnels to each of
- the pods that provides a portal application widget.
+ - Kubernetes port forwarding was considered but discarded as it would
+ require the end user to run a script that opens up port forwarding tunnels
+ to each of the pods that provides a portal application widget.
- Reverting to a VNC server similar to what was deployed in the Amsterdam
- release was also considered but there were many issues with resolution, lack
- of volume mount, /etc/hosts dynamic update, file upload that were a tall order
- to solve in time for the Beijing release.
+ release was also considered but there were many issues with resolution,
+ lack of volume mount, /etc/hosts dynamic update, file upload that were
+ a tall order to solve in time for the Beijing release.
Observations:
- - If you are not using floating IPs in your Kubernetes deployment and directly attaching
- a public IP address (i.e. by using your public provider network) to your K8S Node
- VMs' network interface, then the output of 'kubectl -n onap get services | grep "portal-app"'
+ - If you are not using floating IPs in your Kubernetes deployment and
+ directly attaching a public IP address (i.e. by using your public provider
+ network) to your K8S Node VMs' network interface, then the output of
+ 'kubectl -n onap get services | grep "portal-app"'
will show your public IP instead of the private network's IP. Therefore,
- you can grab this public IP directly (as compared to trying to find the floating
- IP first) and map this IP in /etc/hosts.
+ you can grab this public IP directly (as compared to trying to find the
+ floating IP first) and map this IP in /etc/hosts.
.. figure:: oomLogoV2-Monitor.png
:align: right
@@ -561,15 +562,15 @@ Below is the example for the same::
> helm list
NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE
- dev 1 Wed Oct 14 13:49:52 2020 DEPLOYED onap-8.0.0 Honolulu onap
- dev-cassandra 5 Thu Oct 15 14:45:34 2020 DEPLOYED cassandra-8.0.0 onap
- dev-contrib 1 Wed Oct 14 13:52:53 2020 DEPLOYED contrib-8.0.0 onap
- dev-mariadb-galera 1 Wed Oct 14 13:55:56 2020 DEPLOYED mariadb-galera-8.0.0 onap
+ dev 1 Wed Oct 14 13:49:52 2020 DEPLOYED onap-9.0.0 Istanbul onap
+ dev-cassandra 5 Thu Oct 15 14:45:34 2020 DEPLOYED cassandra-9.0.0 onap
+ dev-contrib 1 Wed Oct 14 13:52:53 2020 DEPLOYED contrib-9.0.0 onap
+ dev-mariadb-galera 1 Wed Oct 14 13:55:56 2020 DEPLOYED mariadb-galera-9.0.0 onap
Here the Name column shows the RELEASE NAME, In our case we want to try the
scale operation on cassandra, thus the RELEASE NAME would be dev-cassandra.
-Now we need to obtain the chart name for casssandra. Use the below
+Now we need to obtain the chart name for cassandra. Use the below
command to get the chart name::
> helm search cassandra
@@ -578,21 +579,21 @@ Below is the example for the same::
> helm search cassandra
NAME CHART VERSION APP VERSION DESCRIPTION
- local/cassandra 8.0.0 ONAP cassandra
- local/portal-cassandra 8.0.0 Portal cassandra
- local/aaf-cass 8.0.0 ONAP AAF cassandra
- local/sdc-cs 8.0.0 ONAP Service Design and Creation Cassandra
+ local/cassandra 9.0.0 ONAP cassandra
+ local/portal-cassandra 9.0.0 Portal cassandra
+ local/aaf-cass 9.0.0 ONAP AAF cassandra
+ local/sdc-cs 9.0.0 ONAP Service Design and Creation Cassandra
Here the Name column shows the chart name. As we want to try the scale
-operation for cassandra, thus the correponding chart name is local/cassandra
+operation for cassandra, thus the corresponding chart name is local/cassandra
Now we have both the command's arguments, thus we can perform the
-scale opeartion for cassandra as follows::
+scale operation for cassandra as follows::
> helm upgrade dev-cassandra local/cassandra --set replicaCount=3
-Using this command we can scale up or scale down the cassadra db instances.
+Using this command we can scale up or scale down the cassandra db instances.
The ONAP components use Kubernetes provided facilities to build clustered,
@@ -643,7 +644,7 @@ Prior to doing an upgrade, determine of the status of the deployed charts::
> helm list
NAME REVISION UPDATED STATUS CHART NAMESPACE
- so 1 Mon Feb 5 10:05:22 2020 DEPLOYED so-8.0.0 onap
+ so 1 Mon Feb 5 10:05:22 2020 DEPLOYED so-9.0.0 onap
When upgrading a cluster a parameter controls the minimum size of the cluster
during the upgrade while another parameter controls the maximum number of nodes
@@ -680,7 +681,7 @@ To fetch release history enter::
> helm history so
REVISION UPDATED STATUS CHART DESCRIPTION
1 Mon Feb 5 10:05:22 2020 SUPERSEDED so-8.0.0 Install complete
- 2 Mon Feb 5 10:10:55 2020 DEPLOYED so-8.0.1 Upgrade complete
+ 2 Mon Feb 5 10:10:55 2020 DEPLOYED so-9.0.0 Upgrade complete
Unfortunately, not all upgrades are successful. In recognition of this the
lineup of pods within an ONAP deployment is tagged such that an administrator
@@ -703,7 +704,7 @@ For example, to roll-back back to previous system revision enter::
> helm history so
REVISION UPDATED STATUS CHART DESCRIPTION
1 Mon Feb 5 10:05:22 2020 SUPERSEDED so-8.0.0 Install complete
- 2 Mon Feb 5 10:10:55 2020 SUPERSEDED so-8.0.1 Upgrade complete
+ 2 Mon Feb 5 10:10:55 2020 SUPERSEDED so-9.0.0 Upgrade complete
3 Mon Feb 5 10:14:32 2020 DEPLOYED so-8.0.0 Rollback to 1
.. note::
diff --git a/docs/release-notes-beijing.rst b/docs/release-notes-beijing.rst
index 1172a086d2..84f86c100d 100644
--- a/docs/release-notes-beijing.rst
+++ b/docs/release-notes-beijing.rst
@@ -231,7 +231,7 @@ Bug
* [`OOM-514 <https://jira.onap.org/browse/OOM-514>`_] - Readiness prob fails sometimes even though the relevant pods are running
* [`OOM-539 <https://jira.onap.org/browse/OOM-539>`_] - Kube2MSB registrator doesn't support https REST service registration
* [`OOM-570 <https://jira.onap.org/browse/OOM-570>`_] - Wrong value is assigned to kube2msb AUTH_TOKEN environment variable
-* [`OOM-574 <https://jira.onap.org/browse/OOM-574>`_] - OOM configuration for robot doesnt copy heat templatese in dockerdata-nfs
+* [`OOM-574 <https://jira.onap.org/browse/OOM-574>`_] - OOM configuration for robot does not copy heat templates in dockerdata-nfs
* [`OOM-577 <https://jira.onap.org/browse/OOM-577>`_] - Incorrect evaluation of bash command in yaml template file (portal-vnc-dep.yaml)
* [`OOM-578 <https://jira.onap.org/browse/OOM-578>`_] - Hard coded token in oom/kubernetes/kube2msb/values.yaml file
* [`OOM-589 <https://jira.onap.org/browse/OOM-589>`_] - Can not acces CLI in vnc-portal
@@ -299,7 +299,7 @@ Bug
* [`OOM-913 <https://jira.onap.org/browse/OOM-913>`_] - Consul agent pod is failing
* [`OOM-916 <https://jira.onap.org/browse/OOM-916>`_] - Used to fix testing issues related to usability
* [`OOM-918 <https://jira.onap.org/browse/OOM-918>`_] - Policy - incorrect configmap mount causes base.conf to disappear
-* [`OOM-920 <https://jira.onap.org/browse/OOM-920>`_] - Issue with CLAMP configuation
+* [`OOM-920 <https://jira.onap.org/browse/OOM-920>`_] - Issue with CLAMP configuration
* [`OOM-921 <https://jira.onap.org/browse/OOM-921>`_] - align onap/values.yaml and onap/resources/environments/dev.yaml - different /dockerdata-nfs
* [`OOM-926 <https://jira.onap.org/browse/OOM-926>`_] - Disable clustering for APP-C out-of-the-box
* [`OOM-927 <https://jira.onap.org/browse/OOM-927>`_] - Need a production grade configuration override file of ONAP deployment
@@ -316,7 +316,7 @@ Bug
* [`OOM-948 <https://jira.onap.org/browse/OOM-948>`_] - make vfc got an error
* [`OOM-951 <https://jira.onap.org/browse/OOM-951>`_] - Update APPC charts based on on changes for ccsdk and Nitrogen ODL
* [`OOM-953 <https://jira.onap.org/browse/OOM-953>`_] - switch aai haproxy/hbase repo from hub.docker.com to nexus3
-* [`OOM-958 <https://jira.onap.org/browse/OOM-958>`_] - SDC-be deployment missing environment paramter
+* [`OOM-958 <https://jira.onap.org/browse/OOM-958>`_] - SDC-be deployment missing environment parameter
* [`OOM-964 <https://jira.onap.org/browse/OOM-964>`_] - SDC Healthcheck failure on sdc-be and sdc-kb containers down
* [`OOM-968 <https://jira.onap.org/browse/OOM-968>`_] - warning on default deployment values.yaml
* [`OOM-969 <https://jira.onap.org/browse/OOM-969>`_] - oomk8s images have no Dockerfile's
@@ -330,7 +330,7 @@ Bug
* [`OOM-993 <https://jira.onap.org/browse/OOM-993>`_] - AAI model-loader.properties not in sync with project file
* [`OOM-994 <https://jira.onap.org/browse/OOM-994>`_] - DCAE cloudify controller docker image 1.1.0 N/A - use 1.2.0/1.3.0
* [`OOM-1003 <https://jira.onap.org/browse/OOM-1003>`_] - dcae-cloudify-manager chart references obsolete image version
-* [`OOM-1004 <https://jira.onap.org/browse/OOM-1004>`_] - aai-resources constantly fails due to cassanda hostname
+* [`OOM-1004 <https://jira.onap.org/browse/OOM-1004>`_] - aai-resources constantly fails due to cassandra hostname
* [`OOM-1005 <https://jira.onap.org/browse/OOM-1005>`_] - AAI Widgets not loading due to duplicate volumes
* [`OOM-1007 <https://jira.onap.org/browse/OOM-1007>`_] - Update dcae robot health check config
* [`OOM-1008 <https://jira.onap.org/browse/OOM-1008>`_] - Set default consul server replica count to 1
@@ -366,7 +366,7 @@ Bug
* [`OOM-1068 <https://jira.onap.org/browse/OOM-1068>`_] - Update SO with new AAI cert
* [`OOM-1076 <https://jira.onap.org/browse/OOM-1076>`_] - some charts still using readiness check image from amsterdam 1.x
* [`OOM-1077 <https://jira.onap.org/browse/OOM-1077>`_] - AAI resources and traversal deployment failure on non-rancher envs
-* [`OOM-1079 <https://jira.onap.org/browse/OOM-1079>`_] - Robot charts dont allow over ride of pub_key, dcae_collector_ip and dcae_collector_port
+* [`OOM-1079 <https://jira.onap.org/browse/OOM-1079>`_] - Robot charts do not allow override of pub_key, dcae_collector_ip and dcae_collector_port
* [`OOM-1081 <https://jira.onap.org/browse/OOM-1081>`_] - Remove component 'mock' from TOSCA deployment
* [`OOM-1082 <https://jira.onap.org/browse/OOM-1082>`_] - Wrong pv location of dcae postgres
* [`OOM-1085 <https://jira.onap.org/browse/OOM-1085>`_] - appc hostname is incorrect in url
diff --git a/docs/release-notes-casablanca.rst b/docs/release-notes-casablanca.rst
index 6a6a196b6b..6b857309aa 100644
--- a/docs/release-notes-casablanca.rst
+++ b/docs/release-notes-casablanca.rst
@@ -30,7 +30,7 @@ areas:
class provisioner
* CPU and Memory limits in Helm Charts to improve Pod placement based on
- resource availablity in Kubernetes Cluster
+ resource availability in Kubernetes Cluster
* Support of Node Selectors for Pod placement
diff --git a/docs/release-notes-dublin.rst b/docs/release-notes-dublin.rst
index 1974756ea3..7a32297210 100644
--- a/docs/release-notes-dublin.rst
+++ b/docs/release-notes-dublin.rst
@@ -27,7 +27,7 @@ Summary
**Platform Resiliency**
-* Documenation of a Highly-Available Kubernetes Cluster Deployment
+* Documentation of a Highly-Available Kubernetes Cluster Deployment
* Availability of a Default Storage Class Provisioner for improved Persistent
Storage resiliency
* Availability of a CNI reference integration for Multi-site support
diff --git a/docs/release-notes-elalto.rst b/docs/release-notes-elalto.rst
index bbbf271a5f..b4059028e5 100644
--- a/docs/release-notes-elalto.rst
+++ b/docs/release-notes-elalto.rst
@@ -26,7 +26,7 @@ Version 5.0.1 (El Alto Release)
Summary
-------
-The focus of this release was on maintanence and as such no new features were
+The focus of this release was on maintenance and as such no new features were
delivered.
A list of issues resolved in this release can be found here: https://jira.onap.org/projects/OOM/versions/10726
diff --git a/docs/release-notes-honolulu.rst b/docs/release-notes-honolulu.rst
index 0c8d81f164..59f40ec0fa 100644
--- a/docs/release-notes-honolulu.rst
+++ b/docs/release-notes-honolulu.rst
@@ -136,7 +136,7 @@ Workarounds
-----------
- `<https://github.com/bitnami/bitnami-docker-mariadb-galera/issues/35>`_
- Workaround is to generate a password with "short" strenght or pregenerate
+ Workaround is to generate a password with "short" strength or pregenerate
passwords without single quote in it. Default deployment is using "short"
password generation for mariadb.
diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
deleted file mode 100644
index 1c1895acf6..0000000000
--- a/docs/requirements-docs.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-tox
-Sphinx
-doc8
-docutils
-setuptools
-six
-sphinx_rtd_theme>=0.4.3
-sphinxcontrib-blockdiag
-sphinxcontrib-needs<0.6.0
-sphinxcontrib-nwdiag
-sphinxcontrib-redoc
-sphinxcontrib-seqdiag
-sphinxcontrib-swaggerdoc
-sphinxcontrib-plantuml
-sphinx_bootstrap_theme
-lfdocs-conf
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt
index c860d4aa00..5140ec258f 100644
--- a/docs/spelling_wordlist.txt
+++ b/docs/spelling_wordlist.txt
@@ -1,5 +1,6 @@
AAF
AAI
+ACL
adaptor
Adaptor
adaptors
@@ -59,6 +60,7 @@ Fcaps
Financials
geocoder
Gerrit
+Git
Github
graphSON
guestOS
@@ -141,6 +143,7 @@ Junit
JUnit
Junits
JUnits
+Karaf
keypair
Keypair
keypairs
@@ -155,6 +158,7 @@ keytool
keyValue
Kibana
Kibibytes
+kubectl
Kubernetes
LF
lifecycle
@@ -173,6 +177,7 @@ macAddress
MacAddress
macOS
Malware
+MariaDB
metadata
Metadata
microservice
@@ -217,14 +222,18 @@ onboarding
Onboarding
online
OOF
+oom
OOM
OpenDaylight
+OpenFlow
openo
OpenO
Opensource
Openstack
OpenStack
OSS
+ovs
+ovsdb
Pandoc
partitionKey
Partitionkey
@@ -272,6 +281,7 @@ refactored
Refactored
registrator
Registrator
+releng
repo
Repo
repos