author    Hansen, Tony (th1395) <th1395@att.com>  2023-05-23 21:27:17 +0000
committer Hansen, Tony (th1395) <th1395@att.com>  2023-05-31 21:01:29 +0000
commit    663df2c1b8d9176094a62b00b7e80de096180621 (patch)
tree      849841dc428fcfba07d9c39494f95ccd1f25da8a /docs/sections/services
parent    bcd436a1be4fbe2c8ad7ce074d86b94b8ff44f4e (diff)
clean up some sphinx warnings
Change-Id: I8c2d925e8b27b4740227af0be3ab5d6f7153ec38
Issue-ID: DCAEGEN2-3399
Signed-off-by: Hansen, Tony (th1395) <th1395@att.com>
Signed-off-by: Vijay Venkatesh Kumar <vv770d@att.com>
Diffstat (limited to 'docs/sections/services')
-rw-r--r--  docs/sections/services/datalake-handler/index.rst | 8
-rw-r--r--  docs/sections/services/datalake-handler/installation-helm.rst | 1
-rw-r--r--  docs/sections/services/datalake-handler/overview.rst | 22
-rw-r--r--  docs/sections/services/datalake-handler/userguide.rst | 48
-rw-r--r--  docs/sections/services/dfc/architecture.rst | 4
-rw-r--r--  docs/sections/services/dfc/certificates.rst | 44
-rw-r--r--  docs/sections/services/dfc/configuration.rst | 26
-rw-r--r--  docs/sections/services/dfc/consumedapis.rst | 2
-rw-r--r--  docs/sections/services/dfc/delivery.rst | 1
-rw-r--r--  docs/sections/services/dfc/http-notes.rst | 9
-rw-r--r--  docs/sections/services/dfc/installation-helm.rst | 2
-rw-r--r--  docs/sections/services/dfc/logging.rst | 4
-rw-r--r--  docs/sections/services/dfc/troubleshooting.rst | 20
-rw-r--r--  docs/sections/services/heartbeat-ms/architecture.rst | 6
-rw-r--r--  docs/sections/services/heartbeat-ms/build_setup.rst | 36
-rw-r--r--  docs/sections/services/heartbeat-ms/design.rst | 30
-rw-r--r--  docs/sections/services/heartbeat-ms/index.rst | 4
-rw-r--r--  docs/sections/services/heartbeat-ms/installation.rst | 52
-rw-r--r--  docs/sections/services/heartbeat-ms/testprocedure.rst | 5
-rw-r--r--  docs/sections/services/kpi-computation-ms/configuration.rst | 2
-rw-r--r--  docs/sections/services/kpi-computation-ms/installation-helm.rst | 4
-rw-r--r--  docs/sections/services/kpi-computation-ms/kpi_computation_ms_overview.rst | 18
-rw-r--r--  docs/sections/services/mapper/SampleSnmpTrapConversion.rst | 56
-rw-r--r--  docs/sections/services/mapper/delivery.rst | 4
-rw-r--r--  docs/sections/services/mapper/flow.rst | 8
-rw-r--r--  docs/sections/services/mapper/index.rst | 10
-rw-r--r--  docs/sections/services/mapper/mappingfile.rst | 4
-rw-r--r--  docs/sections/services/mapper/troubleshooting.rst | 4
-rw-r--r--  docs/sections/services/pm-mapper/configuration.rst | 20
-rw-r--r--  docs/sections/services/pm-mapper/installation-helm.rst | 3
-rw-r--r--  docs/sections/services/pm-mapper/troubleshooting.rst | 2
-rw-r--r--  docs/sections/services/pm-subscription-handler/installation.rst | 2
-rw-r--r--  docs/sections/services/pm-subscription-handler/logging.rst | 2
-rw-r--r--  docs/sections/services/pm-subscription-handler/offeredapi.rst | 6
-rw-r--r--  docs/sections/services/pm-subscription-handler/resources/monitoring-policy.json | 2
-rw-r--r--  docs/sections/services/prh/architecture.rst | 4
-rw-r--r--  docs/sections/services/prh/authorization.rst | 10
-rw-r--r--  docs/sections/services/prh/configuration.rst | 5
-rw-r--r--  docs/sections/services/prh/delivery.rst | 2
-rw-r--r--  docs/sections/services/prh/installation.rst | 7
-rw-r--r--  docs/sections/services/restconf/development_info.rst | 6
-rw-r--r--  docs/sections/services/restconf/functionality.rst | 6
-rw-r--r--  docs/sections/services/restconf/index.rst | 6
-rw-r--r--  docs/sections/services/serviceindex.rst | 8
-rw-r--r--  docs/sections/services/slice-analysis-ms/installation-helm.rst | 4
-rw-r--r--  docs/sections/services/slice-analysis-ms/runtime_configuration.rst | 31
-rw-r--r--  docs/sections/services/slice-analysis-ms/slice_analysis_ms_overview.rst | 4
-rw-r--r--  docs/sections/services/slice-analysis-ms/slice_analysis_ms_troubleshooting.rst | 14
-rw-r--r--  docs/sections/services/snmptrap/administration.rst | 8
-rw-r--r--  docs/sections/services/snmptrap/architecture.rst | 10
-rw-r--r--  docs/sections/services/snmptrap/configuration.rst | 4
-rw-r--r--  docs/sections/services/snmptrap/delivery.rst | 1
-rw-r--r--  docs/sections/services/snmptrap/installation.rst | 21
-rw-r--r--  docs/sections/services/snmptrap/logging.rst | 48
-rw-r--r--  docs/sections/services/snmptrap/offeredapis.rst | 18
-rw-r--r--  docs/sections/services/snmptrap/release-notes.rst | 11
-rw-r--r--  docs/sections/services/son-handler/installation-helm.rst | 6
-rw-r--r--  docs/sections/services/son-handler/son_handler_overview.rst | 12
-rw-r--r--  docs/sections/services/son-handler/son_handler_troubleshooting.rst | 20
-rw-r--r--  docs/sections/services/tcagen2-docker/configuration.rst | 2
-rw-r--r--  docs/sections/services/tcagen2-docker/functionality.rst | 6
-rw-r--r--  docs/sections/services/tcagen2-docker/index.rst | 3
-rw-r--r--  docs/sections/services/tcagen2-docker/installation-helm.rst | 1
-rw-r--r--  docs/sections/services/ves-http/architecture.rst | 14
-rw-r--r--  docs/sections/services/ves-http/configuration.rst | 1
-rw-r--r--  docs/sections/services/ves-http/delivery.rst | 2
-rw-r--r--  docs/sections/services/ves-http/installation-helm.rst | 15
-rw-r--r--  docs/sections/services/ves-http/stnd-defined-validation.rst | 23
-rw-r--r--  docs/sections/services/ves-http/tls-authentication.rst | 1
-rw-r--r--  docs/sections/services/ves-hv/design.rst | 6
-rw-r--r--  docs/sections/services/ves-hv/healthcheck-and-monitoring.rst | 2
-rw-r--r--  docs/sections/services/ves-hv/index.rst | 2
-rw-r--r--  docs/sections/services/ves-hv/installation-helm.rst | 13
-rw-r--r--  docs/sections/services/ves-hv/resources/base-configuration.json | 2
-rw-r--r--  docs/sections/services/ves-hv/resources/metrics_sample_response.txt | 2
-rw-r--r--  docs/sections/services/ves-hv/run-time-configuration.rst | 4
-rw-r--r--  docs/sections/services/ves-hv/troubleshooting.rst | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/architecture.rst | 1
-rw-r--r--  docs/sections/services/ves-openapi-manager/artifacts.rst | 1
-rw-r--r--  docs/sections/services/ves-openapi-manager/resources/artifact-no-stndDefined.yaml | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined-no-schemaReference.yaml | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined.yaml | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/resources/schema-map-example.json | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/resources/schema-map-invalid.json | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/resources/schema-map.json | 2
-rw-r--r--  docs/sections/services/ves-openapi-manager/use-cases.rst | 2
86 files changed, 431 insertions, 423 deletions
diff --git a/docs/sections/services/datalake-handler/index.rst b/docs/sections/services/datalake-handler/index.rst
index e4f1c905..b57b55dc 100644
--- a/docs/sections/services/datalake-handler/index.rst
+++ b/docs/sections/services/datalake-handler/index.rst
@@ -5,10 +5,10 @@
DataLake-Handler MS
===================
-**DataLake-Handler MS** is a software component of ONAP that can systematically persist the events from DMaaP into supported Big Data storage systems.
-It has a Admin UI, where a system administrator configures which Topics to be monitored, and to which data storage to store the data.
-It is also used to manage the settings of the storage and associated data analytics tool.
-The second part is the Feeder, which does the data transfer work and is horizontal scalable.
+**DataLake-Handler MS** is a software component of ONAP that can systematically persist the events from DMaaP into supported Big Data storage systems.
+It has an Admin UI, where a system administrator configures which Topics are to be monitored, and in which data storage the data is stored.
+It is also used to manage the settings of the storage and associated data analytics tool.
+The second part is the Feeder, which does the data transfer work and is horizontally scalable.
The third part, the Data Extraction Service (DES), exposes the data in the data storage via a REST API for other ONAP components and external systems to consume.
.. image:: DL-DES.PNG
diff --git a/docs/sections/services/datalake-handler/installation-helm.rst b/docs/sections/services/datalake-handler/installation-helm.rst
index 015094cf..e52f2fa2 100644
--- a/docs/sections/services/datalake-handler/installation-helm.rst
+++ b/docs/sections/services/datalake-handler/installation-helm.rst
@@ -104,4 +104,3 @@ Datalake-Des:
+-------------------------------+------------------------------------------------+
|PG_DB | Postgress database name |
+-------------------------------+------------------------------------------------+
-
diff --git a/docs/sections/services/datalake-handler/overview.rst b/docs/sections/services/datalake-handler/overview.rst
index fc14f995..f2d361a2 100644
--- a/docs/sections/services/datalake-handler/overview.rst
+++ b/docs/sections/services/datalake-handler/overview.rst
@@ -1,6 +1,6 @@
-.. This work is licensed under a Creative Commons Attribution 4.0
- International License. http://creativecommons.org/licenses/by/4.0
-
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
.. _docs_Datalake_Handler_MS:
Architecture
@@ -9,12 +9,12 @@ Architecture
Background
~~~~~~~~~~
-There are large amount of data flowing among ONAP components, mostly via DMaaP and Web Services.
-For example, all events/feed collected by DCAE collectors go through DMaaP.
-DMaaP is backed by Kafka, which is a system for Publish-Subscribe,
-where data is not meant to be permanent and gets deleted after certain retention period.
+There is a large amount of data flowing among ONAP components, mostly via DMaaP and Web Services.
+For example, all events/feed collected by DCAE collectors go through DMaaP.
+DMaaP is backed by Kafka, which is a system for Publish-Subscribe,
+where data is not meant to be permanent and gets deleted after certain retention period.
Kafka is not a database, which means that the data there is not meant for query.
-Though some components may store processed result into their local databases, most of the raw data will eventually lost.
+Though some components may store processed results into their local databases, most of the raw data will eventually be lost.
We should provide a systematic way to store these raw data, and even the processed result,
which will serve as the source for data analytics and machine learning, providing insight to the network operation.
@@ -31,15 +31,15 @@ Note that not all data storage systems in the picture are supported. In R6, the
- Elasticsearch and Kibana
- HDFS
-Depending on demands, new systems may be added to the supported list. In the following we use the term database for the storage,
+Depending on demands, new systems may be added to the supported list. In the following we use the term database for the storage,
even though HDFS is a file system (with simple settings, it can be treated as a database, e.g. Hive).
-Note that once the data is stored in databases, other ONAP components and systems will directly query data from the databases,
+Note that once the data is stored in databases, other ONAP components and systems will directly query data from the databases,
without interacting with DataLake Handler.
Description
~~~~~~~~~~~
-DataLake Handler's main function is to monitor and persist data flow through DMaaP and provide a query API for other component or external services. The databases are outside of ONAP scope,
+DataLake Handler's main function is to monitor and persist data flow through DMaaP and provide a query API for other component or external services. The databases are outside of ONAP scope,
since the data is expected to be huge, and a database may be a complicated cluster consisting of thousands of nodes.
Admin UI
diff --git a/docs/sections/services/datalake-handler/userguide.rst b/docs/sections/services/datalake-handler/userguide.rst
index f1de54d0..0a9a4222 100644
--- a/docs/sections/services/datalake-handler/userguide.rst
+++ b/docs/sections/services/datalake-handler/userguide.rst
@@ -1,10 +1,14 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
Admin UI User Guide
-------------------
Introduction
~~~~~~~~~~~~
-DataLake Admin UI aims to provide a user-friendly dashboard to easily monitor and
-manage DataLake configurations for the involved components, ONAP topics, databases,
+DataLake Admin UI aims to provide a user-friendly dashboard to easily monitor and
+manage DataLake configurations for the involved components, ONAP topics, databases,
and 3rd-party tools. Please refer to the link to access the Admin UI portal
via http://datalake-admin-ui:30479
@@ -13,9 +17,9 @@ DataLake Feeder Management
**************************
.. image:: ./images/adminui-feeder.png
-Click the "DataLake Feeder" on the menu bar, and the dashboard will show
-the overview DataLake Feeder information, such as the numbers of topics.
-Also, you can enable or disable DataLake Feeder process backend process
+Click the "DataLake Feeder" on the menu bar, and the dashboard will show
+the overview DataLake Feeder information, such as the numbers of topics.
+Also, you can enable or disable the DataLake Feeder backend process
by using the toggle switch.
@@ -23,14 +27,14 @@ Kafka Management
****************
.. image:: ./images/adminui-kafka.png
-Click the "Kafka" on the menu bar, and it provides the kafka resource settings
-including add, modify and delete in the page to fulfill your management demand.
+Click the "Kafka" on the menu bar, and it provides the kafka resource settings
+including add, modify and delete in the page to fulfill your management demand.
.. image:: ./images/adminui-kafka-edit.png
-You can modify the kafka resource via clicking the card,
-and click the plus button to add a new Kafka resource.
-Then, you will need to fill the required information such as identifying name,
+You can modify the kafka resource via clicking the card,
+and click the plus button to add a new Kafka resource.
+Then, you will need to fill the required information such as identifying name,
message router address and zookeeper address, and so on to build it up.
@@ -41,22 +45,22 @@ Topics Management
.. image:: ./images/adminui-topic-edit2.png
.. image:: ./images/adminui-topic-edit3.png
-The Topic page lists down all the topics which you have been configured
-by topic management. You can edit the topic setting via double click the specific row.
-The setting includes DataLake feeder status - catch the topic or not,
-data format, and the numbers of time to live for the topic.
-And choose one or more Kafka items as topic resource
+The Topic page lists all the topics that have been configured
+by topic management. You can edit a topic's settings by double-clicking the specific row.
+The setting includes DataLake feeder status - catch the topic or not,
+data format, and the numbers of time to live for the topic.
+And choosing one or more Kafka items as the topic resource
and defining the databases to store the topic info are necessary.
.. image:: ./images/adminui-topic-config.png
-For the default configuration of Topics, you can click the "Default configurations" button
+For the default configuration of Topics, you can click the "Default configurations" button
to do the setting. When you add a new topic, these configurations will be filled into the form automatically.
.. image:: ./images/adminui-topic-new.png
-To add a new topic for the DataLake Feeder, you can click the "plus icon" button
-to catch the data into the 3rd-party database.
+To add a new topic for the DataLake Feeder, you can click the "plus icon" button
+to catch the data into the 3rd-party database.
Please note that only existing topics in Kafka can be added.
@@ -65,7 +69,7 @@ Database Management
.. image:: ./images/adminui-dbs.png
.. image:: ./images/adminui-dbs-edit.png
-In the Database Management page, it allows you to add, modify and delete the database resources
+The Database Management page allows you to add, modify and delete the database resources
where the message from topics will be stored.
DataLake supports a bunch of databases including Couchbase DB, Apache Druid, Elasticsearch, HDFS, and MongoDB.
@@ -83,7 +87,7 @@ Currently, DataLake supports two Tools which are Kibana and Apache Superset.
.. image:: ./images/adminui-design.png
.. image:: ./images/adminui-design-edit.png
-After setting up the 3rd-party tools, you can import the template as the JSON, YAML or other formats
-for data exploration, data visualization and dashboarding. DataLake supports Kibana dashboarding,
-Kibana searching, Kibana visualization, Elasticsearch field mapping template,
+After setting up the 3rd-party tools, you can import the template as the JSON, YAML or other formats
+for data exploration, data visualization and dashboarding. DataLake supports Kibana dashboarding,
+Kibana searching, Kibana visualization, Elasticsearch field mapping template,
and Apache Druid Kafka indexing service.
diff --git a/docs/sections/services/dfc/architecture.rst b/docs/sections/services/dfc/architecture.rst
index 75913dbb..230a3419 100644
--- a/docs/sections/services/dfc/architecture.rst
+++ b/docs/sections/services/dfc/architecture.rst
@@ -20,7 +20,7 @@ DFC will handle the collection of bulk PM data flow:
DFC is delivered as one **Docker container** which hosts application server.
See `Delivery`_ for more information about the docker container.
-.. _Delivery: ./delivery.html
+.. _Delivery: ./delivery.rst
Functionality
"""""""""""""
@@ -49,5 +49,5 @@ affect the handling of others.
Generalized DFC
""""""""""""""""
From version 1.2.1 and onwards, the DFC has more general use. Instead of only handling PM files, any kind of files
-are handled. The 'changeIdentifier' field in the FileReady VES event (which is reported from the PNFs) identifies the
+are handled. The 'changeIdentifier' field in the FileReady VES event (which is reported from the PNFs) identifies the
file type. This is mapped to a publishing stream in the DR.
diff --git a/docs/sections/services/dfc/certificates.rst b/docs/sections/services/dfc/certificates.rst
index 9c4d46b2..d272dd8e 100644
--- a/docs/sections/services/dfc/certificates.rst
+++ b/docs/sections/services/dfc/certificates.rst
@@ -21,7 +21,7 @@ keys & certificates on both vsftpd server and DFC.
1. Generate key/certificate with openssl for DFC:
-------------------------------------------------
-.. code:: bash
+.. code-block:: bash
openssl genrsa -out dfc.key 2048
openssl req -new -out dfc.csr -key dfc.key
@@ -29,7 +29,7 @@ keys & certificates on both vsftpd server and DFC.
2. Generate key & certificate with openssl for vsftpd:
------------------------------------------------------
-.. code:: bash
+.. code-block:: bash
openssl genrsa -out ftp.key 2048
openssl req -new -out ftp.csr -key ftp.key
@@ -43,20 +43,20 @@ We have two keystore files, one for TrustManager, one for KeyManager.
1. First, convert your certificate in a DER format :
- .. code:: bash
+ .. code-block:: bash
openssl x509 -outform der -in ftp.crt -out ftp.der
2. And after copy existing keystore and password from container:
- .. code:: bash
+ .. code-block:: bash
kubectl cp <DFC pod>:/opt/app/datafile/etc/cert/trust.jks trust.jks
kubectl cp <DFC pod>:/opt/app/datafile/etc/cert/trust.pass trust.pass
3. Import DER certificate in the keystore :
- .. code:: bash
+ .. code-block:: bash
keytool -import -alias ftp -keystore trust.jks -file ftp.der
@@ -66,42 +66,48 @@ We have two keystore files, one for TrustManager, one for KeyManager.
Convert x509 Cert and Key to a pkcs12 file
- .. code:: bash
+ .. code-block:: bash
openssl pkcs12 -export -in dfc.crt -inkey dfc.key -out cert.p12 -name dfc
Note: Make sure you put a password on the p12 file - otherwise you'll get a null reference exception when you try to import it.
2. Create password files for cert.p12
- .. code:: bash
- printf "[your password]" > p12.pass
+ .. code-block:: bash
+
+ printf "[your password]" > p12.pass
4. Update existing KeyStore files
---------------------------------
Copy the new trust.jks and cert.p12 and password files from local environment to the DFC container.
- .. code:: bash
- mkdir mycert
- cp cert.p12 mycert/
- cp p12.pass mycert/
- cp trust.jks mycert/
- cp trust.pass mycert/
- kubectl cp mycert/ <DFC pod>:/opt/app/datafile/etc/cert/
+ .. code-block:: bash
+
+ mkdir mycert
+ cp cert.p12 mycert/
+ cp p12.pass mycert/
+ cp trust.jks mycert/
+ cp trust.pass mycert/
+ kubectl cp mycert/ <DFC pod>:/opt/app/datafile/etc/cert/
5. Update configuration in consul
-----------------------------------
Change path in consul:
- .. code:: bash
+
+.. code-block:: bash
+
dmaap.ftpesConfig.keyCert": "/opt/app/datafile/etc/cert/mycert/cert.p12
dmaap.ftpesConfig.keyPasswordPath": "/opt/app/datafile/etc/cert/mycert/p12.pass
dmaap.ftpesConfig.trustedCa": "/opt/app/datafile/etc/cert/mycert/trust.jks
dmaap.ftpesConfig.trustedCaPasswordPath": "/opt/app/datafile/etc/cert/mycert/trust.pass
Consul's address: http://<worker external IP>:<Consul External Port>
- .. code:: bash
- kubectl -n onap get svc | grep consul
+
+ .. code-block:: bash
+
+ kubectl -n onap get svc | grep consul
.. image:: ./consule-certificate-update.png
@@ -132,4 +138,4 @@ Consul's address: http://<worker external IP>:<Consul External Port>
7. Other conditions
---------------------------------------------------------------------------
This has been tested with vsftpd and dfc, with self-signed certificates.
- In real deployment, we should use ONAP-CA signed certificate for DFC, and vendor-CA signed certificate for xNF
+ In real deployment, we should use ONAP-CA signed certificate for DFC, and vendor-CA signed certificate for xNF.
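For reference, the key/certificate steps above can be scripted end to end for a lab setup. The sketch below assumes self-signed certificates (as in the tested setup described in section 7); the subject CNs and validity period are illustrative assumptions:

    # 1. key + CSR for DFC (matches step 1 above)
    openssl genrsa -out dfc.key 2048
    openssl req -new -key dfc.key -out dfc.csr -subj "/CN=dfc"
    # self-sign for lab use; real deployments should use an ONAP-CA signed certificate
    openssl x509 -req -in dfc.csr -signkey dfc.key -days 365 -out dfc.crt

    # 2. same flow for the vsftpd server
    openssl genrsa -out ftp.key 2048
    openssl req -new -key ftp.key -out ftp.csr -subj "/CN=vsftpd"
    openssl x509 -req -in ftp.csr -signkey ftp.key -days 365 -out ftp.crt

    # 3. DER conversion used when importing into the trust keystore (step 3 above)
    openssl x509 -outform der -in ftp.crt -out ftp.der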
diff --git a/docs/sections/services/dfc/configuration.rst b/docs/sections/services/dfc/configuration.rst
index 69375c76..22f8d691 100644
--- a/docs/sections/services/dfc/configuration.rst
+++ b/docs/sections/services/dfc/configuration.rst
@@ -38,10 +38,10 @@ The user can also enable secure communication with the DMaaP Message Router.
DFC can handle multiple stream identifiers. For each stream identifier/feed combination the user must provide the
** stream identifier**, **feed name**, and **feed location**.
-**Note!** The **feed name** provided should be used by the consumer/s to set up the subscription to the feed.
+**Note!** The **feed name** provided should be used by the consumer/s to set up the subscription to the feed.
The **stream identifier** shall be defined as an item under the **streams_publishes** tag in the "**applicationConfig**"
-section.
+section.
.. code-block:: yaml
@@ -68,10 +68,10 @@ section.
Under this tag the internal "**feed identifier**" for the feed shall also be added to get the
info about the feed substituted in by CBS (that's what the <<>> tags are for).
-The **feed name** and **feed location** are defined as inputs for the user to provide in helm chart values.yaml. An example snapshot on default configuration is provided below.
+The **feed name** and **feed location** are defined as inputs for the user to provide in helm chart values.yaml. An example snapshot on default configuration is provided below.
.. code-block:: yaml
-
+
# DataRouter Feed Configuration
drFeedConfig:
- feedName: bulk_pm_feed
@@ -79,7 +79,7 @@ The **feed name** and **feed location** are defined as inputs for the user to pr
feedVersion: "0.0"
asprClassification: unclassified
feedDescription: DFC Feed Creation
-
+
# DataRouter Publisher Configuration
drPubConfig:
- feedName: bulk_pm_feed
@@ -95,7 +95,7 @@ Turn On/Off StrictHostChecking
**StrictHostChecking** is a SSH connection option which prevents Man in the Middle (MitM) attacks. If it is enabled, client checks HostName and public key provided by server and compares it with keys stored locally. Only if matching entry is found, SSH connection can be established.
By default in DataFile Collector this option is enabled (true) and requires to provide known_hosts list to DFC container.
-**Important: DFC requires public keys in sha-rsa KeyAlgorithm**
+**Important: DFC requires public keys in sha-rsa KeyAlgorithm**
**Known_hosts file** is a list in following format:
@@ -103,7 +103,7 @@ By default in DataFile Collector this option is enabled (true) and requires to p
<HostName/HostIP> <KeyAlgorithms> <Public Key>
-e.g:
+e.g.:
.. code-block:: bash
@@ -137,7 +137,7 @@ e.g:
3. Mount newly created Config Map as Volume to DFC by editing DFC deployment. **DFC deployment contains 3 containers, pay attention to mount the file to the appropriate container.**
.. code-block:: yaml
-
+
...
kind: Deployment
metadata:
@@ -165,7 +165,7 @@ e.g:
Known_hosts file path can be controlled by Environment Variable *KNOWN_HOSTS_FILE_PATH*. Full (absolute) path has to be provided. Sample deployment with changed known_hosts file path can be seen below.
.. code-block:: yaml
-
+
...
kind: Deployment
metadata:
@@ -177,7 +177,7 @@ Known_hosts file path can be controlled by Environment Variable *KNOWN_HOSTS_FIL
spec:
containers:
- image: <DFC image>
- envs:
+ envs:
- name: KNOWN_HOSTS_FILE_PATH
value: /home/datafile/.ssh/new/path/<known_hosts file name, e.g. my_custom_keys>
...
@@ -207,7 +207,7 @@ e.g:
kubectl -n onap edit cm onap-dcae-dfc-known-hosts
-To delete and create again Config Map execute:
+To delete and re-create the Config Map, execute:
.. code-block:: bash
@@ -226,7 +226,7 @@ To turn off StrictHostChecking, set below option to false. It could be changed i
**WARNING: such operation is not recommended as it decreases DFC security and exposes DFC to MitM attacks.**
-.. code-block:: bash
+.. code-block:: yaml
"sftp.security.strictHostKeyChecking": false
@@ -236,7 +236,7 @@ Disable TLS connection
----------------------
The TLS connection in the external interface is enabled by default. To disable TLS, use the following application property:
-.. code-block:: bash
+.. code-block:: yaml
"dmaap.certificateConfig.enableCertAuth": false
diff --git a/docs/sections/services/dfc/consumedapis.rst b/docs/sections/services/dfc/consumedapis.rst
index 258164a7..b1cf714b 100644
--- a/docs/sections/services/dfc/consumedapis.rst
+++ b/docs/sections/services/dfc/consumedapis.rst
@@ -69,4 +69,4 @@ Responses
| HTTP Code | Description |
+===========+=====================+
| **200** | successful response |
-+-----------+---------------------+ \ No newline at end of file
++-----------+---------------------+
diff --git a/docs/sections/services/dfc/delivery.rst b/docs/sections/services/dfc/delivery.rst
index b193bf30..508cc954 100644
--- a/docs/sections/services/dfc/delivery.rst
+++ b/docs/sections/services/dfc/delivery.rst
@@ -27,4 +27,3 @@ Choose your preferred settings (ssh, http or https, with or without hook) and ru
DFC deployment is handled through Helm charts under OOM repository `here`_.
.. _here: https://gerrit.onap.org/r/gitweb?p=oom.git;a=tree;f=kubernetes/dcaegen2-services/components/dcae-datafile-collector
-
diff --git a/docs/sections/services/dfc/http-notes.rst b/docs/sections/services/dfc/http-notes.rst
index c45c7bd8..0fe3a758 100644
--- a/docs/sections/services/dfc/http-notes.rst
+++ b/docs/sections/services/dfc/http-notes.rst
@@ -67,15 +67,14 @@ HTTPS connection with DFC
The file ready message for https server is the same as used in other protocols and http. The only difference is that the scheme is set to
"https":
-.. code-block:: bash
+.. code-block:: json
- ...
- "arrayOfNamedHashMap": [
+ {"arrayOfNamedHashMap": [
{
"name": "C_28532_measData_file.xml",
"hashMap": {
- "location": "https://login:password@server.com:443/file.xml.gz",
- ...
+ "location": "https://login:password@server.com:443/file.xml.gz"
+ }}]}
The processed uri depends on the https connection type that has to be established (client certificate authentication, basic
authentication, and no authentication).
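For orientation, a fuller (but still illustrative) file ready event for the HTTPS case is sketched below as a shell heredoc. The commonEventHeader is abbreviated and all field values are assumptions for a lab PNF simulator, not part of this patch:

    cat > file-ready-https.json <<'EOF'
    {
      "event": {
        "commonEventHeader": {
          "domain": "notification",
          "sourceName": "PNF0"
        },
        "notificationFields": {
          "changeIdentifier": "PM_MEAS_FILES",
          "changeType": "FileReady",
          "notificationFieldsVersion": "2.0",
          "arrayOfNamedHashMap": [
            {
              "name": "C_28532_measData_file.xml",
              "hashMap": {
                "location": "https://login:password@server.com:443/file.xml.gz",
                "compression": "gzip",
                "fileFormatType": "org.3GPP.32.435#measCollec",
                "fileFormatVersion": "V10"
              }
            }
          ]
        }
      }
    }
    EOF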
diff --git a/docs/sections/services/dfc/installation-helm.rst b/docs/sections/services/dfc/installation-helm.rst
index cfef688b..62c1709f 100644
--- a/docs/sections/services/dfc/installation-helm.rst
+++ b/docs/sections/services/dfc/installation-helm.rst
@@ -71,4 +71,4 @@ Example yaml file with DataFile Collector configuration:
type: message_router
-More information about properties could be found in configuration section, see :ref:`dfc_configuration`
+More information about these properties can be found in the configuration section, see :ref:`dfc_configuration`.
diff --git a/docs/sections/services/dfc/logging.rst b/docs/sections/services/dfc/logging.rst
index 51395eb0..ed218515 100644
--- a/docs/sections/services/dfc/logging.rst
+++ b/docs/sections/services/dfc/logging.rst
@@ -9,9 +9,9 @@ file located in datafile-app-server/config folder.
To activate logging, please follow the instructions on this `page`_.
-.. _page: ../troubleshooting.rst
+.. _page: ./troubleshooting.rst
**Where is the log file?**
-The log file is located under /var/log/ONAP/ and called application.log. \ No newline at end of file
+The log file is located under /var/log/ONAP/ and called application.log.
diff --git a/docs/sections/services/dfc/troubleshooting.rst b/docs/sections/services/dfc/troubleshooting.rst
index 680bf1ff..3bb132de 100644
--- a/docs/sections/services/dfc/troubleshooting.rst
+++ b/docs/sections/services/dfc/troubleshooting.rst
@@ -82,7 +82,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong trustedCaPassword:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.FileCollector |2019-04-24T14:05:54.494Z |WARN |Failed to download file: PNF0 A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz, reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.IOException: Keystore was tampered with, or password was incorrect |RequestID=A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz | | |FileCollectorWorker-2 |
\... |WARN |Failed to download file: ..., reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.IOException: Keystore was tampered with, or password was incorrect ...
@@ -93,7 +93,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong trustedCa:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.FileCollector |2019-04-24T14:11:22.584Z |WARN |Failed to download file: PNF0 A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz, reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.FileNotFoundException: **WRONGconfig/ftp.jks** |RequestID=A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz | | |FileCollectorWorker-2 |
\... |WARN |Failed to download file: ..., reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.FileNotFoundException: WRONGconfig/ftp.jks ...
@@ -103,7 +103,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong keyPassword:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.FileCollector |2019-04-24T14:15:40.694Z |WARN |Failed to download file: PNF0 A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz, reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.IOException: Keystore was tampered with, or password was incorrect |RequestID=A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz | | |FileCollectorWorker-2 |
\... |WARN |Failed to download file: ..., reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.IOException: Keystore was tampered with, or password was incorrect ...
@@ -113,7 +113,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong keyCert:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.FileCollector |2019-04-24T14:20:46.308Z |WARN |Failed to download file: PNF0 A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz, reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.FileNotFoundException: **WRONGconfig/dfc.jks (No such file or directory)** |RequestID=A20000626.2315+0200-2330+0200_PNF0-0-1MB.tar.gz | | |FileCollectorWorker-2 |
\... |WARN |Failed to download file: ..., reason: org.onap.dcaegen2.collectors.datafile.exceptions.DatafileTaskException: Could not open connection: java.io.FileNotFoundException: WRONGconfig/dfc.jks (No such file or directory) ...
@@ -123,7 +123,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong consumer dmaapHostName:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.ScheduledTasks |2019-04-24T14:27:06.578Z |ERROR |Polling for file ready message failed, exception: java.net.UnknownHostException: **WRONGlocalhost**: Try again, config: DmaapConsumerConfiguration{consumerId=C12, consumerGroup=OpenDcae-c12, timeoutMs=-1, messageLimit=1, **dmaapHostName=WRONGlocalhost**, dmaapPortNumber=2222, dmaapTopicName=/events/unauthenticated.VES_NOTIFICATION_OUTPUT, dmaapProtocol=http, dmaapUserName=, dmaapUserPassword=, dmaapContentType=application/json, trustStorePath=change it, trustStorePasswordPath=change it, keyStorePath=change it, keyStorePasswordPath=change it, enableDmaapCertAuth=false} |RequestID=90fe7450-0bc2-4bf6-a2f0-2aeef6f196ae | | |reactor-http-epoll-3 |
\... |ERROR |Polling for file ready message failed, exception: java.net.UnknownHostException: *WRONGlocalhost*, config: DmaapConsumerConfiguration{..., dmaapHostName=*WRONGlocalhost*, ...} ...
@@ -134,7 +134,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong consumer dmaapPortNumber:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.ScheduledTasks |2019-04-24T14:33:35.286Z |ERROR |Polling for file ready message failed, exception: io.netty.channel.AbstractChannel$AnnotatedConnectException: syscall:getsockopt(..) failed: Connection refused: localhost/127.0.0.1:**WRONGport**, config: DmaapConsumerConfiguration{consumerId=C12, consumerGroup=OpenDcae-c12, timeoutMs=-1, messageLimit=1, dmaapHostName=localhost, **dmaapPortNumber=WRONGport**, dmaapTopicName=/events/unauthenticated.VES_NOTIFICATION_OUTPUT, dmaapProtocol=http, dmaapUserName=, dmaapUserPassword=, dmaapContentType=application/json, trustStorePath=change it, trustStorePasswordPath=change it, keyStorePath=change it, keyStorePasswordPath=change it, enableDmaapCertAuth=false} |RequestID=b57c68fe-84bf-442f-accd-ea821a5a321f | | |reactor-http-epoll-3 |
\... |ERROR |Polling for file ready message failed, exception: io.netty.channel.AbstractChannel$AnnotatedConnectException: syscall:getsockopt(..) failed: Connection refused: localhost/127.0.0.1:*WRONGport*, config: DmaapConsumerConfiguration{..., dmaapPortNumber=*WRONGport*, ...} ...
@@ -145,7 +145,7 @@ DFC uses a number of configuration parameters. You can find below the kind of re
-Wrong consumer dmaapTopicName:
-.. code-block:: json
+.. code-block:: none
org.onap.dcaegen2.collectors.datafile.tasks.ScheduledTasks |2019-04-24T14:38:07.097Z |ERROR |Polling for file ready message failed, exception: java.lang.RuntimeException: DmaaPConsumer HTTP 404 NOT_FOUND, config: DmaapConsumerConfiguration{consumerId=C12, consumerGroup=OpenDcae-c12, timeoutMs=-1, messageLimit=1, dmaapHostName=localhost, dmaapPortNumber=2222, **dmaapTopicName=/events/unauthenticated.VES_NOTIFICATION_OUTPUTWRONG**, dmaapProtocol=http, dmaapUserName=, dmaapUserPassword=, dmaapContentType=application/json, trustStorePath=change it, trustStorePasswordPath=change it, keyStorePath=change it, keyStorePasswordPath=change it, enableDmaapCertAuth=false} |RequestID=8bd71bac-68af-494b-9518-3ab4478371cf | | |reactor-http-epoll-4 |
\... |ERROR |Polling for file ready message failed, exception: java.lang.RuntimeException: DmaaPConsumer HTTP 404 NOT_FOUND, config: DmaapConsumerConfiguration{..., dmaapTopicName=*/events/unauthenticated.VES_NOTIFICATION_OUTPUTWRONG*, ...} ...
@@ -161,10 +161,10 @@ Missing known_hosts file
""""""""""""""""""""""""
When StrictHostKeyChecking is enabled and DFC cannot find a known_hosts file, the warning information shown below is visible in the logfile. In this case, DFC acts like StrictHostKeyChecking is disabled.
-.. code-block:: bash
+.. code-block:: none
- org.onap.dcaegen2.collectors.datafile.ftp.SftpClient |2020-07-24T06:32:56.010Z
- |WARN |StrictHostKeyChecking is enabled but environment variable KNOWN_HOSTS_FILE_PATH is not set or points to not existing file [/home/datafile/.ssh/known_hosts] --> falling back to StrictHostKeyChecking='no'.
+ org.onap.dcaegen2.collectors.datafile.ftp.SftpClient |2020-07-24T06:32:56.010Z
+ |WARN |StrictHostKeyChecking is enabled but environment variable KNOWN_HOSTS_FILE_PATH is not set or points to not existing file [/home/datafile/.ssh/known_hosts] --> falling back to StrictHostKeyChecking='no'.
To resolve this warning, provide a known_hosts file or disable StrictHostKeyChecking, see DFC config page - :ref:`strict_host_checking_config`.
diff --git a/docs/sections/services/heartbeat-ms/architecture.rst b/docs/sections/services/heartbeat-ms/architecture.rst
index af96af32..16cf2d6b 100644
--- a/docs/sections/services/heartbeat-ms/architecture.rst
+++ b/docs/sections/services/heartbeat-ms/architecture.rst
@@ -9,7 +9,7 @@ configuration from CBS and parses these entries and saves them in the
postgres database having table name **vnf_table_1**. Each entry in the
configuration is for a particular eventName. Each entry has missed
heartbeat count, heartbeat interval, Control loop name etc. along with
-many other parameters.
+many other parameters.
Whenever a heartbeat event is received, the sourceName, lastEpochTime
and other information is stored in another postgres database having
@@ -38,8 +38,8 @@ function/method to download the CBS configuration.
The heartbeat microservice has 2 states
-**Reconfiguration state** – Download configuration from CBS and update
+**Reconfiguration state** - Download configuration from CBS and update
the vnf_table_1 is in progress.
-**Running state** – Normal working that comprises of receiving of HB
+**Running state** - Normal working that comprises of receiving of HB
events and sending of control loop event if required conditions are met.
diff --git a/docs/sections/services/heartbeat-ms/build_setup.rst b/docs/sections/services/heartbeat-ms/build_setup.rst
index 5df47234..e1465dcd 100644
--- a/docs/sections/services/heartbeat-ms/build_setup.rst
+++ b/docs/sections/services/heartbeat-ms/build_setup.rst
@@ -25,7 +25,7 @@ Docker build procedure
Clone the code using below command
::
- git clone --depth 1 https://gerrit.onap.org/r/dcaegen2/services/heartbeat
+ git clone --depth 1 https://gerrit.onap.org/r/dcaegen2/services/heartbeat
give executable permission to mvn-phase-script.sh if not there
already
@@ -54,10 +54,10 @@ CBS polling. The following environment variables are to be set.**
consumerID=1
If the postgres parameters are not there in environment setting file,
- then it takes the values from miss_htbt_service/config/hbproperties.yaml
- file. Make sure that postgres running in the machine where pg_ipAddress
- parameter is mentioned.
-
+ then it takes the values from miss_htbt_service/config/hbproperties.yaml
+ file. Make sure that postgres is running on the machine that the pg_ipAddress
+ parameter points to.
+
Run below netstat command to check postgres port number and IP address are fine.
::
@@ -75,7 +75,7 @@ CBS polling. The following environment variables are to be set.**
are as follows
::
-
+
pg_ipAddress: 10.0.4.1
pg_portNum: 5432
pg_userName: postgres
@@ -89,25 +89,25 @@ CBS polling. The following environment variables are to be set.**
correctly. Usually groupID remains the same for all instance of HB
where as consumerID would be changed for each instance of HB Micro
service. If groupID and consumerID is not provided, then it takes
- “DefaultGroup” and “1” respectively.
+ "DefaultGroup" and "1" respectively.
**Setting CBS configuration parameters using the consule KV URL.**
The sample consul KV is as below.
::
-
+
http://10.12.6.50:8500/ui/#/dc1/kv/mvp-dcaegen2-heartbeat-static
Go to the above link and click on KEY/VALUE tab
Click on mvp-dcaegen2-heartbeat-static
- Copy the configuration in the box provided and click on update.
-
+ Copy the configuration in the box provided and click on update.
+
The sample configuration is as below
-
+
.. code-block:: json
-
+
{
"heartbeat_config": {
"vnfs": [{
@@ -177,7 +177,7 @@ mentioned in the above section.**
heartbeat.test1:latest
To check the logs, run below command
-
+
::
sudo Docker logs -f hb1
@@ -191,17 +191,17 @@ mentioned in the above section.**
sudo Docker ps -a \| grep heartbeat.test1
Run below commands to stop the Docker run
-
+
::
-
+
sudo Docker stop <Docker container ID>
sudo Docker rm -f hb1
**Initiate the maven build**
To run the maven build, execute any one of them.
-
-::
+
+::
sudo mvn -s settings.xml deploy
OR
@@ -211,5 +211,5 @@ mentioned in the above section.**
libxml-xpath as below. If the issue is something else, follow the
link given as part of the build failure.
-::
+::
sudo apt install libxml-xpath-perl
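As a convenience, the environment settings described above can be collected into one file and sourced before the Docker run. A minimal sketch; the variable names follow the settings mentioned above, while the pg_passwd value is an assumption not shown in the doc:

    # heartbeat-env.sh - source this before starting the container
    export groupID=DefaultGroup     # same for all HB instances
    export consumerID=1             # unique per HB instance
    export pg_ipAddress=10.0.4.1    # defaults mirrored from hbproperties.yaml
    export pg_portNum=5432
    export pg_userName=postgres
    export pg_passwd=postgres       # assumption: value not given in the doc

    # usage: source heartbeat-env.sh && sudo Docker run ... heartbeat.test1:latest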
diff --git a/docs/sections/services/heartbeat-ms/design.rst b/docs/sections/services/heartbeat-ms/design.rst
index 837b8fc2..5618c53c 100644
--- a/docs/sections/services/heartbeat-ms/design.rst
+++ b/docs/sections/services/heartbeat-ms/design.rst
@@ -8,7 +8,7 @@ There are 4 processes created as below
Main process
------------
-
+
This is the initial process which does the following.
- Download CBS configuration and update the vnf_table_1
@@ -56,7 +56,7 @@ CBS polling process
-------------------
If the local configuration file (config/hbproperties.yaml) indicates
-that CBS polling is required, then main process would create the CBS
+that CBS polling is required, then the main process would create the CBS
polling process. It does the following.
- It takes the CBS polling interval from the configuration file.
@@ -85,12 +85,12 @@ services instances, processes would work differently as mentioned below.
- Download CBS configuration and process it
- Spawns processes
- Periodically update hb_common with last accessed time to indicate that active instance is Alive.
-
+
Inactive Instance:
- Spawns processes
- Constantly check hb_common entry for last accessed time
- - If the last accessed time is more than a minute or so, then it assumes the role of active instance
-
+ - If the last accessed time is more than a minute or so, then it assumes the role of active instance
+
**HB worker process:** Both the active and inactive instances behave the same as mentioned in the Design section.
**DB Monitoring process:** Both instances periodically check their process ID/hostname against the hb_common data to know whether they are the active instance or not. The inactive instance does nothing. The active instance behaves as mentioned in the design section.
@@ -100,7 +100,7 @@ services instances, processes would work differently as mentioned below.
Handling of some of the failure scenarios
-----------------------------------------
-Failure to download the configuration from CBS – In this case, local
+Failure to download the configuration from CBS - In this case, local
configuration file etc/config.json is considered as the configuration
file and vnf_table_1 is updated accordingly.
@@ -119,7 +119,7 @@ Postgres Database
There are 3 tables maintained.
-**Vnf_table_1 table:**
+**Vnf_table_1 table:**
This is table is indexed by eventName. Each entry
has following parameters in it.
@@ -137,8 +137,8 @@ has following parameters in it.
- closedLoopControlName
- version
-**Vnf_table_2 table:**
-For each sourceName there would be an entry in vnf_table_2.
+**Vnf_table_2 table:**
+For each sourceName there would be an entry in vnf_table_2.
This is indexed by eventName and SourceName. Each entry has
below parameters
@@ -147,18 +147,18 @@ below parameters
- Control loop event raised flag. 0 indicates not raised, 1 indicates
CL event raised
-**hb_common table:**
+**hb_common table:**
This is a single entry table.
- The configuration status which would have one of the below.
- - **RECONFIGURATION** – indicates CBS configuration processing is in
+ - **RECONFIGURATION** - indicates CBS configuration processing is in
progress.
- - **RUNNING** – CBS configuration is completed and ready to process HB
+ - **RUNNING** - CBS configuration is completed and ready to process HB
event and send CL event.
-- The process ID – This indicates the main process ID of the active HB
+- The process ID - This indicates the main process ID of the active HB
instance which is responsible to take care of reconfiguration
-- The source Name – It has 2 parts, hostname and service name. The
+- The source Name - It has 2 parts, hostname and service name. The
hostname is the Docker container ID. The service name is the
environment variable set for SERVICE_NAME
-- The last accessed time – The time last accessed by the main process
+- The last accessed time - The time last accessed by the main process
having the above process ID.
diff --git a/docs/sections/services/heartbeat-ms/index.rst b/docs/sections/services/heartbeat-ms/index.rst
index d8a77fa5..d70493db 100644
--- a/docs/sections/services/heartbeat-ms/index.rst
+++ b/docs/sections/services/heartbeat-ms/index.rst
@@ -6,7 +6,7 @@ Heartbeat Microservice
The main objective of **Heartbeat Microservice** is to receive the periodic
heartbeat from the configured eventNames and report the loss of heartbeat
-onto DMaap if number of consecutive missed heartbeat count is more than
+onto DMaaP if the number of consecutive missed heartbeats is more than
the configured missed heartbeat count
Heartbeat Microservice overview and functions
@@ -14,7 +14,7 @@ Heartbeat Microservice overview and functions
.. toctree::
:maxdepth: 1
-
+
./architecture.rst
./design.rst
./build_setup.rst
diff --git a/docs/sections/services/heartbeat-ms/installation.rst b/docs/sections/services/heartbeat-ms/installation.rst
index 62953cae..78e2239a 100644
--- a/docs/sections/services/heartbeat-ms/installation.rst
+++ b/docs/sections/services/heartbeat-ms/installation.rst
@@ -3,50 +3,32 @@
.. _heartbeat-installation:
-Installation
-============
+Helm Installation
+=================
+The Heartbeat microservice can be deployed using helm charts in the oom repository.
-Following are steps if manual deployment/undeployment required.
+Deployment steps
+~~~~~~~~~~~~~~~~
-Steps to deploy are shown below
+- Default app config values can be updated in oom/kubernetes/dcaegen2-services/components/dcae-heartbeat/values.yaml.
-- Heartbeat MS blueprint is available under bootstrap pod (under /blueprints/k8s-heartbeat.yaml). The blueprint is also maintained in gerrit and can be downloaded from https://git.onap.org/dcaegen2/platform/blueprints/tree/blueprints/k8s-heartbeat.yaml
-
-
-- Create an input file in DCAE bootstrap POD under / directory. Sample input file can be found under https://git.onap.org/dcaegen2/services/heartbeat/tree/dpo/k8s-heartbeat-inputs.yaml
+- Make the chart and deploy using the following command:
+ .. code-block:: bash
-- Enter the Bootstrap POD
-- Validate blueprint
- .. code-block:: bash
-
- cfy blueprints validate /blueprints/k8s-heartbeat.yaml
-- Upload validated blueprint
- .. code-block:: bash
-
+ cd oom/kubernetes/
+ make dcaegen2-services
+ helm install dev-dcaegen2-services dcaegen2-services --namespace <namespace> --set global.masterPassword=<password>
- cfy blueprints upload -b heartbeat /blueprints/k8s-heartbeat.yaml
-- Create deployment
- .. code-block:: bash
-
+- To deploy only the Heartbeat microservice:
- cfy deployments create -b heartbeat -i /k8s-heartbeat-input.yaml heartbeat
-- Deploy blueprint
- .. code-block:: bash
-
+ .. code-block:: bash
- cfy executions start -d heartbeat install
+ helm install dev-dcae-heartbeat dcaegen2-services/components/dcae-heartbeat --namespace <namespace> --set global.masterPassword=<password>
-To undeploy heartbeat, steps are shown below
+- To Uninstall
-- Uninstall running heartbeat and delete deployment
- .. code-block:: bash
-
+ .. code-block:: bash
- cfy uninstall heartbeat
-- Delete blueprint
- .. code-block:: bash
-
-
- cfy blueprints delete heartbeat \ No newline at end of file
+ helm uninstall dev-dcae-heartbeat
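After either install variant, the deployment can be verified with standard helm/kubectl checks, e.g. (release name and namespace as used above):

    helm status dev-dcae-heartbeat -n <namespace>
    kubectl get pods -n <namespace> | grep heartbeat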
diff --git a/docs/sections/services/heartbeat-ms/testprocedure.rst b/docs/sections/services/heartbeat-ms/testprocedure.rst
index c312ee51..4054ecf0 100644
--- a/docs/sections/services/heartbeat-ms/testprocedure.rst
+++ b/docs/sections/services/heartbeat-ms/testprocedure.rst
@@ -40,7 +40,7 @@ Sample output is as below
postgres=# \c hb_vnf
You are now connected to database "hb_vnf" as user "postgres".
- hb_vnf=#
+ hb_vnf=#
Delete all tables before starting Docker run or local run
---------------------------------------------------------
@@ -174,7 +174,7 @@ Testing Control loop event
- Modify the Json as below
Modify the lastEpochTime and startEpochTime with current time in Test1.json
- Modify the eventName in Test1.json to one of the eventName in vnf_table_1
+ Modify the eventName in Test1.json to one of the eventName in vnf_table_1
- Inject the Test1.json as mentioned in above section
@@ -432,4 +432,3 @@ The postgres DB also have a CL_flag set indicating control loop event with ONSET
2018-12-12 12:45:51,291 | __main__ | htbtworker | process_msg | 77 | INFO | HBT:Getting :http://10.12.5.252:3904/events/unauthenticated.SEC_HEARTBEAT_INPUT/group1/1?timeout=15000
2018-12-12 12:45:51,292 | urllib3.connectionpool | connectionpool | _new_conn | 208 | DEBUG | Starting new HTTP connection (1): 10.12.5.252
2018-12-12 12:46:00,585 | __main__ | db_monitoring | db_monitoring | 53 | INFO | DBM: Active DB Monitoring Instance
-
diff --git a/docs/sections/services/kpi-computation-ms/configuration.rst b/docs/sections/services/kpi-computation-ms/configuration.rst
index 269fd16a..f17b0ee5 100644
--- a/docs/sections/services/kpi-computation-ms/configuration.rst
+++ b/docs/sections/services/kpi-computation-ms/configuration.rst
@@ -7,6 +7,7 @@ Configuration
KPI Computation MS expects to be able to fetch configuration in following JSON format:
.. code-block:: json
+
{
"pollingInterval": 20,
"aafUsername": "dcae@dcae.onap.org",
@@ -40,4 +41,3 @@ KPI Computation MS expects to be able to fetch configuration in following JSON f
}
During ONAP OOM/Kubernetes deployment this configuration is created from Helm chart based on properties defined under **applicationConfig** section.
-
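To confirm what the service actually received, the resolved configuration can be fetched from the Config Binding Service inside the cluster. A sketch, assuming CBS on its default port; the service name is a placeholder, use the component's registered name:

    curl http://config-binding-service:10000/service_component/<kpi-ms-service-name>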
diff --git a/docs/sections/services/kpi-computation-ms/installation-helm.rst b/docs/sections/services/kpi-computation-ms/installation-helm.rst
index be43234f..87286baa 100644
--- a/docs/sections/services/kpi-computation-ms/installation-helm.rst
+++ b/docs/sections/services/kpi-computation-ms/installation-helm.rst
@@ -52,7 +52,7 @@ Deployment steps
- Update monitoring policy ID in below configuration which is used to enable Policy-Sync Side car container to be deployed and retrieves active policy configuration.
- .. code-block :: bash
+ .. code-block :: yaml
dcaePolicySyncImage: onap/org.onap.dcaegen2.deployments.dcae-services-policy-sync:1.0.1
policies:
@@ -61,7 +61,7 @@ Deployment steps
- Enable KPI MS component in oom/kubernetes/dcaegen2-services/values.yaml
- .. code-block:: bash
+ .. code-block:: yaml
dcae-kpi-ms:
enabled: true
diff --git a/docs/sections/services/kpi-computation-ms/kpi_computation_ms_overview.rst b/docs/sections/services/kpi-computation-ms/kpi_computation_ms_overview.rst
index 9e733e0f..d02e8c73 100644
--- a/docs/sections/services/kpi-computation-ms/kpi_computation_ms_overview.rst
+++ b/docs/sections/services/kpi-computation-ms/kpi_computation_ms_overview.rst
@@ -1,5 +1,5 @@
-.. This work is licensed under a Creative Commons Attribution 4.0
- International License. http://creativecommons.org/licenses/by/4.0
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
.. _docs_kpi_computation_ms_overview:
@@ -10,7 +10,7 @@ Introduction
Subscribe original PM data from DMaaP.
Do KPI computation based on KPI formulas which can be obtained from config policies; the formulas can be configured dynamically.
Publish KPI results on DMaaP.
- Receive request for specific KPI computation (future scope) on specific ‘objects’ (e.g., S-NSSAI, Service).
+ Receive request for specific KPI computation (future scope) on specific 'objects' (e.g., S-NSSAI, Service).
Architecture
------------
@@ -35,6 +35,9 @@ Publish a file to the PM-Mapper using the following example curl:
curl -k -X PUT https://dcae-pm-mapper:8443/delivery/<filename> -H 'X-DMAAP-DR-META:{"productName": "AcmeNode","vendorName": "Acme","lastEpochMicrosec": "1538478000000","sourceName": "oteNB5309","startEpochMicrosec": "1538478900000","timeZoneOffset": "UTC+05:00","location": "ftpes://127.0.0.1:22/ftp/rop/A20161224.1045-1100.bin.gz","compression": "gzip","fileFormatType": "org.3GPP.32.435#measCollec","fileFormatVersion": "V9"}' -H "Content-Type:application/xml" --data-binary @<filename> -H 'X-ONAP-RequestID: 12345' -H 'X-DMAAP-DR-PUBLISH-ID: 12345'
Example type A file:
+
+.. code-block:: xml
+
<?xml version="1.0" encoding="utf-8"?>
<measCollecFile xmlns="http://www.3gpp.org/ftp/specs/archive/32_series/32.435#measCollec">
<fileHeader dnPrefix="www.google.com" vendorName="CMCC" fileFormatVersion="32.435 V10.0">
@@ -73,11 +76,17 @@ Example type A file:
</fileFooter>
</measCollecFile>
+
Curl the topic on Message Router to retrieve the published event:
+.. code-block::
+
curl -k https://message-router:3905/events/unauthenticated.DCAE_KPI_OUTPUT/$ConsumerGroup/$ID
Example message output:
+
+.. code-block:: json
+
{
"event": {
"commonEventHeader": {
@@ -121,6 +130,7 @@ Example message output:
}
}
+
Interaction
"""""""""""
-Kpi Computation MS interacts with the Config Binding Service to get configuration information. \ No newline at end of file
+Kpi Computation MS interacts with the Config Binding Service to get configuration information.
diff --git a/docs/sections/services/mapper/SampleSnmpTrapConversion.rst b/docs/sections/services/mapper/SampleSnmpTrapConversion.rst
index b6ba41e4..26431bdd 100644
--- a/docs/sections/services/mapper/SampleSnmpTrapConversion.rst
+++ b/docs/sections/services/mapper/SampleSnmpTrapConversion.rst
@@ -9,58 +9,58 @@ Following is the **Sample SNMP Trap** that will be received by the Universal VES
.. code-block:: json
- {
+ {
"cambria.partition":"10.53.172.132",
"trap category":"ONAP-COLLECTOR-SNMPTRAP",
"community len":0,
"protocol version":"v2c",
- "varbinds":[
- {
+ "varbinds":[
+ {
"varbind_value":"CLEARED and CRITICAL severities have the same name",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.2.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"1.3",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.3.0",
"varbind_type":"ObjectIdentifier"
},
- {
+ {
"varbind_value":"1.3",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.4.0",
"varbind_type":"ObjectIdentifier"
},
- {
+ {
"varbind_value":"CLEARED",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.5.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"Queue manager: Process failure cleared",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.6.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"The queue manager process has been restored to normal operation",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.7.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"The queue manager process has been restored to normal operation. The previously issued alarm has been cleared",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.8.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"Changes to shared config will be synchronized across the cluster",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.9.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"No action",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.10.0",
"varbind_type":"OctetString"
},
- {
+ {
"varbind_value":"sprout-1.example.com",
"varbind_oid":"1.3.6.1.4.1.19444.12.2.0.12.0",
"varbind_type":"OctetString"
@@ -82,9 +82,9 @@ Following is the converted VES Format of the above SNMP Sample Trap by using the
.. code-block:: json
- {
- "event":{
- "commonEventHeader":{
+ {
+ "event":{
+ "commonEventHeader":{
"startEpochMicrosec":1.5350269902625413E9,
"eventId":"XXXX",
"sequence":0,
@@ -96,49 +96,49 @@ Following is the converted VES Format of the above SNMP Sample Trap by using the
"version":3,
"reportingEntityName":"VesAdapter"
},
- "faultFields":{
+ "faultFields":{
"eventSeverity":"MINOR",
"alarmCondition":"ONAP-COLLECTOR-SNMPTRAP",
"faultFieldsVersion":2,
"specificProblem":"SNMP Fault",
- "alarmAdditionalInformation":[
- {
+ "alarmAdditionalInformation":[
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.2.0",
"value":"CLEARED and CRITICAL severities have the same name"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.3.0",
"value":"1.3"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.4.0",
"value":"1.3"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.5.0",
"value":"CLEARED"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.6.0",
"value":"Queue manager: Process failure cleared"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.7.0",
"value":"The queue manager process has been restored to normal operation"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.8.0",
"value":"The queue manager process has been restored to normal operation. The previously issued alarm has been cleared"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.9.0",
"value":"Changes to shared config will be synchronized across the cluster"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.10.0",
"value":"No action"
},
- {
+ {
"name":"1.3.6.1.4.1.19444.12.2.0.12.0",
"value":"sprout-1.example.com"
}
@@ -147,4 +147,4 @@ Following is the converted VES Format of the above SNMP Sample Trap by using the
"vfStatus":"Active"
}
}
- } \ No newline at end of file
+ }
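To exercise the conversion end-to-end without a live SNMP source, the sample trap above can be published directly to the trap topic the adapter subscribes to; a hedged sketch, assuming the standard Message Router address used elsewhere in this document and the ``unauthenticated.ONAP-COLLECTOR-SNMPTRAP`` topic named in the flow description (the file name is illustrative):

.. code-block:: bash

   # Save the sample trap JSON above as sample-trap.json, then publish it
   curl -k -X POST "https://message-router:3905/events/unauthenticated.ONAP-COLLECTOR-SNMPTRAP" \
        -H "Content-Type: application/json" \
        -d @sample-trap.json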
diff --git a/docs/sections/services/mapper/delivery.rst b/docs/sections/services/mapper/delivery.rst
index 3f667635..e9d24984 100644
--- a/docs/sections/services/mapper/delivery.rst
+++ b/docs/sections/services/mapper/delivery.rst
@@ -4,12 +4,12 @@
Delivery
========
-Mapper is delivered with **1 Docker container** having spring boot microservice, **UniversalVesAdapter**. UniversalVesAdapter converts telementary data to VES.
+Mapper is delivered as **1 Docker container** containing the Spring Boot microservice **UniversalVesAdapter**, which converts telemetry data to VES.
| In the current release, the UniversalVesAdapter is integrated with DCAE's Config Binding Service. On start, it fetches the initial configuration from CBS and uses it. It does not currently refresh configuration changes made in the Consul KV store.
Docker Containers
-----------------
-Docker images can be pulled from ONAP Nexus repository with below commands:
+Docker images can be pulled from the ONAP Nexus repository with the command below:
``docker pull nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:latest``
diff --git a/docs/sections/services/mapper/flow.rst b/docs/sections/services/mapper/flow.rst
index 71cebfca..56eded56 100644
--- a/docs/sections/services/mapper/flow.rst
+++ b/docs/sections/services/mapper/flow.rst
@@ -7,24 +7,24 @@ Flow for converting RestConf Collector notification
===================================================
[1] RestConf Collector generates an rcc-notification in JSON format and publishes it on the DMaaP topic **unauthenticated.DCAE_RCC_OUTPUT**
[2] The Universal VES Adapter(UVA) microservice has subscribed to this DMaaP topic.
-[3] On receiving an event from DMaaP, the adapter uses the corresponding mapping file and converts the received notification into the VES event. It uses the notification-id from the received notification to find the required mapping file.
+[3] On receiving an event from DMaaP, the adapter uses the corresponding mapping file and converts the received notification into the VES event. It uses the notification-id from the received notification to find the required mapping file.
[4] For notifications for which no mapping file is identified, a default mapping file with generic mappings is used to create the VES event.
[5] The VES-formatted event will then be published on the DMaaP topic **unauthenticated.VES_PNFREG_OUTPUT**.
.. image:: ./flow-rest-conf.png
:alt: RestConf flow
-
+
Flow for converting SNMP Collector notification
===============================================
[1] VNF submits SNMP traps to the SNMP collector.
[2] Collector converts the trap into JSON format and publishes it on DMaaP topic **unauthenticated.ONAP-COLLECTOR-SNMPTRAP**
[3] The Universal VES Adapter(UVA) microservice has subscribed to this DMaaP topic.
-[4] On receiving an event from DMaaP, the adapter uses the corresponding mapping file and converts the received event into the VES event. It uses the enterprise ID from the received event to find the required mapping file.
+[4] On receiving an event from DMaaP, the adapter uses the corresponding mapping file and converts the received event into the VES event. It uses the enterprise ID from the received event to find the required mapping file.
[5] For SNMP traps for which no mapping file is identified, a default mapping file with generic mappings is used to create the VES event.
[6] The VES-formatted event will then be published on the DMaaP topic **unauthenticated.SEC_FAULT_OUTPUT**.
.. image:: ./flow.png
- :alt: SNMP flow \ No newline at end of file
+ :alt: SNMP flow
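To confirm either flow, the output topics named above can be read back from Message Router; a sketch under the same assumptions as the other curl examples in this document (the consumer group/ID are illustrative):

.. code-block:: bash

   # SNMP conversions land on SEC_FAULT_OUTPUT, RestConf conversions on VES_PNFREG_OUTPUT
   curl -k "https://message-router:3905/events/unauthenticated.SEC_FAULT_OUTPUT/uva-debug/1"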
diff --git a/docs/sections/services/mapper/index.rst b/docs/sections/services/mapper/index.rst
index de534be2..9ff3ff4a 100644
--- a/docs/sections/services/mapper/index.rst
+++ b/docs/sections/services/mapper/index.rst
@@ -6,12 +6,12 @@
VES-Mapper
==========
-Different VNF vendors generate event and telemetry data in different formats. Out of the box, all VNF vendors may not support VES format.
+Different VNF vendors generate event and telemetry data in different formats, and not all VNF vendors support the VES format out of the box.
VES-Mapper provides a generic adapter to convert different formats of event and telemetry data into VES structure that can be consumed by existing DCAE analytics applications.
-
+
| *Note*: Currently mapping files are available for SNMP collector and RESTConf collector.
-**VES-Mapper** converts the telemetry data into the required VES format and publishes to the DMaaP for further action to be taken by the DCAE analytics applications.
+**VES-Mapper** converts the telemetry data into the required VES format and publishes it to DMaaP for further action by the DCAE analytics applications.
.. toctree::
@@ -19,7 +19,7 @@ VES-Mapper provides a generic adapter to convert different formats of event and
./flow.rst
./delivery.rst
- ./installation.rst
+ ./installation-helm.rst
./mappingfile.rst
./SampleSnmpTrapConversion
- ./troubleshooting.rst \ No newline at end of file
+ ./troubleshooting.rst
diff --git a/docs/sections/services/mapper/mappingfile.rst b/docs/sections/services/mapper/mappingfile.rst
index 7333963c..642b84ab 100644
--- a/docs/sections/services/mapper/mappingfile.rst
+++ b/docs/sections/services/mapper/mappingfile.rst
@@ -13,7 +13,7 @@ The Adapter uses Smooks Framework to do the data format conversion by using the
SNMP Collector Default Mapping File
===================================
-Following is the default snmp mapping file which is used when no mapping file is found while processing event from SNMP Trap Collector.
+Following is the default SNMP mapping file, which is used when no mapping file is found while processing an event from the SNMP Trap Collector.
.. code-block:: xml
@@ -62,7 +62,7 @@ Following is the default snmp mapping file which is used when no mapping file is
RestConf Collector Default Mapping File
=======================================
-Following is the default RestConf collector mapping file which is used when no mapping file is found while processing notification from RestConf Collector.
+Following is the default RestConf collector mapping file, which is used when no mapping file is found while processing a notification from the RestConf Collector.
.. code-block:: xml
diff --git a/docs/sections/services/mapper/troubleshooting.rst b/docs/sections/services/mapper/troubleshooting.rst
index 859bf6e4..7ac4f9c2 100644
--- a/docs/sections/services/mapper/troubleshooting.rst
+++ b/docs/sections/services/mapper/troubleshooting.rst
@@ -45,7 +45,7 @@ Deployment/Installation errors
|13:04:37.537 [main] ERROR errorLogger - Application stoped due to missing default Config file
|13:04:37.538 [main] INFO o.s.s.c.ThreadPoolTaskExecutor - Shutting down ExecutorService 'applicationTaskExecutor'
|15:40:43.982 [main] WARN debugLogger - All Smooks objects closed
-
+
**These log messages are printed when the default configuration file "kv.json" is not present.**
@@ -78,4 +78,4 @@ If Default Config File is an invalid json file, we will get below exception
**Invalid Smooks mapping file**
-If VES-Mapper blueprint or local config file contains invalid Smooks mapping file, then we will get below SAXException / JsonProcessingException / JsonSyntaxException / JsonParseException while processing the incoming notifications and the notification will be dropped without converting into required VES event. All such dropped notifications will be logged in error log file. \ No newline at end of file
+If the VES-Mapper blueprint or local config file contains an invalid Smooks mapping file, a SAXException / JsonProcessingException / JsonSyntaxException / JsonParseException will be thrown while processing incoming notifications, and the notification will be dropped without being converted into the required VES event. All such dropped notifications are logged in the error log file.
diff --git a/docs/sections/services/pm-mapper/configuration.rst b/docs/sections/services/pm-mapper/configuration.rst
index 2fd89a70..fbac01c7 100644
--- a/docs/sections/services/pm-mapper/configuration.rst
+++ b/docs/sections/services/pm-mapper/configuration.rst
@@ -25,7 +25,7 @@ Disable TLS
Pm-mapper uses communication over TLS by default, but it is also possible to use plain HTTP requests. To disable TLS, set the configuration flag 'enable_http' to true, and set the certificate paths to empty strings or remove them from the configuration. See the config.yaml examples below.
.. code-block:: yaml
-
+
applicationConfig:
enable_http: true
key_store_path: ""
@@ -35,16 +35,16 @@ Pm-mapper by default uses communication over TLS, but it is also possible to use
-Or
+Or
.. code-block:: yaml
applicationConfig:
enable_http: true
- #key_store_path:
- #key_store_pass_path:
- #trust_store_path:
- #trust_store_pass_path:
+ #key_store_path:
+ #key_store_pass_path:
+ #trust_store_path:
+ #trust_store_pass_path:
Unauthenticated topic
@@ -52,7 +52,7 @@ Unauthenticated topic
To use unauthenticated topics, :ref:`disable TLS <pm_mapper_disable_tls>` and edit the AAF credentials in the configuration: they should be removed or set to empty strings. See the examples below.
.. code-block:: yaml
-
+
applicationConfig:
aaf_identity: ""
aaf_password: ""
@@ -61,10 +61,10 @@ To use unauthenticated topics :ref:`disable TLS <pm_mapper_disable_tls>`, and ed
Or
.. code-block:: yaml
-
+
applicationConfig:
- #aaf_identity:
- #aaf_password:
+ #aaf_identity:
+ #aaf_password:
diff --git a/docs/sections/services/pm-mapper/installation-helm.rst b/docs/sections/services/pm-mapper/installation-helm.rst
index 04e7503d..badfb875 100644
--- a/docs/sections/services/pm-mapper/installation-helm.rst
+++ b/docs/sections/services/pm-mapper/installation-helm.rst
@@ -83,7 +83,7 @@ The configuration update process is very straightforward.
The only step is to modify the Config Map which contains the configuration and save the change. PM-Mapper will detect the new configuration values at its next periodic check (every 60s).
It should be visible in PM-Mapper logs, for example:
-.. code-block:: text
+.. code-block:: none
...
2022-02-11T08:04:02.627Z main INFO org.onap.dcaegen2.services.sdk.rest.services.cbs.client.impl.CbsClientConfigMap Got successful output from ConfigMap file
@@ -94,4 +94,3 @@ It should be visible in PM-Mapper logs, for example:
This log fragment confirms that the configuration source is the Config Map: ``Got successful output from ConfigMap file``.
It also prints the current configuration (the last log line above). PM-Mapper keeps checking the configuration file periodically (every 60s).
-
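A minimal sketch of the update step described above, assuming a standard OOM deployment in the ``onap`` namespace (the Config Map name is deployment-specific and must be looked up first):

.. code-block:: bash

   # Find the PM-Mapper application Config Map, then edit it in place;
   # PM-Mapper picks up the change at its next periodic check (every 60s)
   kubectl -n onap get configmaps | grep pm-mapper
   kubectl -n onap edit configmap <pm-mapper-configmap-name>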
diff --git a/docs/sections/services/pm-mapper/troubleshooting.rst b/docs/sections/services/pm-mapper/troubleshooting.rst
index add7b7ae..444f7132 100644
--- a/docs/sections/services/pm-mapper/troubleshooting.rst
+++ b/docs/sections/services/pm-mapper/troubleshooting.rst
@@ -125,5 +125,3 @@ Make sure Config Binding Service is up and running and the **ip + port** combina
**PM Mapper** logs this information when connected to Consul, but cannot find a valid JSON configuration.
-
-
diff --git a/docs/sections/services/pm-subscription-handler/installation.rst b/docs/sections/services/pm-subscription-handler/installation.rst
index d0a6e404..1bba3f95 100644
--- a/docs/sections/services/pm-subscription-handler/installation.rst
+++ b/docs/sections/services/pm-subscription-handler/installation.rst
@@ -25,7 +25,7 @@ Deployment Steps
- Enable PMSH component in oom/kubernetes/dcaegen2-services/values.yaml
- .. code-block:: bash
+ .. code-block:: yaml
dcae-pmsh:
enabled: true
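After enabling PMSH and redeploying the dcaegen2-services chart, a quick sanity check that the pod came up; a sketch, assuming the ``onap`` namespace:

.. code-block:: bash

   kubectl -n onap get pods | grep pmsh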
diff --git a/docs/sections/services/pm-subscription-handler/logging.rst b/docs/sections/services/pm-subscription-handler/logging.rst
index f24fdf0a..a0ef9897 100644
--- a/docs/sections/services/pm-subscription-handler/logging.rst
+++ b/docs/sections/services/pm-subscription-handler/logging.rst
@@ -28,4 +28,4 @@ captured. This will affect both STDOUT logs and the logs written to application.
loggers:
onap_logger:
- level: INFO \ No newline at end of file
+ level: INFO
diff --git a/docs/sections/services/pm-subscription-handler/offeredapi.rst b/docs/sections/services/pm-subscription-handler/offeredapi.rst
index 9dfce02b..e9fac8e8 100644
--- a/docs/sections/services/pm-subscription-handler/offeredapi.rst
+++ b/docs/sections/services/pm-subscription-handler/offeredapi.rst
@@ -51,7 +51,7 @@ Responses
Sample Subscription Body
~~~~~~~~~~~~~~~~~~~~~~~~
-.. code-block:: http
+.. code-block:: json
{
"subscription": {
@@ -168,7 +168,7 @@ Update a Subscription nf filter
Sample NF Filter Body
~~~~~~~~~~~~~~~~~~~~~~~~
-.. code-block:: http
+.. code-block:: json
{
"nfFilter": {
@@ -216,7 +216,7 @@ Create a measurement group for a given subscription
Sample Measurement Group Body
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. code-block:: http
+.. code-block:: json
{
"measurementGroup": {
diff --git a/docs/sections/services/pm-subscription-handler/resources/monitoring-policy.json b/docs/sections/services/pm-subscription-handler/resources/monitoring-policy.json
index f142f8b4..9d1619fa 100644
--- a/docs/sections/services/pm-subscription-handler/resources/monitoring-policy.json
+++ b/docs/sections/services/pm-subscription-handler/resources/monitoring-policy.json
@@ -69,4 +69,4 @@
}
]
}
-} \ No newline at end of file
+}
diff --git a/docs/sections/services/prh/architecture.rst b/docs/sections/services/prh/architecture.rst
index 32314984..6ac68015 100644
--- a/docs/sections/services/prh/architecture.rst
+++ b/docs/sections/services/prh/architecture.rst
@@ -12,7 +12,5 @@ PRH Processing Flow
.. image:: ../../images/prhAlgo.png
-In London release, a new mode has been introduced which allows a PNF to send the registration event early, before SO registers the PNF in AAI. A timeout can be configured for the time until such an event is considered valid (default set to 1 day). When PRH receives such an event, and does not find the corresponding PNF in AAI, it will retry the check in AAI till either the PNF is found in AAI, or the timeout occurs (whichever is earlier).This does not block the processing of any events received after such a non-correlated event.
+In the London release, a new mode has been introduced which allows a PNF to send the registration event early, before SO registers the PNF in AAI. A timeout can be configured for the time until such an event is considered valid (default set to 1 day). When PRH receives such an event and does not find the corresponding PNF in AAI, it will retry the check in AAI until either the PNF is found in AAI or the timeout occurs (whichever is earlier). This does not block the processing of any events received after such a non-correlated event.
This mode is not the default mode in which PRH is installed, and has to be enabled in the PRH Helm chart. Since it uses a native Kafka consumer and not the DMaaP consumer, certain Kafka- and Strimzi-related configurable parameters are required, as described in the Configuration section.
-
-
diff --git a/docs/sections/services/prh/authorization.rst b/docs/sections/services/prh/authorization.rst
index d1fb8f2d..14e4091b 100644
--- a/docs/sections/services/prh/authorization.rst
+++ b/docs/sections/services/prh/authorization.rst
@@ -21,9 +21,9 @@ Default
Certificate-based
"""""""""""""""""
| There is an option to enable certificate-based authentication for PRH towards AAI service calls.
-| To achieve this secure flag needs to be turned on in PRH :ref:`configuration<prh_configuration>` :
+| To achieve this, the secure flag needs to be turned on in the PRH :ref:`configuration <prh_configuration>`:
-.. code-block:: json
+.. code-block:: bash
security.enableAaiCertAuth=true
@@ -39,9 +39,9 @@ Default
Certificate-based
""""""""""""""""""
| There is an option to enable certificate-based authentication for PRH towards DMaaP Bus Controller service calls.
-| To achieve this secure flag needs to be turned on in PRH :ref:`configuration<prh_configuration>` :
+| To achieve this, the secure flag needs to be turned on in the PRH :ref:`configuration <prh_configuration>`:
-.. code-block:: json
+.. code-block:: bash
--security.enableDmaapCertAuth=true
@@ -55,7 +55,7 @@ PRH identity and certificate data
| See :doc:`../../tls_enablement` for detailed information.
|
| PRH is using four files from ``tls-info`` DCAE volume (``cert.jks, jks.pass, trust.jks, trust.pass``).
-| Refer :ref:`configuration<prh_configuration>` for proper security attributes settings.
+| Refer to the :ref:`configuration <prh_configuration>` for the proper security attribute settings.
|
| **IMPORTANT** Even when certificate-based authentication security features are disabled,
| all security settings still need to be provided in the configuration for the PRH service to start smoothly.
diff --git a/docs/sections/services/prh/configuration.rst b/docs/sections/services/prh/configuration.rst
index 560c08c8..832611d4 100644
--- a/docs/sections/services/prh/configuration.rst
+++ b/docs/sections/services/prh/configuration.rst
@@ -1,6 +1,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
+.. _prh_configuration:
Configuration
=============
@@ -67,11 +68,10 @@ PRH fetches configuration directly from CBS service in the following JSON format
The configuration is created from PRH helm charts by specifying **applicationConfig** during ONAP OOM/Kubernetes deployment.
-For PRH 1.9.0 version (London) , a new mode has been introduced which allows early PNF registrations. This mode uses a direct Kafka consumer and not the DMAAP consumer. This mode is not the default mode and has to be activated by setting certain environment variables in the Helm chart values.yaml file under **applicationEnv**, as shown below:
+For PRH version 1.9.0 (London), a new mode has been introduced which allows early PNF registrations. This mode uses a direct Kafka consumer and not the DMaaP consumer. It is not the default mode and has to be activated by setting certain environment variables in the Helm chart values.yaml file under **applicationEnv**, as shown below:
.. code-block:: yaml
-
- name: kafkaBoostrapServerConfig
value: onap-strimzi-kafka-bootstrap:9092
- name: groupIdConfig
@@ -92,4 +92,3 @@ For PRH 1.9.0 version (London) , a new mode has been introduced which allows ear
secretKeyRef:
key: sasl.jaas.config
name: strimzi-kafka-admin
-
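When the early-registration mode is active, the Kafka topic can be inspected directly through the Strimzi bootstrap service named above; a hedged sketch (the broker pod name is an assumption, and SASL settings from the ``strimzi-kafka-admin`` secret may additionally be required):

.. code-block:: bash

   # Tail the PNF registration output topic from inside a Kafka broker pod
   kubectl -n onap exec -it onap-strimzi-kafka-0 -- \
       bin/kafka-console-consumer.sh \
       --bootstrap-server onap-strimzi-kafka-bootstrap:9092 \
       --topic unauthenticated.VES_PNFREG_OUTPUT --from-beginning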
diff --git a/docs/sections/services/prh/delivery.rst b/docs/sections/services/prh/delivery.rst
index 63a744f7..53e9faad 100644
--- a/docs/sections/services/prh/delivery.rst
+++ b/docs/sections/services/prh/delivery.rst
@@ -6,4 +6,4 @@ Delivery
**PRH** is delivered as a docker container. It is published in ONAP Nexus repository.
-Full image name is `onap/org.onap.dcaegen2.services.prh.prh-app-server`. \ No newline at end of file
+Full image name is `onap/org.onap.dcaegen2.services.prh.prh-app-server`.
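Following the same pattern as the other DCAE service images in this document, the image can be pulled from the ONAP Nexus registry; a sketch, with the tag as an illustrative assumption:

.. code-block:: bash

   docker pull nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.prh.prh-app-server:latest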
diff --git a/docs/sections/services/prh/installation.rst b/docs/sections/services/prh/installation.rst
index e73fae6a..6f591e79 100644
--- a/docs/sections/services/prh/installation.rst
+++ b/docs/sections/services/prh/installation.rst
@@ -154,9 +154,9 @@ Supported configuration modifiable in HELM charts under **applicationConfig** se
The location of the configuration file should be set in ``CBS_CLIENT_CONFIG_PATH`` env, for example:
``CBS_CLIENT_CONFIG_PATH: /app-config-input/application_config.yaml``
-
-
-For PRH 1.9.0 version (London) , a new mode has been introduced which allows early PNF registrations. This mode uses a direct Kafka consumer and not the DMAAP consumer. This mode is not the default mode and has to be activated by setting certain environment variables in the Helm chart values.yaml file under **applicationEnv**, as shown below:
+
+
+For PRH version 1.9.0 (London), a new mode has been introduced which allows early PNF registrations. This mode uses a direct Kafka consumer and not the DMaaP consumer. It is not the default mode and has to be activated by setting certain environment variables in the Helm chart values.yaml file under **applicationEnv**, as shown below:
.. code-block:: yaml
@@ -181,4 +181,3 @@ For PRH 1.9.0 version (London) , a new mode has been introduced which allows ear
secretKeyRef:
key: sasl.jaas.config
name: strimzi-kafka-admin
-
diff --git a/docs/sections/services/restconf/development_info.rst b/docs/sections/services/restconf/development_info.rst
index 31e2ddbe..8c194e5e 100644
--- a/docs/sections/services/restconf/development_info.rst
+++ b/docs/sections/services/restconf/development_info.rst
@@ -1,9 +1,13 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
Compiling RestConf Collector
=============================
RestconfCollector is a sub-project of dcaegen2/collectors/ (https://gerrit.onap.org/r/dcaegen2/collectors/restconf).
To build the RestConf Collector component, run the following Maven command from within the **collectors/restconf** directory:
-`mvn clean install`
+`mvn clean install`
Maven GroupId:
diff --git a/docs/sections/services/restconf/functionality.rst b/docs/sections/services/restconf/functionality.rst
index 15d42068..4cfe2569 100644
--- a/docs/sections/services/restconf/functionality.rst
+++ b/docs/sections/services/restconf/functionality.rst
@@ -1,3 +1,7 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
Functionality
=============
@@ -9,5 +13,5 @@ RestconfCollector interaction with an external controller.
.. image:: ../../images/rcc_diag_interact.png
-For more details about the Restconfcollector, visit
+For more details about the Restconfcollector, visit
* https://wiki.onap.org/pages/viewpage.action?pageId=60891182
diff --git a/docs/sections/services/restconf/index.rst b/docs/sections/services/restconf/index.rst
index 66564e90..9b0240e7 100644
--- a/docs/sections/services/restconf/index.rst
+++ b/docs/sections/services/restconf/index.rst
@@ -2,20 +2,20 @@
.. http://creativecommons.org/licenses/by/4.0
==================
-RestConf Collector
+RestConf Collector
==================
Overview
========
Restconf collector is a microservice in ONAP DCAE. It subscribes to external controllers
-and receives event data. After receiving event data it may modify it as per usecase's requirement and
+and receives event data. After receiving event data, it may modify it as per the use case's requirements and
produce a DMaaP event. This DMaaP event is usually consumed by the VES mapper.
Restconf Collector can subscribe to multiple events from multiple controllers.
.. toctree::
:maxdepth: 1
- ./installation
+ ./installation-helm
./functionality
./development_info
diff --git a/docs/sections/services/serviceindex.rst b/docs/sections/services/serviceindex.rst
index 1701f0fb..52523b3c 100644
--- a/docs/sections/services/serviceindex.rst
+++ b/docs/sections/services/serviceindex.rst
@@ -24,7 +24,7 @@ Event Processor
---------------
.. toctree::
:maxdepth: 1
-
+
./datalake-handler/index.rst
./mapper/index.rst
./pm-mapper/index.rst
@@ -35,18 +35,18 @@ Analytics
.. toctree::
:maxdepth: 1
-
+
./heartbeat-ms/index.rst
./kpi-computation-ms/index.rst
./pm-subscription-handler/index.rst
./slice-analysis-ms/index.rst
./son-handler/index.rst
./tcagen2-docker/index.rst
-
+
Miscellaneous Services
----------------------
.. toctree::
:maxdepth: 1
-
+
./ves-openapi-manager/index.rst
diff --git a/docs/sections/services/slice-analysis-ms/installation-helm.rst b/docs/sections/services/slice-analysis-ms/installation-helm.rst
index b8542d24..ae0437f7 100644
--- a/docs/sections/services/slice-analysis-ms/installation-helm.rst
+++ b/docs/sections/services/slice-analysis-ms/installation-helm.rst
@@ -42,7 +42,8 @@ Deployment steps
3. To un-deploy
- .. code-block:: bash
+
+.. code-block:: bash
helm uninstall <slice_analysis_ms>
@@ -88,4 +89,3 @@ Application configurations
|dcae_cl_response_topic | Dmaap topic URL to which Policy posts the |
| | message after successful control loop trigger |
+-------------------------------+------------------------------------------------+
-
diff --git a/docs/sections/services/slice-analysis-ms/runtime_configuration.rst b/docs/sections/services/slice-analysis-ms/runtime_configuration.rst
index a0772c86..26d116d5 100644
--- a/docs/sections/services/slice-analysis-ms/runtime_configuration.rst
+++ b/docs/sections/services/slice-analysis-ms/runtime_configuration.rst
@@ -27,7 +27,7 @@ Deployment
1. Enable dcae-slice-analysis-ms in values.yaml. When using the OOM Helm chart to bring up the whole ONAP environment, dcae-slice-analysis-ms will be installed automatically.
- .. code-block:: bash
+ .. code-block:: yaml
dcae-slice-analysis-ms:
enabled: true
@@ -37,7 +37,7 @@ Deployment
1. Uncomment the following lines. "duration" is the interval at which a thread in the policy sync container retrieves the latest policy from the XACML PDP engine. The unit of "duration" is seconds.
- .. code-block:: bash
+ .. code-block:: yaml
dcaePolicySyncImage: onap/org.onap.dcaegen2.deployments.dcae-services-policy-sync:1.0.1
policies:
@@ -76,7 +76,7 @@ Steps to Use Runtime Configuration
request body: policy_type.json
- .. code-block:: bash
+ .. code-block:: json
{
"tosca_definitions_version": "tosca_simple_yaml_1_1_0",
@@ -116,7 +116,7 @@ Steps to Use Runtime Configuration
request body: slicems_config_policy.json
- .. code-block:: bash
+ .. code-block:: json
{
"tosca_definitions_version": "tosca_simple_yaml_1_1_0",
@@ -143,24 +143,25 @@ Steps to Use Runtime Configuration
}
3. Deploy the policy
+
command
.. code-block:: bash
- curl -w %{http_code} --silent -k --user 'username:password' -X POST "https://policyPAPApi:6969/policy/pap/v1/pdps/policies" -H "Accept: application/json" -H "Content-Type: application/json" -d @push.json
+ curl -w %{http_code} --silent -k --user 'username:password' -X POST "https://policyPAPApi:6969/policy/pap/v1/pdps/policies" -H "Accept: application/json" -H "Content-Type: application/json" -d @push.json
request body: push.json
- .. code-block:: bash
+ .. code-block:: json
- {
- "policies": [
- {
- "policy-id": "onap.dcae.slicems.config",
- "policy-version": 1
- }
- ]
- }
+ {
+ "policies": [
+ {
+ "policy-id": "onap.dcae.slicems.config",
+ "policy-version": 1
+ }
+ ]
+ }
4. Verify in SliceMS that the configurations are received
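One way to check the deployment from the Policy side before inspecting SliceMS logs is to list the PDP groups and their deployed policies; a sketch, assuming the same PAP endpoint and credentials as the deploy call above:

.. code-block:: bash

   curl -k --user 'username:password' \
        "https://policyPAPApi:6969/policy/pap/v1/pdps" | grep onap.dcae.slicems.config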
@@ -168,8 +169,8 @@ Steps to Use Runtime Configuration
How to Develop Your Own Runtime Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
1. Create your own policy type
2. Create a policy based on your policy type
3. Deploy the policy
4. Verify in SliceMS that the configurations are received. (Code to handle your configurations needs to be written in SliceMS in advance.)
-
diff --git a/docs/sections/services/slice-analysis-ms/slice_analysis_ms_overview.rst b/docs/sections/services/slice-analysis-ms/slice_analysis_ms_overview.rst
index 3f9d6ecd..2a0666bb 100644
--- a/docs/sections/services/slice-analysis-ms/slice_analysis_ms_overview.rst
+++ b/docs/sections/services/slice-analysis-ms/slice_analysis_ms_overview.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-
+
.. _docs_slice_analysis_ms_overview:
Architecture
@@ -44,7 +44,7 @@ Functional Description
----------------------
- Slice Analysis ms consumes PM messages from PERFORMANCE_MEASUREMENTS topic.
-- For analysis Slice Analysis MS consumes various data from Config DB including List of Network
+- For analysis Slice Analysis MS consumes various data from Config DB including List of Network
Functions which serves the S-NSSAI, List of Near-RT RICs and the corresponding cell mappings of the
S-NSSAI, Current Configuration of the Near-RT RICs, Slice Profile associated with the S-NSSAI and
Subscriber details of the S-NSSAI (for sending the onset message to policy).
diff --git a/docs/sections/services/slice-analysis-ms/slice_analysis_ms_troubleshooting.rst b/docs/sections/services/slice-analysis-ms/slice_analysis_ms_troubleshooting.rst
index 66dee8c2..35f5202b 100644
--- a/docs/sections/services/slice-analysis-ms/slice_analysis_ms_troubleshooting.rst
+++ b/docs/sections/services/slice-analysis-ms/slice_analysis_ms_troubleshooting.rst
@@ -1,14 +1,18 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
Trouble shooting steps
----------------------
1. **Microservice stops and restarts during startup**
- Possible reason & Solution: Microservice is not registered with the consul
+   Possible reason & Solution: Microservice is not registered with Consul
- Check Consul to see whether the microservice is registered with it and whether the MS is able to fetch the app config from CBS. Check that CBS and Consul are deployed properly, then try to redeploy the MS
The below logs will be seen if CBS is not reachable by the MS
- 15:14:13.861 [main] WARN org.postgresql.Driver - JDBC URL port: 0 not valid (1:65535)
+ 15:14:13.861 [main] WARN org.postgresql.Driver - JDBC URL port: 0 not valid (1:65535)
15:14:13.862 [main] WARN o.s.b.w.s.c.AnnotationConfigServletWebServerApplicationContext -
- Exception encountered during context initialization - cancelling refresh attempt:
+ Exception encountered during context initialization - cancelling refresh attempt:
org.springframework.beans.factory.UnsatisfiedDependencyException: Error creating bean with name
'org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaConfiguration': Unsatisfied
dependency expressed through constructor parameter 0; nested exception is
@@ -26,7 +30,7 @@ Trouble shooting steps
2. **No PostgreSQL clusters have been deployed on this manager**
Solution:
-
+
kubectl exec -ti -n onap dev-dcaemod-db-primary-56ff585cf7-dxkkx bash
psql
ALTER ROLE "postgres" WITH PASSWORD 'onapdemodb';
@@ -43,4 +47,4 @@ Logging
Since the Slice Analysis MS is deployed as a pod in the kubernetes, we can check the logs by
using the following command:
- $ kubectl logs <pod-name> –namespace onap \ No newline at end of file
+   $ kubectl logs <pod-name> --namespace onap
diff --git a/docs/sections/services/snmptrap/administration.rst b/docs/sections/services/snmptrap/administration.rst
index ab5ec6e2..44948174 100644
--- a/docs/sections/services/snmptrap/administration.rst
+++ b/docs/sections/services/snmptrap/administration.rst
@@ -23,7 +23,7 @@ The **trapd** service can be started by running the command:
Output from this command will be two-fold. First will be the textual response:
-.. code-block:: bash
+.. code-block:: none
2018-10-16T15:14:59,461 Starting snmptrapd...
2018-10-16T19:15:01,966 ONAP controller not present, trying json config override via CBS_SIM_JSON env variable
@@ -45,7 +45,7 @@ The **trapd** container can be monitored for status by running this command from
If **SNMPTRAPD** is present/running, output from this command will be:
-.. code-block:: bash
+.. code-block:: none
2018-10-16T15:01:47,705 Status: snmptrapd running
ucsnmp 16109 16090 0 Oct08 ? 00:07:16 python ./snmptrapd.py
@@ -61,7 +61,7 @@ and the return code presented to the shell upon exit:
If **trapd** is not present, output from this command will be:
-.. code-block:: bash
+.. code-block:: none
2018-10-16T15:10:47,815 PID file /opt/app/snmptrap/tmp/snmptrapd.py.pid does not exist or not readable - unable to check status of snmptrapd
2018-10-16T15:10:47,816 Diagnose further at command line as needed.
@@ -82,7 +82,7 @@ Stopping trapd
Output from this command will be two-fold. First will be the textual response:
-.. code-block:: bash
+.. code-block:: none
2018-10-16T15:10:07,808 Stopping snmptrapd PID 16109...
2018-10-16T15:10:07,810 Stopped
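For reference, the start, status, and stop operations described above are all driven through the same control script; a sketch, assuming the default install path shown elsewhere in this document (the subcommand names follow the descriptions above):

.. code-block:: bash

   /opt/app/snmptrap/bin/snmptrapd.sh start
   /opt/app/snmptrap/bin/snmptrapd.sh status
   /opt/app/snmptrap/bin/snmptrapd.sh stop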
diff --git a/docs/sections/services/snmptrap/architecture.rst b/docs/sections/services/snmptrap/architecture.rst
index e65be786..88fa37c6 100644
--- a/docs/sections/services/snmptrap/architecture.rst
+++ b/docs/sections/services/snmptrap/architecture.rst
@@ -4,12 +4,12 @@
Architecture
============
-The ONAP **SNMPTRAP** project (referred to as **"trapd"** - as in "trap daemon" throughout
+The ONAP **SNMPTRAP** project (referred to as **"trapd"** - as in "trap daemon" throughout
this documentation) is a network facing ONAP platform component.
The simple network management protocol (or "SNMP", for short) is a pervasive
-communication protocol standard used between managed devices and a management system.
-It is used to relay data that can be valuable in the operation, fault identification
+communication protocol standard used between managed devices and a management system.
+It is used to relay data that can be valuable in the operation, fault identification
and planning processes of all networks.
SNMP utilizes a message called a "trap" to inform SNMP managers of abnormal
@@ -22,10 +22,10 @@ software processes or anything else specific to the agent's environment.
Capabilities
------------
-**trapd** receives SNMP traps and publishes them to a message router (DMAAP/MR)
+**trapd** receives SNMP traps and publishes them to a message router (DMAAP/MR)
instance based on attributes obtained from configuration binding service ("CBS").
-.. image:: ./ONAP_trapd.png
+.. image:: ./ONAP_trapd.png
Interactions
diff --git a/docs/sections/services/snmptrap/configuration.rst b/docs/sections/services/snmptrap/configuration.rst
index 5c81aafe..f421afb4 100644
--- a/docs/sections/services/snmptrap/configuration.rst
+++ b/docs/sections/services/snmptrap/configuration.rst
@@ -56,7 +56,7 @@ Potential Config Changes in your environment
"minimum_severity_to_log": 2 # minimum message level to log; 0 recommended for debugging, 3+ recommended for runtime/production
in snmpv3_config section:
-
+
(see detailed snmpv3_config discussion below)
snmpv3_config
@@ -419,5 +419,5 @@ The format of the JSON configuration that drives all behavior of SNMPTRAP is pro
"usmAesCfb256Protocol": "privkey47",
"usmHMAC384SHA512AuthProtocol": "authkey47"
}
-
+
}
diff --git a/docs/sections/services/snmptrap/delivery.rst b/docs/sections/services/snmptrap/delivery.rst
index 70bdba3b..f826c57b 100644
--- a/docs/sections/services/snmptrap/delivery.rst
+++ b/docs/sections/services/snmptrap/delivery.rst
@@ -20,4 +20,3 @@ Standalone
.. code-block:: bash
gerrit.onap.org:29418/dcaegen2/collectors/snmptrap
-
diff --git a/docs/sections/services/snmptrap/installation.rst b/docs/sections/services/snmptrap/installation.rst
index 9c549948..b733eb1c 100644
--- a/docs/sections/services/snmptrap/installation.rst
+++ b/docs/sections/services/snmptrap/installation.rst
@@ -7,15 +7,15 @@ Installation
An environment suitable for running docker containers is recommended.
If that is not available, SNMPTRAP source can be downloaded and run
-in a VM or on baremetal.
+in a VM or on baremetal.
Both scenarios are documented below.
As a docker container
---------------------
-**trapd** is delivered as a docker container based on python 3.6. The
-host or VM that will run this container must have the docker application
+**trapd** is delivered as a docker container based on python 3.6. The
+host or VM that will run this container must have the docker application
loaded and available to the userID that will be running the SNMPTRAP container.
If running from a docker container, it is assumed that *Config
@@ -36,12 +36,12 @@ nexus and launch it in the container named "trapd":
``docker run --detach -t --rm -p 162:6162/udp -P --name=trapd nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.snmptrap:2.0.6 ./bin/snmptrapd.sh start``
Running an instance of **trapd** will result in arriving traps being published
-to the topic specified by Config Binding Services.
+to the topic specified by Config Binding Services.
Standalone
----------
-**trapd** can also be run outside of a container environment, without CBS interactions.
+**trapd** can also be run outside of a container environment, without CBS interactions.
If CBS is not present, SNMPTRAP will look for a JSON configuration file specified via the
environment variable CBS_SIM_JSON at startup. The location of this file should be specified
as a relative path from the <SNMPTRAP base directory>/bin directory. E.g.
@@ -55,8 +55,8 @@ Prerequisites
trapd requires the following to run in a non-docker environment:
- Python 3.6+
- - Python module “pysnmp” 4.4.5
- - Python module “requests” 2.18.3
+ - Python module "pysnmp" 4.4.5
+ - Python module "requests" 2.18.3
To install prerequisites:
@@ -83,7 +83,7 @@ Download a copy of the latest trapd image from gerrit in it's standard runtime l
Configure for your environment
""""""""""""""""""""""""""""""
-In a non-docker environment, ONAP trapd is controlled by a locally hosted JSON configuration file. It is
+In a non-docker environment, ONAP trapd is controlled by a locally hosted JSON configuration file. It is
referenced in the trapd startup script as:
.. code-block:: bash
@@ -97,9 +97,9 @@ This file should be in the exact same format is the response from CBS in a fully
/opt/app/snmptrap/etc/snmptrapd.json
-Make applicable changes to this file - typically things that will need to change include:
+Make applicable changes to this file - typically things that will need to change include:
-.. code-block:: bash
+.. code-block:: json
"topic_url": "http://localhost:3904/events/ONAP-COLLECTOR-SNMPTRAP"
@@ -115,4 +115,3 @@ Start the application
"""""""""""""""""""""
``nohup /opt/app/snmptrap/bin/snmptrapd.sh start > /opt/app/snmptrap/logs/snmptrapd.out 2>&1 &``
-
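Putting the standalone pieces above together, a minimal sketch of a non-docker start, assuming the default install layout and the JSON configuration file referenced above:

.. code-block:: bash

   # Path is relative to <SNMPTRAP base directory>/bin, as described above
   export CBS_SIM_JSON=../etc/snmptrapd.json
   nohup /opt/app/snmptrap/bin/snmptrapd.sh start > /opt/app/snmptrap/logs/snmptrapd.out 2>&1 &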
diff --git a/docs/sections/services/snmptrap/logging.rst b/docs/sections/services/snmptrap/logging.rst
index 23c92296..089dde8c 100644
--- a/docs/sections/services/snmptrap/logging.rst
+++ b/docs/sections/services/snmptrap/logging.rst
@@ -26,20 +26,18 @@ Defaults are shown below:
.. code-block:: json
- "files": {
- <other json data>
- ...
+ {"files": {
+ "<other json data>": "...",
"roll_frequency": "day",
- "minimum_severity_to_log": 3
- <other json data>
- ...
- },
+ "minimum_severity_to_log": 3,
+ "<other json data>": "..."
+ }}
Roll Frequency
""""""""""""""
-Roll frequency can be modified based on your environment (e.g. if trapd is handling a
+Roll frequency can be modified based on your environment (e.g. if trapd is handling a
heavy trap load, you will probably want files to roll more frequently). Valid "roll_frequency" values are:
- minute
@@ -49,7 +47,7 @@ heavy trap load, you will probably want files to roll more frequently). Valid "
Minimum Severity To Log
"""""""""""""""""""""""
-Logging levels should be modified based on your need. Log levels in lab environments should be "lower"
+Logging levels should be modified based on your need. Log levels in lab environments should be "lower"
(e.g. minimum severity to log = "0" creates verbose logging) vs. production (values of "3" and above are a good choice).
Valid "minimum_severity_to_log" values are:
@@ -74,7 +72,7 @@ values:
.. code-block:: json
- "files": {
+ {"files": {
"runtime_base_dir": "/opt/app/snmptrap",
"log_dir": "logs",
"data_dir": "data",
@@ -84,10 +82,9 @@ values:
"traps_stats_log": "snmptrapd_stats.csv",
"perm_status_file": "snmptrapd_status.log",
"roll_frequency": "hour",
- "minimum_severity_to_log": 2
- <other json data>
- ...
- },
+ "minimum_severity_to_log": 2,
+ "<other json data>": "..."
+ }}
The base directory for all data logs is specified with:
@@ -123,8 +120,8 @@ An example from this log is shown below:
1529960544.4896748 Mon Jun 25 17:02:24 2018; Mon Jun 25 17:02:24 2018 com.att.dcae.dmaap.IST3.DCAE-COLLECTOR-UCSNMP 15299605440000 1.3.6.1.4.1.999.0.1 server001 127.0.0.1 server001 v2c 751564798 0f40196a-78bb-11e8-bac7-005056865aac , "varbinds": [{"varbind_oid": "1.3.6.1.4.1.999.0.1.1", "varbind_type": "OctetString", "varbind_value": "TEST TRAP"}]
-*NOTE:* Format of this log will change with 1.5.0; specifically, "varbinds" section will be reformatted/json struct removed and will be replaced with a flat file format.
-
+*NOTE:* The format of this log will change with 1.5.0; specifically, the "varbinds" section will be reformatted (the JSON struct removed) and replaced with a flat file format.
+
PUBLISHED TRAPS
^^^^^^^^^^^^^^^
@@ -132,7 +129,7 @@ SNMPTRAP's main purpose is to receive and decode SNMP traps, then
publish the results to a configured DMAAP/MR message bus. Traps that
are successfully published (e.g. publish attempt gets a "200/ok"
response from the DMAAP/MR server) are logged to a file named by
-the technology being used combined with the topic being published to.
+the technology being used combined with the topic being published to.
If you find a trap in this published log, it has been acknowledged as
received by DMAAP/MR. If consumers complain of "missing traps", the
@@ -207,18 +204,17 @@ of that JSON configuration that influences EELF logging is:
.. code-block:: json
- "files": {
- <other json data>
- ...
+ {"files": {
+ "<other json data>": "...",
"**eelf_base_dir**": "/opt/app/snmptrap/logs",
"eelf_error": "error.log",
"eelf_debug": "debug.log",
"eelf_audit": "audit.log",
"eelf_metrics": "metrics.log",
- "roll_frequency": "hour",
+ "roll_frequency": "hour"
},
- <other json data>
- ...
+ "<other json data>": "..."
+ }
The base directory for all EELF logs is specified with:
@@ -272,10 +268,10 @@ Messages will be in the general format of:
2018-04-25T17:28:48,034|snmp_engine_observer_cb|snmptrapd||||DETAILED|100||snmp trap arrived from 192.168.1.139, assigned uuid: 0f40196a-78bb-11e8-bac7-005056
2018-04-25T17:28:48,036|notif_receiver_cb|snmptrapd||||DETAILED|100||processing varbinds for 0f40196a-78bb-11e8-bac7-005056
2018-04-25T17:28:48,040|notif_receiver_cb|snmptrapd||||DETAILED|100||adding 0f40196a-78bb-11e8-bac7-005056 to buffer
-
+
2018-06-25T21:02:24,491|notif_receiver_cb|snmptrapd||||DETAILED|100||trap 0f40196a-78bb-11e8-bac7-005056865aac : {"uuid": "0f40196a-78bb-11e8-bac7-005056865aac", "agent address": "192.168.1.139", "agent name": "server001", "cambria.partition": "server001", "community": "", "community len": 0, "epoch_serno": 15299605440000, "protocol version": "v2c", "time received": 1529960544.4896748, "trap category": "com.companyname.dcae.dmaap.location.DCAE-COLLECTOR-UCSNMP", "sysUptime": "751564798", "notify OID": "1.3.6.1.4.1.999.0.1", "notify OID len": 9, "varbinds": [{"varbind_oid": "1.3.6.1.4.1.999.0.1.1", "varbind_type": "OctetString", "varbind_value": "TEST TRAP"}]}
2018-06-25T21:02:24,496|post_dmaap|snmptrapd||||DETAILED|100||post_data_enclosed: {"uuid": "0f40196a-78bb-11e8-bac7-005056865aac", "agent address": "192.168.1.139", "agent name": "server001", "cambria.partition": "server001", "community": "", "community len": 0, "epoch_serno": 15299605440000, "protocol version": "v2c", "time received": 1529960544.4896748, "trap category": "com.att.dcae.dmaap.IST3.DCAE-COLLECTOR-UCSNMP", "sysUptime": "751564798", "notify OID": "1.3.6.1.4.1.999.0.1", "notify OID len": 9, "varbinds": [{"varbind_oid": "1.3.6.1.4.1.999.0.1.1", "varbind_type": "OctetString", "varbind_value": "TEST TRAP"}]}
-
+
Platform Status
^^^^^^^^^^^^^^^
@@ -291,6 +287,6 @@ A permanent (left to user to archive/compress/etc) status file is maintained in
Combined with **runtime_base_dir** and **log_dir** settings from snmptrapd.json, the perm_status_file in default installations
can be found at:
-.. code-block:: json
+.. code-block:: bash
/opt/app/snmptrap/logs/snmptrapd_status.log
diff --git a/docs/sections/services/snmptrap/offeredapis.rst b/docs/sections/services/snmptrap/offeredapis.rst
index fabaff5f..f5841dee 100644
--- a/docs/sections/services/snmptrap/offeredapis.rst
+++ b/docs/sections/services/snmptrap/offeredapis.rst
@@ -27,10 +27,10 @@ running instance. To accomplish this, you may:
NetSNMP snmptrap
----------------
-One way to simulate an arriving SNMP trap is to use the Net-SNMP utility/command snmptrap.
+One way to simulate an arriving SNMP trap is to use the Net-SNMP utility/command snmptrap.
This command can send V1, V2c or V3 traps to a manager based on the parameters provided.
-The example below sends a SNMP V1 trap to the specified host. Prior to running this command, export
+The example below sends an SNMP V1 trap to the specified host. Prior to running this command, export
the values of *to_ip_address* (set it to the IP of the VM hosting the ONAP trapd container) and *to_port* (typically
set to "162"):
@@ -52,18 +52,18 @@ python using pysnmp
-------------------
Another way to simulate an arriving SNMP trap is to send one with the python *pysnmp* module. (Note that this
-is the same module that ONAP trapd is based on).
+is the same module that ONAP trapd is based on).
-To do this, create a python script called "send_trap.py" with the following contents. You'll need to change the
+To do this, create a python script called "send_trap.py" with the following contents. You'll need to change the
target (from "localhost" to whatever the destination IP/hostname of the trap receiver is) before saving:
.. code-block:: python
from pysnmp.hlapi import *
from pysnmp import debug
-
+
# debug.setLogger(debug.Debug('msgproc'))
-
+
errorIndication, errorStatus, errorIndex, varbinds = next(sendNotification(SnmpEngine(),
CommunityData('not_public'),
UdpTransportTarget(('localhost', 162)),
@@ -72,7 +72,7 @@ target (from "localhost" to whatever the destination IP/hostname of the trap rec
[ObjectType(ObjectIdentity('.1.3.6.1.4.1.999.1'), OctetString('test trap - ignore')),
ObjectType(ObjectIdentity('.1.3.6.1.4.1.999.2'), OctetString('ONAP pytest trap'))])
)
-
+
if errorIndication:
print(errorIndication)
else:
@@ -80,4 +80,6 @@ target (from "localhost" to whatever the destination IP/hostname of the trap rec
To run the pysnmp example:
- ``python ./send_trap.py``
+.. code-block:: bash
+
+ python ./send_trap.py
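For comparison with the pysnmp script above, a hedged Net-SNMP equivalent can be used; note this is a v2c variant (not the V1 example elided from this diff), and the community string and OIDs are illustrative, reusing the test OID seen in the trapd log samples:

.. code-block:: bash

   export to_ip_address=127.0.0.1
   export to_port=162
   # '' uses the local system uptime; 1.3.6.1.4.1.999.0.1 is the illustrative notify OID
   snmptrap -v 2c -c not_public "${to_ip_address}:${to_port}" '' \
       1.3.6.1.4.1.999.0.1 \
       1.3.6.1.4.1.999.0.1.1 s "TEST TRAP"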
diff --git a/docs/sections/services/snmptrap/release-notes.rst b/docs/sections/services/snmptrap/release-notes.rst
index 98ea3d40..886c487d 100644
--- a/docs/sections/services/snmptrap/release-notes.rst
+++ b/docs/sections/services/snmptrap/release-notes.rst
@@ -21,11 +21,11 @@ Version: 2.3.0
**Bug Fixes**
-
+
**Known Issues**
**Security Issues**
- - None
+ - None
**Upgrade Notes**
@@ -51,7 +51,7 @@ Version: 1.4.0
**Known Issues**
**Security Issues**
- - None
+ - None
**Upgrade Notes**
@@ -73,12 +73,12 @@ Support for config binding services.
**Bug Fixes**
- `https://jira.onap.org/browse/DCAEGEN2-465`
-
+
**Known Issues**
- `https://jira.onap.org/browse/DCAEGEN2-465` Default config causes standalone instance startup failure.
**Security Issues**
- - None
+ - None
**Upgrade Notes**
@@ -86,4 +86,3 @@ Support for config binding services.
**Deprecation Notes**
**Other**
-
diff --git a/docs/sections/services/son-handler/installation-helm.rst b/docs/sections/services/son-handler/installation-helm.rst
index 3fbc4c08..5641093a 100644
--- a/docs/sections/services/son-handler/installation-helm.rst
+++ b/docs/sections/services/son-handler/installation-helm.rst
@@ -88,7 +88,7 @@ Deployment Steps
- Update monitoring policy ID in below configuration which is used to enable Policy-Sync Side car container to be deployed and retrieves active policy configuration.
- .. code-block:: bash
+ .. code-block:: yaml
dcaePolicySyncImage: onap/org.onap.dcaegen2.deployments.dcae-services-policy-sync:1.0.1
policies:
@@ -97,13 +97,13 @@ Deployment Steps
- Update Config db IP address:
- .. code-block:: bash
+ .. code-block:: yaml
sonhandler.configDb.service: http://<configDB-IPAddress>:8080
- Enable sonhandler component in oom/kubernetes/dcaegen2-services/values.yaml
- .. code-block:: bash
+ .. code-block:: yaml
dcae-son-handler:
enabled: true
diff --git a/docs/sections/services/son-handler/son_handler_overview.rst b/docs/sections/services/son-handler/son_handler_overview.rst
index 767d923d..d65c7f89 100644
--- a/docs/sections/services/son-handler/son_handler_overview.rst
+++ b/docs/sections/services/son-handler/son_handler_overview.rst
@@ -1,6 +1,6 @@
-.. This work is licensed under a Creative Commons Attribution 4.0
- International License. http://creativecommons.org/licenses/by/4.0
-
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
.. _docs_SON_Handler_MS:
Architecture
@@ -28,8 +28,8 @@ The logic may not be 100% fool-proof (i.e., cover all possible scenarios and bou
The details of the state machines of all the threads in the core logic are available in https://wiki.onap.org/pages/viewpage.action?pageId=56131985.
-In Frankfurt release, adaptive SON functionality was introduced for PCI optimization. While determining the optimum PCI values to resolve PCI collision and confusion, the optimizer also takes into consideration a set of cells whose PCI values may not be changed during the optimization. Such situations could arise, for example, when the PCI value of a cell could not be updated in the past (due to whatever reason), or configuration policy specifies that certain cells’ PCI values should never be changed. So, the SON-Handler MS keeps track of cells whose PCI values cannot be changed. When triggering OOF for PCI optimization, the SON-Handler MS also provides the list of cells whose PCI values cannot be changed.
-
+In Frankfurt release, adaptive SON functionality was introduced for PCI optimization. While determining the optimum PCI values to resolve PCI collision and confusion, the optimizer also takes into consideration a set of cells whose PCI values may not be changed during the optimization. Such situations could arise, for example, when the PCI value of a cell could not be updated in the past (due to whatever reason), or configuration policy specifies that certain cells' PCI values should never be changed. So, the SON-Handler MS keeps track of cells whose PCI values cannot be changed. When triggering OOF for PCI optimization, the SON-Handler MS also provides the list of cells whose PCI values cannot be changed.
+
Details of Frankfurt implementation are available in https://wiki.onap.org/display/DW/SON-Handler+MS+%28DCAE%29+Impacts.
@@ -60,7 +60,7 @@ This is responsible for registering with the DMaaP client for the DMaaP notifica
Deployment aspects
------------------
-The SON-Handler MS will be deployed on DCAE as an on-demand component. Details of the installation steps are available at ./installation.rst. Further details can be obtained from https://wiki.onap.org/pages/viewpage.action?pageId=76875778
+The SON-Handler MS will be deployed on DCAE as an on-demand component. Details of the installation steps are available at ./installation.rst. Further details can be obtained from https://wiki.onap.org/pages/viewpage.action?pageId=76875778
Known Issues and Resolutions
----------------------------
diff --git a/docs/sections/services/son-handler/son_handler_troubleshooting.rst b/docs/sections/services/son-handler/son_handler_troubleshooting.rst
index 644b0826..bfb990c4 100644
--- a/docs/sections/services/son-handler/son_handler_troubleshooting.rst
+++ b/docs/sections/services/son-handler/son_handler_troubleshooting.rst
@@ -1,23 +1,31 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
Troubleshooting steps
---------------------
1. Microservice stops and restarts during startup
- Possible reasons & Solutions:
- 1. Microservice is not registered with the consul
+ Possible reasons & Solutions:
+   1. Microservice is not registered with Consul
- Check Consul to see whether the microservice is registered with it and whether the MS is able to fetch the app config from CBS. Check that CBS and Consul are deployed properly, then try to redeploy the MS
The below logs will be seen if CBS is not reachable by the MS
- 15:14:13.861 [main] WARN org.postgresql.Driver - JDBC URL port: 0 not valid (1:65535)
+ .. code-block:: none
+
+ 15:14:13.861 [main] WARN org.postgresql.Driver - JDBC URL port: 0 not valid (1:65535)
15:14:13.862 [main] WARN o.s.b.w.s.c.AnnotationConfigServletWebServerApplicationContext - Exception encountered during context initialization - cancelling refresh attempt: org.springframework.beans.factory.UnsatisfiedDependencyException: Error creating bean with name 'org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaConfiguration': Unsatisfied dependency expressed through constructor parameter 0; nested exception is org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'dataSource' defined in org.onap.dcaegen2.services.sonhms.Application: Initialization of bean failed; nested exception is org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'org.springframework.boot.autoconfigure.jdbc.DataSourceInitializerInvoker': Invocation of init method failed; nested exception is org.springframework.jdbc.datasource.init.UncategorizedScriptException: Failed to execute database script; nested exception is java.lang.RuntimeException: Driver org.postgresql.Driver claims to not accept jdbcUrl, jdbc:postgresql://null:0/sonhms
15:14:13.865 [main] INFO o.a.catalina.core.StandardService - Stopping service [Tomcat]
15:14:13.877 [main] INFO o.s.b.a.l.ConditionEvaluationReportLoggingListener - Error starting ApplicationContext. To display the conditions report re-run your application with 'debug' enabled.
15:14:13.880 [main] ERROR o.s.boot.SpringApplication - Application run failed
-
+
2. MS is not able to fetch the config policies from the policy handler.
- Check if the config policy for the MS is created and pushed into the policy module. The below logs will be seen if the config policies are not available.
+ .. code-block:: none
+
2019-05-16 14:48:48.651 LOG <sonhms> [son_policy_widelm.create] INFO: latest policy for policy_id(com.Config_PCIMS_CONFIG_POLICY.1.xml) status(404) response: {}
2019-05-16 14:48:49.661 LOG <sonhms> [son_policy_widelm.create] INFO: exit policy_get
2019-05-16 14:48:49.661 LOG <sonhms> [son_policy_widelm.create] INFO: policy not found for policy_id com.Config_PCIMS_CONFIG_POLICY.1.xml
@@ -34,5 +42,7 @@ Logging
-------
1. Logs can be found either from kubernetes UI or from kubectl. Since, the MS is deployed as a pod in the kubernetes, you can check the logs by using the command
- kubectl logs <pod-name> --namespace onap
+ .. code-block:: bash
+
+ kubectl logs <pod-name> --namespace onap
diff --git a/docs/sections/services/tcagen2-docker/configuration.rst b/docs/sections/services/tcagen2-docker/configuration.rst
index b0bc4820..817bddd5 100644
--- a/docs/sections/services/tcagen2-docker/configuration.rst
+++ b/docs/sections/services/tcagen2-docker/configuration.rst
@@ -39,4 +39,4 @@ Following is default configuration set for TCA during deployment.
tca.enable_ecomp_logging: true
-Complete configuration and input defaults can be found on blueprint here - https://git.onap.org/dcaegen2/platform/blueprints/plain/blueprints/k8s-tcagen2.yaml \ No newline at end of file
+Complete configuration and input defaults can be found on blueprint here - https://git.onap.org/dcaegen2/platform/blueprints/plain/blueprints/k8s-tcagen2.yaml
diff --git a/docs/sections/services/tcagen2-docker/functionality.rst b/docs/sections/services/tcagen2-docker/functionality.rst
index 8b729b1d..985c844e 100644
--- a/docs/sections/services/tcagen2-docker/functionality.rst
+++ b/docs/sections/services/tcagen2-docker/functionality.rst
@@ -1,7 +1,11 @@
+.. This work is licensed under a
+ Creative Commons Attribution 4.0 International License.
+ http://creativecommons.org/licenses/by/4.0
+
Functionality
=============
-TCA-gen2 is driven by the VES collector events published into Message Router. This Message Router topic is the source for the CDAP application which will read each incoming message. If a message meets the VES (CEF, v28.4) as specified by the VES 5.4 standard, it will be parsed and if it contains a message which matches the policy configuration for a given metric (denoted primarily by the "eventName" and the "fieldPath"), the value of the metric will be compared to the "thresholdValue". If that comparison indicates that a Control Loop Event Message should be generated, the application will output the alarm to the Message Router topic in a format that matches the interface spec defined for Control-Loop by ONAP-Policy
+TCA-gen2 is driven by the VES collector events published into Message Router. This Message Router topic is the source for the TCA-gen2 application, which reads each incoming message. If a message meets the VES common event format (CEF, v28.4) as specified by the VES 5.4 standard, it is parsed; if it contains a metric that matches the policy configuration (denoted primarily by the "eventName" and the "fieldPath"), the value of the metric is compared to the "thresholdValue". If that comparison indicates that a Control Loop Event Message should be generated, the application publishes the alarm to the Message Router topic in a format that matches the interface spec defined for Control-Loop by ONAP-Policy.
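+
+For illustration only, the core comparison can be sketched in shell with jq; the event file, field path, and threshold value below are assumptions, not TCA-gen2's actual implementation:
+
+.. code-block:: bash
+
+   # Illustrative sketch: extract an integer metric from a VES event and
+   # compare it to a configured threshold (field path and value assumed).
+   THRESHOLD=4000
+   VALUE=$(jq -r '.event.measurementsForVfScalingFields.vNicPerformanceArray[0].receivedTotalPacketsDelta' event.json)
+   if [ "$VALUE" -gt "$THRESHOLD" ]; then
+       echo "Threshold crossed - a Control Loop Event Message would be published"
+   fi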
Assumptions:
diff --git a/docs/sections/services/tcagen2-docker/index.rst b/docs/sections/services/tcagen2-docker/index.rst
index c5fc85da..c22cf23b 100644
--- a/docs/sections/services/tcagen2-docker/index.rst
+++ b/docs/sections/services/tcagen2-docker/index.rst
@@ -15,8 +15,7 @@ The TCA-gen2 is docker based mS intended to replace TCA/cdap version, which was
.. toctree::
:maxdepth: 1
- ./installation
./installation-helm.rst
./configuration
./functionality
- ./delivery \ No newline at end of file
+ ./delivery
diff --git a/docs/sections/services/tcagen2-docker/installation-helm.rst b/docs/sections/services/tcagen2-docker/installation-helm.rst
index e4a25d9b..9bc12336 100644
--- a/docs/sections/services/tcagen2-docker/installation-helm.rst
+++ b/docs/sections/services/tcagen2-docker/installation-helm.rst
@@ -92,4 +92,3 @@ Application Configurations
+-------------------------------+------------------------------------------------+
|tca.enable_ecomp_logging | Enable ecomp logging |
+-------------------------------+------------------------------------------------+
-
diff --git a/docs/sections/services/ves-http/architecture.rst b/docs/sections/services/ves-http/architecture.rst
index 2e7a0039..56c31456 100644
--- a/docs/sections/services/ves-http/architecture.rst
+++ b/docs/sections/services/ves-http/architecture.rst
@@ -11,9 +11,9 @@ VES Processing Flow
-------------------
1. Collector supports different URIs based on whether a single event or a batch of events is received.
-2. Post authentication – events are validated against schema. At this point – appropriate return code is sent to client when validation fails.
+2. Post authentication, events are validated against schema; an appropriate return code is sent to the client when validation fails.
3. Event Processor checks against transformation rules (if enabled) and handles VES output standardization (e.g. VES 7.x input to VES5.4 output).
-4. Optional (activated by flag *collector.externalSchema.checkflag*) post authentication of stndDefined fields – specific fields are validated against schema. At this point – appropriate return code is sent to client when validation fails.
+4. Optionally (activated by the flag *collector.externalSchema.checkflag*), stndDefined fields are validated against their schemas after authentication; an appropriate return code is sent to the client when validation fails.
5. If no problems were detected during the previous steps, a success HTTP code is returned.
6. Based on domain (or stndDefinedNamespace), events are asynchronously distributed to configurable topics.
1. If topic mapping does not exist, event distribution is skipped.
@@ -51,17 +51,17 @@ Features Supported
- General schema validation (against standard VES definition)
- StndDefined fields schema validation
- Mapping of external schemas to local schema files during stndDefined validation
-- Multiple schema support and backward compatibility
+- Multiple schema support and backward compatibility
- Configurable event transformation
-- Configurable suppression
+- Configurable suppression
- Publish events into Dmaap Topic (with/without AAF)
-The collector can receive events via standard HTTP port (8080) or secure port (8443). Depending on the install/configuration – either one or both can be supported (ports are also modifiable).
+The collector can receive events via the standard HTTP port (8080) or the secure port (8443). Depending on the install/configuration, either one or both can be supported (ports are also modifiable).
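+
+For illustration, a single event can be posted to the collector with curl; the host below is a placeholder and the ``/eventListener/v7`` path assumes a VES 7.x API version:
+
+.. code-block:: bash
+
+   # Post a single VES event over the plain HTTP port (8080).
+   # Host and API version are deployment-specific assumptions.
+   curl -X POST -H "Content-Type: application/json" \
+        -d @event.json http://<ves-collector-host>:8080/eventListener/v7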
Dynamic configuration fed into Collector via DCAEPlatform
---------------------------------------------------------
-- Outbound Dmaap/UEB topic
+- Outbound Dmaap/UEB topic
- Schema version to be validated against
- Authentication account for VNF
@@ -70,5 +70,3 @@ POST requests result in standard HTTP status codes:
- 200-299 Success
- 400-499 Client request has a problem (data error)
- 500-599 Collector service has a problem
-
-
diff --git a/docs/sections/services/ves-http/configuration.rst b/docs/sections/services/ves-http/configuration.rst
index 6f98b29e..2100108d 100644
--- a/docs/sections/services/ves-http/configuration.rst
+++ b/docs/sections/services/ves-http/configuration.rst
@@ -7,6 +7,7 @@ Configuration
VES expects to be able to fetch configuration directly from the consul service in the following JSON format:
.. code-block:: json
+
{
"collector.dynamic.config.update.frequency": "5",
"event.transform.flag": "0",
diff --git a/docs/sections/services/ves-http/delivery.rst b/docs/sections/services/ves-http/delivery.rst
index 0e5e826a..0c90d34a 100644
--- a/docs/sections/services/ves-http/delivery.rst
+++ b/docs/sections/services/ves-http/delivery.rst
@@ -6,4 +6,4 @@ Delivery
**VES** is delivered as a docker container and published in the ONAP Nexus repository following the image naming convention.
-Full image name is `onap/org.onap.dcaegen2.collectors.ves.vescollector`. \ No newline at end of file
+Full image name is `onap/org.onap.dcaegen2.collectors.ves.vescollector`.
diff --git a/docs/sections/services/ves-http/installation-helm.rst b/docs/sections/services/ves-http/installation-helm.rst
index d5f6bdd6..f251f61d 100644
--- a/docs/sections/services/ves-http/installation-helm.rst
+++ b/docs/sections/services/ves-http/installation-helm.rst
@@ -26,7 +26,7 @@ For example:
helm -n onap upgrade dev-dcaegen2-services --reuse-values --values new-config.yaml oom/kubernetes/dcaegen2-services
Where the contents of the ``new-config.yaml`` file are:
- .. code-block:: bash
+ .. code-block:: yaml
dcae-ves-collector:
applicationConfig:
@@ -39,11 +39,13 @@ For small changes like this, it is also possible to inline the new value:
After the upgrade, the new auth method value should be visible inside the dev-dcae-ves-collector-application-config-configmap Config-Map.
It can be verified by running:
+
.. code-block:: bash
kubectl -n onap get cm <config map name> -o yaml
For VES Collector:
+
.. code-block:: bash
kubectl -n onap get cm dev-dcae-ves-collector-application-config-configmap -o yaml
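+
+To inspect only the configuration payload instead of the whole Config-Map object, a jsonpath query over the ``data`` field can be used:
+
+.. code-block:: bash
+
+   kubectl -n onap get cm dev-dcae-ves-collector-application-config-configmap -o jsonpath='{.data}'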
@@ -55,9 +57,9 @@ External repository schema files integration with VES Collector
---------------------------------------------------------------
In order to utilize the externalRepo openAPI schema files defined in the `OOM <https://gerrit.onap.org/r/gitweb?p=oom.git;a=tree;f=kubernetes/dcaegen2-services/resources/external>`_ repository and installed with the dcaegen2 module, follow the steps below.
-.. note::
+.. note::
For more information on generating schema files, see `External-schema-repo-generator (OOM Utils repository) <https://gerrit.onap.org/r/gitweb?p=oom/utils.git;a=tree;f=external-schema-repo-generator>`_
-
+
The default ONAP deployment for the Istanbul release makes available the SA88-Rel16 OpenAPI schema files; optionally, SA99-Rel16 files can be loaded using the `Generator script <https://gerrit.onap.org/r/gitweb?p=oom/utils.git;a=blob;f=external-schema-repo-generator/generator/generate.sh>`_ based on the steps documented in the `README <https://git.onap.org/oom/utils/tree/external-schema-repo-generator/README.md>`_.
@@ -89,7 +91,7 @@ E.g:
optional: true
- name: 'dev-dcae-external-repo-configmap-sa88-rel16'
type: configmap
- mountPath: /opt/app/VESCollector/etc/externalRepo/3gpp/rep/sa5/MnS/blob/SA88-Rel16/OpenAPI
+ mountPath: /opt/app/VESCollector/etc/externalRepo/3gpp/rep/sa5/MnS/blob/SA88-Rel16/OpenAPI
optional: true
If more than a single external schema is required, add a new config map to the 'externalVolumes' object as in the example above. Make sure that all external schemas (all openAPI files) are reflected in the schema-map file.
@@ -112,10 +114,10 @@ Using external TLS certificates obtained using CMP v2 protocol
In order to use the X.509 certificates obtained from the CMP v2 server (the so-called "operator's certificates"), refer to the following description:
-:ref:`Enabling TLS with external x.509 certificates <external-tls-helm>`
+:ref:`Enabling TLS with external x.509 certificates <tls_enablement>`
Example values for VES Collector:
- .. code-block:: bash
+ .. code-block:: yaml
global:
cmpv2Enabled: true
@@ -135,4 +137,3 @@ Example values for VES Collector:
name: ves-cmpv2-keystore-password
key: password
create: true
-
diff --git a/docs/sections/services/ves-http/stnd-defined-validation.rst b/docs/sections/services/ves-http/stnd-defined-validation.rst
index 6449922e..6089e662 100644
--- a/docs/sections/services/ves-http/stnd-defined-validation.rst
+++ b/docs/sections/services/ves-http/stnd-defined-validation.rst
@@ -253,9 +253,9 @@ scenario.
+---------------------+------------------------------------------------------------------+
| Text | "Invalid input value for %1 %2: %3" |
+---------------------+------------------------------------------------------------------+
- | Variables | %1 – “attribute” |
- | | %2 – "event.stndDefinedFields.schemaReference" |
- | | %3 – "Referred external schema not present in schema repository" |
+ | Variables | %1 - "attribute" |
+ | | %2 - "event.stndDefinedFields.schemaReference" |
+ | | %3 - "Referred external schema not present in schema repository" |
+---------------------+------------------------------------------------------------------+
| HTTP status code(s) | 400 Bad request |
+---------------------+------------------------------------------------------------------+
@@ -301,8 +301,8 @@ scenario.
+---------------------+-----------------------------------------------------+
| Text | Mandatory input %1 %2 is missing from request |
+---------------------+-----------------------------------------------------+
- | Variables | %1 – “attribute” |
- | | %2 – "event.commonEventHeader.stndDefinedNamespace" |
+ | Variables | %1 - "attribute" |
+ | | %2 - "event.commonEventHeader.stndDefinedNamespace" |
+---------------------+-----------------------------------------------------+
| HTTP status code(s) | 400 Bad Request |
+---------------------+-----------------------------------------------------+
@@ -316,8 +316,8 @@ scenario.
+---------------------+-----------------------------------------------------+
| Text | Mandatory input %1 %2 is empty in request |
+---------------------+-----------------------------------------------------+
- | Variables | %1 – “attribute” |
- | | %2 – "event.commonEventHeader.stndDefinedNamespace" |
+ | Variables | %1 - "attribute" |
+ | | %2 - "event.commonEventHeader.stndDefinedNamespace" |
+---------------------+-----------------------------------------------------+
| HTTP status code(s) | 400 Bad Request |
+---------------------+-----------------------------------------------------+
@@ -331,12 +331,9 @@ scenario.
+---------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
| Text | "Invalid input value for %1 %2: %3" |
+---------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
- | Variables | %1 – “attribute” |
- | | %2 – "event.commonEventHeader.stndDefinedNamespace" |
- | | %3 – "stndDefinedNamespace received not present in VES Collector routing configuration. Unable to route event to appropriate DMaaP topic" |
+ | Variables | %1 - "attribute" |
+ | | %2 - "event.commonEventHeader.stndDefinedNamespace" |
+ | | %3 - "stndDefinedNamespace received not present in VES Collector routing configuration. Unable to route event to appropriate DMaaP topic" |
+---------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
| HTTP status code(s) | 400 Bad request |
+---------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
-
-
-
diff --git a/docs/sections/services/ves-http/tls-authentication.rst b/docs/sections/services/ves-http/tls-authentication.rst
index 12301383..d13fa305 100644
--- a/docs/sections/services/ves-http/tls-authentication.rst
+++ b/docs/sections/services/ves-http/tls-authentication.rst
@@ -1,5 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
+
.. raw:: html
<style> .red {color:red} </style>
diff --git a/docs/sections/services/ves-hv/design.rst b/docs/sections/services/ves-hv/design.rst
index 899b0c05..6e3ba858 100644
--- a/docs/sections/services/ves-hv/design.rst
+++ b/docs/sections/services/ves-hv/design.rst
@@ -29,7 +29,7 @@ Extendability
HV-VES is designed to be extendable by adding new domain-specific proto files.
-The proto file (with the VES CommonHeader) comes with a binary-type **Payload** parameter, where domain-specific data should be placed.
+The proto file (with the VES CommonHeader) comes with a binary-type **Payload** parameter, where domain-specific data should be placed.
Domain-specific data are also encoded with GPB. A domain-specific proto file is required to decode the data.
This domain-specific proto has to be shared with analytics applications - HV-VES does not analyze domain-specific data.
@@ -39,7 +39,7 @@ Additional domains can be defined based on existing VES domains (like Fault, Hea
There is also the **stndDefined** domain, supported by default in HV-VES. Events with this domain are expected to contain
a data payload described by OpenAPI schemas. HV-VES doesn't decode the payload of stndDefined events, thus it does not contain
specific **stndDefined** proto files. The only difference of the **stndDefined** domain is its specific routing. More
-details of stndDefined routing: :ref:`_stndDefined_domain`.
+details of stndDefined routing: :ref:`stndDefined_domain`.
GPB proto files are backwards compatible, and a new domain can be added without affecting existing systems.
@@ -53,5 +53,3 @@ Implementation details
- Netty is used by means of reactor-netty library.
- Kotlin is used to write concise code with great interoperability with existing Java libraries.
- Types defined in Λrrow library are also used when it improves readability or general cleanness of the code.
-
-
diff --git a/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst b/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst
index 9d35e1ef..a2b77272 100644
--- a/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst
+++ b/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst
@@ -17,7 +17,7 @@ and ready for connections. Otherwise it returns a **HTTP 503 Service Unavailable
Monitoring
----------
-HV-VES collector allows to collect metrics data at runtime. To serve this purpose HV-VES application exposes an endpoint **GET /monitoring/prometheus**
+HV-VES collector allows collecting metrics data at runtime. To serve this purpose, the HV-VES application exposes an endpoint **GET /monitoring/prometheus**
which returns a **HTTP 200 OK** message with specific data in its body. The returned data is in a format readable by the Prometheus service.
Prometheus endpoint shares a port with healthchecks.
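+
+For example, the metrics can be fetched with curl (host and port below are deployment-specific placeholders):
+
+.. code-block:: bash
+
+   curl http://<hv-ves-host>:<port>/monitoring/prometheus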
diff --git a/docs/sections/services/ves-hv/index.rst b/docs/sections/services/ves-hv/index.rst
index ccd8730e..c8f5f263 100644
--- a/docs/sections/services/ves-hv/index.rst
+++ b/docs/sections/services/ves-hv/index.rst
@@ -26,7 +26,7 @@ High Volume VES Collector overview and functions
.. toctree::
:maxdepth: 1
-
+
architecture
design
repositories
diff --git a/docs/sections/services/ves-hv/installation-helm.rst b/docs/sections/services/ves-hv/installation-helm.rst
index b9bf6da9..87aca24b 100644
--- a/docs/sections/services/ves-hv/installation-helm.rst
+++ b/docs/sections/services/ves-hv/installation-helm.rst
@@ -4,7 +4,7 @@
HV-VES Helm Installation
========================
-Starting from ONAP/Honolulu release, HV-VES is installed with a DCAEGEN2-Services Helm charts.
+Starting from ONAP/Honolulu release, HV-VES is installed with the DCAEGEN2-Services Helm charts.
HV-VES application is configured by default to use TLS/SSL encryption on TCP connection.
Disable TLS security - Helm based deployment
@@ -22,7 +22,7 @@ For example:
helm -n onap upgrade dev-dcaegen2-services --reuse-values --values new-config.yaml oom/kubernetes/dcaegen2-services
Where the contents of the ``new-config.yaml`` file are:
- .. code-block:: bash
+ .. code-block:: yaml
dcae-hv-ves-collector:
applicationConfig:
@@ -35,17 +35,20 @@ For small changes like this, it is also possible to inline the new value:
After the upgrade, the security.sslDisable property should be changed and visible inside the dev-dcae-hv-ves-collector-application-config-configmap Config-Map.
It can be verified by running:
+
.. code-block:: bash
kubectl -n onap get cm <config map name> -o yaml
For HV-VES Collector:
+
.. code-block:: bash
kubectl -n onap get cm dev-dcae-hv-ves-collector-application-config-configmap -o yaml
To apply the new configuration, a restart of the HV-VES Collector application might be necessary. This can be done by HV-VES helm reinstallation:
+
.. code-block:: bash
helm -n onap upgrade dev-dcaegen2-services --reuse-values --set dcae-hv-ves-collector.enabled="false" oom/kubernetes/dcaegen2-services
@@ -57,10 +60,11 @@ Using external TLS certificates obtained using CMP v2 protocol
In order to use the X.509 certificates obtained from the CMP v2 server (so called "operator`s certificates"), refer to the following description:
-:ref:`Enabling TLS with external x.509 certificates <external-tls-helm>`
+:ref:`Enabling TLS with external x.509 certificates <tls_enablement>`
Example values for HV-VES Collector:
- .. code-block:: bash
+
+ .. code-block:: yaml
global:
cmpv2Enabled: true
@@ -80,4 +84,3 @@ Example values for HV-VES Collector:
name: hv-ves-cmpv2-keystore-password
key: password
create: true
-
diff --git a/docs/sections/services/ves-hv/resources/base-configuration.json b/docs/sections/services/ves-hv/resources/base-configuration.json
index 6580287d..10cc39e6 100644
--- a/docs/sections/services/ves-hv/resources/base-configuration.json
+++ b/docs/sections/services/ves-hv/resources/base-configuration.json
@@ -9,4 +9,4 @@
"security.keys.keyStorePasswordFile": "/etc/ves-hv/ssl/server.pass",
"security.keys.trustStoreFile": "/etc/ves-hv/ssl/trust.p12",
"security.keys.trustStorePasswordFile": "/etc/ves-hv/ssl/trust.pass"
-} \ No newline at end of file
+}
diff --git a/docs/sections/services/ves-hv/resources/metrics_sample_response.txt b/docs/sections/services/ves-hv/resources/metrics_sample_response.txt
index da54e3ea..813cdc15 100644
--- a/docs/sections/services/ves-hv/resources/metrics_sample_response.txt
+++ b/docs/sections/services/ves-hv/resources/metrics_sample_response.txt
@@ -64,4 +64,4 @@ hvves_connections_active 1.0
jvm_gc_live_data_size_bytes 7634496.0
hvves_messages_latency_seconds_max 1.5459828692292638E9
hvves_messages_latency_seconds_count 20000.0
-hvves_messages_latency_seconds_sum 2.91400110035487E9 \ No newline at end of file
+hvves_messages_latency_seconds_sum 2.91400110035487E9
diff --git a/docs/sections/services/ves-hv/run-time-configuration.rst b/docs/sections/services/ves-hv/run-time-configuration.rst
index 4da0b10d..09696d55 100644
--- a/docs/sections/services/ves-hv/run-time-configuration.rst
+++ b/docs/sections/services/ves-hv/run-time-configuration.rst
@@ -6,7 +6,7 @@
Run-Time configuration
======================
-HV-VES dynamic configuration is primarily meant to provide DMaaP Connection Objects (see :ref:`dmaap-connection-objects`).
+HV-VES dynamic configuration is primarily meant to provide DMaaP Connection Objects.
.. note:: Kafka config info.
In the case of HV-VES, this configuration method is purely used as a generic reference.
@@ -48,4 +48,4 @@ For more information, see :ref:`supported_domains`.
Providing configuration during OOM deployment
---------------------------------------------
-The configuration is created from HV-VES Helm charts defined under **applicationConfig** during ONAP OOM/Kubernetes deployment.
+The configuration is created from HV-VES Helm charts defined under **applicationConfig** during ONAP OOM/Kubernetes deployment.
diff --git a/docs/sections/services/ves-hv/troubleshooting.rst b/docs/sections/services/ves-hv/troubleshooting.rst
index c7b6a291..750a878c 100644
--- a/docs/sections/services/ves-hv/troubleshooting.rst
+++ b/docs/sections/services/ves-hv/troubleshooting.rst
@@ -163,7 +163,7 @@ To resolve this issue, you can either wait for that Kafka service to be availabl
The above log is printed when the message payload size is too big.
-**HV-VES** does not handle messages that exceed maximum payload size specified under streams_publishes configuration (see :ref:`dmaap-connection-objects`)
+**HV-VES** does not handle messages that exceed the maximum payload size.
====
diff --git a/docs/sections/services/ves-openapi-manager/architecture.rst b/docs/sections/services/ves-openapi-manager/architecture.rst
index 47d036d4..59a2aa7a 100644
--- a/docs/sections/services/ves-openapi-manager/architecture.rst
+++ b/docs/sections/services/ves-openapi-manager/architecture.rst
@@ -27,4 +27,3 @@ VES OpenAPI Manager workflow can be split into phases:
VES OpenAPI Manager workflow is presented in the diagram below.
.. image:: resources/workflow.png
-
diff --git a/docs/sections/services/ves-openapi-manager/artifacts.rst b/docs/sections/services/ves-openapi-manager/artifacts.rst
index f42a470a..0a9aafd4 100644
--- a/docs/sections/services/ves-openapi-manager/artifacts.rst
+++ b/docs/sections/services/ves-openapi-manager/artifacts.rst
@@ -23,4 +23,3 @@ Repository
----------
Repository with the code of VES OpenAPI Manager is available on ONAP Gerrit:
`Gerrit <https://gerrit.onap.org/r/admin/repos/dcaegen2/platform/ves-openapi-manager>`_
-
diff --git a/docs/sections/services/ves-openapi-manager/resources/artifact-no-stndDefined.yaml b/docs/sections/services/ves-openapi-manager/resources/artifact-no-stndDefined.yaml
index 73c6919e..ad1b7aa2 100644
--- a/docs/sections/services/ves-openapi-manager/resources/artifact-no-stndDefined.yaml
+++ b/docs/sections/services/ves-openapi-manager/resources/artifact-no-stndDefined.yaml
@@ -34,4 +34,4 @@ event:
keyValuePair: {presence: required, structure: {key: {presence: required, value: fileFormatType}, value: {presence: required, value: org.3GPP.32.435}}},
keyValuePair: {presence: required, structure: {key: {presence: required, value: fileFormatVersion}, value: {presence: required, value: V10}}}}
}
-... \ No newline at end of file
+...
diff --git a/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined-no-schemaReference.yaml b/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined-no-schemaReference.yaml
index 6bba640e..a4238001 100644
--- a/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined-no-schemaReference.yaml
+++ b/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined-no-schemaReference.yaml
@@ -26,4 +26,4 @@ event:
data: {presence: required}
stndDefinedFieldsVersion: {presence: required, value: "1.0"}
-... \ No newline at end of file
+...
diff --git a/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined.yaml b/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined.yaml
index ad3ca469..581f0290 100644
--- a/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined.yaml
+++ b/docs/sections/services/ves-openapi-manager/resources/artifact-stndDefined.yaml
@@ -27,4 +27,4 @@ event:
data: {presence: required}
stndDefinedFieldsVersion: {presence: required, value: "1.0"}
-... \ No newline at end of file
+...
diff --git a/docs/sections/services/ves-openapi-manager/resources/schema-map-example.json b/docs/sections/services/ves-openapi-manager/resources/schema-map-example.json
index e11851c0..2e56400b 100644
--- a/docs/sections/services/ves-openapi-manager/resources/schema-map-example.json
+++ b/docs/sections/services/ves-openapi-manager/resources/schema-map-example.json
@@ -11,4 +11,4 @@
"publicURL": "https://forge.3gpp.org/rep/sa5/MnS/blob/SA88-Rel16/OpenAPI/faultMnS.yaml",
"localURL": "3gpp/rep/sa5/MnS/tree/SA88-Rel16/OpenAPI/faultMnS.yaml"
}
-] \ No newline at end of file
+]
diff --git a/docs/sections/services/ves-openapi-manager/resources/schema-map-invalid.json b/docs/sections/services/ves-openapi-manager/resources/schema-map-invalid.json
index fb34f5ae..ad591d3f 100644
--- a/docs/sections/services/ves-openapi-manager/resources/schema-map-invalid.json
+++ b/docs/sections/services/ves-openapi-manager/resources/schema-map-invalid.json
@@ -3,4 +3,4 @@
"publicURL": "https://forge.3gpp.org/rep/sa5/MnS/tree/SA88-Rel16/OpenAPI/streamingDataMnS.yaml",
"localURL": "3gpp/rep/sa5/MnS/tree/SA88-Rel16/OpenAPI/streamingDataMnS.yaml"
}
-] \ No newline at end of file
+]
diff --git a/docs/sections/services/ves-openapi-manager/resources/schema-map.json b/docs/sections/services/ves-openapi-manager/resources/schema-map.json
index 97883fbc..2a6a889d 100644
--- a/docs/sections/services/ves-openapi-manager/resources/schema-map.json
+++ b/docs/sections/services/ves-openapi-manager/resources/schema-map.json
@@ -3,4 +3,4 @@
"publicURL": "https://forge.3gpp.org/rep/sa5/MnS/blob/SA88-Rel16/OpenAPI/faultMnS.yaml",
"localURL": "3gpp/rep/sa5/MnS/tree/SA88-Rel16/OpenAPI/faultMnS.yaml"
}
-] \ No newline at end of file
+]
diff --git a/docs/sections/services/ves-openapi-manager/use-cases.rst b/docs/sections/services/ves-openapi-manager/use-cases.rst
index aa3a4bf8..a44c311b 100644
--- a/docs/sections/services/ves-openapi-manager/use-cases.rst
+++ b/docs/sections/services/ves-openapi-manager/use-cases.rst
@@ -114,4 +114,4 @@ Validation results
There are two ways to receive validation results.
1) Via SDC UI. Results are available in the *Service->Distributions* view. To see the results in the SDC UI, the user has to wait up to a few minutes.
-2) In VES OpenAPI Manager logs. They are printed right after validation. \ No newline at end of file
+2) In VES OpenAPI Manager logs. They are printed right after validation.
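+
+For example, the results can be pulled from the pod logs with kubectl (the pod name below is a placeholder):
+
+.. code-block:: bash
+
+   kubectl -n onap logs <ves-openapi-manager-pod> | grep -i validation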