-rw-r--r--  docs/sections/apis/DFC.json                                            621
-rw-r--r--  docs/sections/apis/DFC.rst                                             187
-rw-r--r--  docs/sections/apis/ves-hv/index.rst                                      5
-rw-r--r--  docs/sections/apis/ves-hv/supported-domains.rst                          2
-rw-r--r--  docs/sections/services/bbs-event-processor/installation.rst            101
-rw-r--r--  docs/sections/services/dfc/architecture.rst                             18
-rw-r--r--  docs/sections/services/dfc/certificates.rst                            115
-rw-r--r--  docs/sections/services/dfc/configuration.rst                            10
-rw-r--r--  docs/sections/services/dfc/consumedapis.rst                             72
-rw-r--r--  docs/sections/services/dfc/index.rst                                     2
-rw-r--r--  docs/sections/services/ves-hv/architecture.rst                           2
-rw-r--r--  docs/sections/services/ves-hv/deployment.rst                           155
-rw-r--r--  docs/sections/services/ves-hv/design.rst                                 4
-rw-r--r--  docs/sections/services/ves-hv/example-event.rst                          2
-rw-r--r--  docs/sections/services/ves-hv/healthcheck-and-monitoring.rst             4
-rw-r--r--  docs/sections/services/ves-hv/index.rst                                  4
-rw-r--r--  docs/sections/services/ves-hv/resources/ONAP_VES_HV_Architecture.png (renamed from docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png)  bin 46724 -> 46724 bytes
-rw-r--r--  docs/sections/services/ves-hv/resources/WTP.yaml (renamed from docs/sections/services/ves-hv/WTP.yaml)  0
-rw-r--r--  docs/sections/services/ves-hv/resources/base-configuration.json         12
-rw-r--r--  docs/sections/services/ves-hv/resources/blueprint-snippet.yaml          24
-rw-r--r--  docs/sections/services/ves-hv/resources/dynamic-configuration.json      28
-rw-r--r--  docs/sections/services/ves-hv/resources/metrics_sample_response.txt (renamed from docs/sections/services/ves-hv/metrics_sample_response.txt)  0
-rw-r--r--  docs/sections/services/ves-hv/run-time-configuration.rst                 64
-rw-r--r--  docs/sections/services/ves-hv/troubleshooting.rst                       729
-rw-r--r--  platformdoc/docs/components/dcae-cli/blueprint-generator/blueprint_generator.rst  54
25 files changed, 1242 insertions, 973 deletions
diff --git a/docs/sections/apis/DFC.json b/docs/sections/apis/DFC.json
new file mode 100644
index 00000000..08d03993
--- /dev/null
+++ b/docs/sections/apis/DFC.json
@@ -0,0 +1,621 @@
+{
+ "swagger": "2.0",
+ "info": {
+    "description": "This page lists all the REST APIs for the DATAFILE app server.",
+ "version": "1.0",
+ "title": "DATAFILE app server"
+ },
+ "host": "localhost:8100",
+ "basePath": "/",
+ "tags": [
+ {
+ "name": "heartbeat-controller",
+ "description": "Heartbeat Controller"
+ },
+ {
+ "name": "operation-handler",
+ "description": "Operation Handler"
+ },
+ {
+ "name": "schedule-controller",
+ "description": "Schedule Controller"
+ },
+ {
+ "name": "web-mvc-links-handler",
+ "description": "Web Mvc Links Handler"
+ }
+ ],
+ "paths": {
+ "/actuator": {
+ "get": {
+ "tags": [
+ "web-mvc-links-handler"
+ ],
+ "summary": "links",
+ "operationId": "linksUsingGET",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/Link"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/health": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_2",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/health/{component}": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_1",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/health/{component}/{instance}": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/info": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_3",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/logfile": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_4",
+ "produces": [
+ "application/octet-stream"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/loggers": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_6",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/loggers/{name}": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_5",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ },
+ "post": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingPOST",
+ "consumes": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "produces": [
+ "*/*"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "201": {
+ "description": "Created"
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/metrics": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_8",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/actuator/metrics/{requiredMetricName}": {
+ "get": {
+ "tags": [
+ "operation-handler"
+ ],
+ "summary": "handle",
+ "operationId": "handleUsingGET_7",
+ "produces": [
+ "application/json",
+ "application/vnd.spring-boot.actuator.v2+json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "body",
+ "required": false,
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "object"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/heartbeat": {
+ "get": {
+ "tags": [
+ "heartbeat-controller"
+ ],
+ "summary": "Returns liveness of DATAFILE service",
+ "operationId": "heartbeatUsingGET",
+ "produces": [
+ "*/*"
+ ],
+ "responses": {
+ "200": {
+          "description": "DATAFILE service is alive",
+ "schema": {
+ "$ref": "#/definitions/Mono«ResponseEntity«string»»"
+ }
+ },
+ "401": {
+ "description": "You are not authorized to view the resource"
+ },
+ "403": {
+ "description": "Accessing the resource you were trying to reach is forbidden"
+ },
+ "404": {
+ "description": "The resource you were trying to reach is not found"
+ }
+ }
+ }
+ },
+ "/start": {
+ "get": {
+ "tags": [
+ "schedule-controller"
+ ],
+ "summary": "Start scheduling worker request",
+ "operationId": "startTasksUsingGET",
+ "produces": [
+ "*/*"
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/Mono«ResponseEntity«string»»"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ },
+ "/stopDatafile": {
+ "get": {
+ "tags": [
+ "schedule-controller"
+ ],
+        "summary": "Stop scheduling worker request",
+ "operationId": "stopTaskUsingGET",
+ "produces": [
+ "*/*"
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/Mono«ResponseEntity«string»»"
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ },
+ "403": {
+ "description": "Forbidden"
+ },
+ "404": {
+ "description": "Not Found"
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "Link": {
+ "type": "object",
+ "properties": {
+ "href": {
+ "type": "string"
+ },
+ "templated": {
+ "type": "boolean"
+ }
+ },
+ "title": "Link"
+ },
+ "Map«string,Link»": {
+ "type": "object",
+ "title": "Map«string,Link»",
+ "additionalProperties": {
+ "$ref": "#/definitions/Link"
+ }
+ },
+ "Mono«ResponseEntity«string»»": {
+ "type": "object",
+ "title": "Mono«ResponseEntity«string»»"
+ }
+ }
+} \ No newline at end of file
diff --git a/docs/sections/apis/DFC.rst b/docs/sections/apis/DFC.rst
index 57e2914c..f345d93f 100644
--- a/docs/sections/apis/DFC.rst
+++ b/docs/sections/apis/DFC.rst
@@ -2,7 +2,7 @@
DFC (DataFile Collector)
========================
-:Date: 2018-09-21
+:Date: 2019-04-24
.. contents::
:depth: 3
@@ -16,184 +16,11 @@ Component description can be found under `DFC`_.
.. _DFC: ../../sections/services/dfc/index.html
-Paths
-=====
+Offered APIs
+============
-GET /events/unauthenticated.VES_NOTIFICATION_OUTPUT
----------------------------------------------------
+.. csv-table::
+ :header: "API name", "Swagger JSON"
+ :widths: 10,5
-Description
-~~~~~~~~~~~
-
-Reads fileReady events from DMaaP (Data Movement as a Platform)
-
-
-Responses
-~~~~~~~~~
-
-+-----------+---------------------+
-| HTTP Code | Description |
-+===========+=====================+
-| **200** | successful response |
-+-----------+---------------------+
-
-
-
-POST /publish
--------------
-
-Description
-~~~~~~~~~~~
-
-Publish the collected file/s as a stream to DataRouter
- - file as stream
- - compression
- - fileFormatType
- - fileFormatVersion
-
-
-Responses
-~~~~~~~~~
-
-+-----------+---------------------+
-| HTTP Code | Description |
-+===========+=====================+
-| **200** | successful response |
-+-----------+---------------------+
-
-Compiling DFC
-=============
-
-Whole project (top level of DFC directory) and each module (sub module directory) can be compiled using
-`mvn clean install` command.
-
-Configuration file: Config/datafile_endpoints.json
-
-Maven GroupId:
-==============
-
-org.onap.dcaegen2.collectors
-
-Maven Parent ArtifactId:
-========================
-
-dcae-collectors
-
-Maven Children Artifacts:
-=========================
-
-1. datafile-app-server: DFC server
-2. datafile-dmaap-client: Contains implementation of DmaaP client
-3. datafile-commons: Common code for whole DFC modules
-4. docker-compose: Contains the docker-compose
-
-Configuration of Certificates in test environment(For FTP over TLS):
-====================================================================
-
-DFC supports two protocols: FTPES and SFTP.
-For FTPES, it is mutual authentication with certificates.
-In our test environment, we use vsftpd to simulate xNF, and we generate self-signed
-keys & certificates on both vsftpd server and DFC.
-
-1. Generate key/certificate with openssl for DFC:
--------------------------------------------------
-.. code:: bash
-
- openssl genrsa -out dfc.key 2048
- openssl req -new -out dfc.csr -key dfc.key
- openssl x509 -req -days 365 -in dfc.csr -signkey dfc.key -out dfc.crt
-
-2. Generate key & certificate with openssl for vsftpd:
-------------------------------------------------------
-.. code:: bash
-
- openssl genrsa -out ftp.key 2048
- openssl req -new -out ftp.csr -key ftp.key
- openssl x509 -req -days 365 -in ftp.csr -signkey ftp.key -out ftp.crt
-
-3. Configure java keystore in DFC:
-----------------------------------
-We have two keystore files, one for TrustManager, one for KeyManager.
-
-**For TrustManager:**
-
-1. First, convert your certificate in a DER format :
-
- .. code:: bash
-
- openssl x509 -outform der -in ftp.crt -out ftp.der
-
-2. And after, import it in the keystore :
-
- .. code:: bash
-
- keytool -import -alias ftp -keystore ftp.jks -file ftp.der
-
-**For KeyManager:**
-
-1. First, create a jks keystore:
-
- .. code:: bash
-
- keytool -keystore dfc.jks -genkey -alias dfc
-
-2. Second, import dfc.crt and dfc.key to dfc.jks. This is a bit troublesome.
-
- 1). Step one: Convert x509 Cert and Key to a pkcs12 file
-
- .. code:: bash
-
- openssl pkcs12 -export -in dfc.crt -inkey dfc.key -out dfc.p12 -name [some-alias]
-
- Note: Make sure you put a password on the p12 file - otherwise you'll get a null reference exception when yy to import it. (In case anyone else had this headache).
-
- Note 2: You might want to add the -chainoption to preserve the full certificate chain.
-
- 2). Step two: Convert the pkcs12 file to a java keystore:
-
- .. code:: bash
-
- keytool -importkeystore -deststorepass [changeit] -destkeypass [changeit] -destkeystore dfc.jks -srckeystore dfc.p12 -srcstoretype PKCS12 -srcstorepass [some-password] -alias [some-alias]
-
-3. Finished
-
-4. Configure vsftpd:
---------------------
- update /etc/vsftpd/vsftpd.conf:
-
- .. code-block:: bash
-
- rsa_cert_file=/etc/ssl/private/ftp.crt
- rsa_private_key_file=/etc/ssl/private/ftp.key
- ssl_enable=YES
- allow_anon_ssl=NO
- force_local_data_ssl=YES
- force_local_logins_ssl=YES
-
- ssl_tlsv1=YES
- ssl_sslv2=YES
- ssl_sslv3=YES
-
- require_ssl_reuse=NO
- ssl_ciphers=HIGH
-
- require_cert=YES
- ssl_request_cert=YES
- ca_certs_file=/home/vsftpd/myuser/dfc.crt
-
-5. Configure config/datafile_endpoints.json:
---------------------------------------------
- Update the file accordingly:
-
- .. code-block:: javascript
-
- "ftpesConfiguration": {
- "keyCert": "/config/dfc.jks",
- "keyPassword": "[yourpassword]",
- "trustedCA": "/config/ftp.jks",
- "trustedCAPassword": "[yourpassword]"
- }
-
-6. This has been tested with vsftpd and dfc, with self-signed certificates.
----------------------------------------------------------------------------
- In real deployment, we should use ONAP-CA signed certificate for DFC, and vendor-CA signed certificate for xNF
+ "Datafile Collector API", ":download:`link <DFC.json>`"
diff --git a/docs/sections/apis/ves-hv/index.rst b/docs/sections/apis/ves-hv/index.rst
index d87d1aa0..c61c1e16 100644
--- a/docs/sections/apis/ves-hv/index.rst
+++ b/docs/sections/apis/ves-hv/index.rst
@@ -58,6 +58,9 @@ By default, **HV-VES** will use routing defined in **k8s-hv-ves.yaml-template**
- perf3gpp -> HV_VES_PERF3GPP
+
+.. _supported_domains:
+
Supported domains
=================
@@ -65,7 +68,7 @@ Domains supported by **HV-VES**:
- perf3gpp
-For domains descriptions, see :ref:`supported_domains`
+For domains descriptions, see :ref:`domains_supported_by_hvves`
.. _hv_ves_behaviors:
diff --git a/docs/sections/apis/ves-hv/supported-domains.rst b/docs/sections/apis/ves-hv/supported-domains.rst
index d1badaa6..68d5d226 100644
--- a/docs/sections/apis/ves-hv/supported-domains.rst
+++ b/docs/sections/apis/ves-hv/supported-domains.rst
@@ -1,7 +1,7 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. _supported_domains:
+.. _domains_supported_by_hvves:
Domains supported by HV-VES
===========================
diff --git a/docs/sections/services/bbs-event-processor/installation.rst b/docs/sections/services/bbs-event-processor/installation.rst
index 5c00c0d3..242f9f9b 100644
--- a/docs/sections/services/bbs-event-processor/installation.rst
+++ b/docs/sections/services/bbs-event-processor/installation.rst
@@ -7,58 +7,77 @@ The following docker-compose-yaml file shows a default configuration. The file c
.. code-block:: yaml
-version: '3'
-services:
- bbs-event-processor:
- image: onap/org.onap.dcaegen2.services.components.bbs-event-processor:latest
- container_name: bbs-event-processor
- hostname: bbs-event-processor
- ports:
- - 32100:8100
- environment:
- CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_DMAAPHOSTNAME: 10.133.115.190
- CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_DMAAPPORTNUMBER: 30227
- CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_DMAAPTOPICNAME: /events/unauthenticated.PNF_UPDATE
- CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_CONSUMERGROUP: foo
- CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_CONSUMERID: bar
- CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_DMAAPHOSTNAME: 10.133.115.190
- CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_DMAAPPORTNUMBER: 30227
- CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_DMAAPTOPICNAME: /events/unauthenticated.CPE_AUTHENTICATION
- CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_CONSUMERGROUP: foo
- CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_CONSUMERID: bar
- CONFIGS_DMAAP_PRODUCER_DMAAPHOSTNAME: 10.133.115.190
- CONFIGS_DMAAP_PRODUCER_DMAAPPORTNUMBER: 30227
- CONFIGS_DMAAP_PRODUCER_DMAAPTOPICNAME: /events/unauthenticated.DCAE_CL_OUTPUT
- CONFIGS_AAI_CLIENT_AAIHOST: 10.133.115.190
- CONFIGS_AAI_CLIENT_AAIPORT: 30233
- CONFIGS_APPLICATION_PIPELINESPOLLINGINTERVALSEC: 30
- CONFIGS_APPLICATION_PIPELINESTIMEOUTSEC: 15
- CONFIGS_APPLICATION_RE-REGISTRATION_POLICYSCOPE: policyScope
- CONFIGS_APPLICATION_RE-REGISTRATION_CLCONTROLNAME: controName
- CONFIGS_APPLICATION_CPE-AUTHENTICATION_POLICYSCOPE: policyScope
- CONFIGS_APPLICATION_CPE-AUTHENTICATION_CLCONTROLNAME: controlName
- LOGGING_LEVEL_ORG_ONAP_BBS: TRACE
+ version: '3'
+ services:
+ bbs-event-processor:
+ image: onap/org.onap.dcaegen2.services.components.bbs-event-processor:latest
+ container_name: bbs-event-processor
+ hostname: bbs-event-processor
+ ports:
+ - 32100:8100
+ environment:
+ CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_DMAAPHOSTNAME: 10.133.115.190
+ CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_DMAAPPORTNUMBER: 30227
+ CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_DMAAPTOPICNAME: /events/unauthenticated.PNF_UPDATE
+ CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_CONSUMERGROUP: foo
+ CONFIGS_DMAAP_CONSUMER_RE-REGISTRATION_CONSUMERID: bar
+ CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_DMAAPHOSTNAME: 10.133.115.190
+ CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_DMAAPPORTNUMBER: 30227
+ CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_DMAAPTOPICNAME: /events/unauthenticated.CPE_AUTHENTICATION
+ CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_CONSUMERGROUP: foo
+ CONFIGS_DMAAP_CONSUMER_CPE-AUTHENTICATION_CONSUMERID: bar
+ CONFIGS_DMAAP_PRODUCER_DMAAPHOSTNAME: 10.133.115.190
+ CONFIGS_DMAAP_PRODUCER_DMAAPPORTNUMBER: 30227
+ CONFIGS_DMAAP_PRODUCER_DMAAPTOPICNAME: /events/unauthenticated.DCAE_CL_OUTPUT
+ CONFIGS_AAI_CLIENT_AAIHOST: 10.133.115.190
+ CONFIGS_AAI_CLIENT_AAIPORT: 30233
+ CONFIGS_APPLICATION_PIPELINESPOLLINGINTERVALSEC: 30
+ CONFIGS_APPLICATION_PIPELINESTIMEOUTSEC: 15
+ CONFIGS_APPLICATION_RE-REGISTRATION_POLICYSCOPE: policyScope
+            CONFIGS_APPLICATION_RE-REGISTRATION_CLCONTROLNAME: controlName
+ CONFIGS_APPLICATION_CPE-AUTHENTICATION_POLICYSCOPE: policyScope
+ CONFIGS_APPLICATION_CPE-AUTHENTICATION_CLCONTROLNAME: controlName
+ LOGGING_LEVEL_ORG_ONAP_BBS: TRACE
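+
+    To bring the service up with this file, standard Docker Compose usage applies (a sketch, assuming the file above is saved locally as docker-compose.yml):
+
+    .. code-block:: bash
+
+        # run from the directory containing docker-compose.yml
+        docker-compose up -d bbs-event-processor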
For the Dublin release, it is a DCAE component that can be dynamically deployed via Cloudify blueprint installation.
Steps to deploy are shown below:
- Transfer the blueprint component file to the DCAE bootstrap pod under the /blueprints directory. The blueprint can be found at
- https://gerrit.onap.org/r/gitweb?p=dcaegen2/services.git;a=blob_plain;f=components/bbs-event-processor/dpo/blueprints/k8s-bbs-event-processor.yaml-template;hb=refs/heads/master
+
+ https://gerrit.onap.org/r/gitweb?p=dcaegen2/services.git;a=blob_plain;f=components/bbs-event-processor/dpo/blueprints/k8s-bbs-event-processor.yaml-template;hb=refs/heads/master
- Transfer the blueprint component inputs file to the DCAE bootstrap pod under the / directory. The blueprint inputs file can be found at
- https://gerrit.onap.org/r/gitweb?p=dcaegen2/services.git;a=blob_plain;f=components/bbs-event-processor/dpo/blueprints/bbs-event-processor-input.yaml;h=36e69cf64bee3b46ee2e1b95f1a16380b7046482;hb=refs/heads/master
+
+ https://gerrit.onap.org/r/gitweb?p=dcaegen2/services.git;a=blob_plain;f=components/bbs-event-processor/dpo/blueprints/bbs-event-processor-input.yaml;hb=refs/heads/master
- Enter the Bootstrap POD
- Validate blueprint
- cfy blueprints validate /blueprints/k8s-bbs-event-processor.yaml-template
+ .. code-block:: bash
+
+ cfy blueprints validate /blueprints/k8s-bbs-event-processor.yaml-template
- Upload validated blueprint
- cfy blueprints upload -b bbs-ep /blueprints/k8s-bbs-event-processor.yaml-template
+ .. code-block:: bash
+
+
+ cfy blueprints upload -b bbs-ep /blueprints/k8s-bbs-event-processor.yaml-template
- Create deployment
- cfy deployments create -b bbs-ep -i /bbs-event-processor-input.yaml bbs-ep
+ .. code-block:: bash
+
+
+ cfy deployments create -b bbs-ep -i /bbs-event-processor-input.yaml bbs-ep
- Deploy blueprint
- cfy executions start -d bbs-ep install
+ .. code-block:: bash
+
+
+ cfy executions start -d bbs-ep install
To undeploy BBS-ep, follow the steps below:
-- Validate blueprint by running command
- cfy uninstall bbs-ep
-- Validate blueprint by running command
- cfy blueprints delete bbs-ep \ No newline at end of file
+- Uninstall running BBS-ep and delete deployment
+ .. code-block:: bash
+
+
+ cfy uninstall bbs-ep
+- Delete blueprint
+ .. code-block:: bash
+
+
+ cfy blueprints delete bbs-ep \ No newline at end of file
diff --git a/docs/sections/services/dfc/architecture.rst b/docs/sections/services/dfc/architecture.rst
index 73597541..ac0c8d14 100644
--- a/docs/sections/services/dfc/architecture.rst
+++ b/docs/sections/services/dfc/architecture.rst
@@ -39,4 +39,20 @@ The event is received from the Message Router (MR), the files are fetched from a
(DR).
Both fetching of a file and publishing is retried a number of times with an increasing delay between each attempt.
After a number of attempts, the DFC will log an error message and give up. Failure to process one file does not
-affect the handling of others. \ No newline at end of file
+affect the handling of others.
+
+Maven GroupId:
+==============
+
+org.onap.dcaegen2.collectors
+
+Maven Parent ArtifactId:
+========================
+
+dcae-collectors
+
+Maven Children Artifacts:
+=========================
+
+1. datafile-app-server: DFC server
+
diff --git a/docs/sections/services/dfc/certificates.rst b/docs/sections/services/dfc/certificates.rst
new file mode 100644
index 00000000..17bfb2f3
--- /dev/null
+++ b/docs/sections/services/dfc/certificates.rst
@@ -0,0 +1,115 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+Certificates
+============
+
+Configuration of certificates in a test environment (for FTP over TLS):
+
+DFC supports two protocols: FTPES and SFTP.
+FTPES uses mutual authentication with certificates.
+In our test environment, we use vsftpd to simulate the xNF, and we generate self-signed
+keys and certificates on both the vsftpd server and DFC.
+
+1. Generate key/certificate with openssl for DFC:
+-------------------------------------------------
+.. code:: bash
+
+ openssl genrsa -out dfc.key 2048
+ openssl req -new -out dfc.csr -key dfc.key
+ openssl x509 -req -days 365 -in dfc.csr -signkey dfc.key -out dfc.crt
+
+2. Generate key & certificate with openssl for vsftpd:
+------------------------------------------------------
+.. code:: bash
+
+ openssl genrsa -out ftp.key 2048
+ openssl req -new -out ftp.csr -key ftp.key
+ openssl x509 -req -days 365 -in ftp.csr -signkey ftp.key -out ftp.crt
+
+3. Configure java keystore in DFC:
+----------------------------------
+We have two keystore files, one for TrustManager, one for KeyManager.
+
+**For TrustManager:**
+
+1. First, convert your certificate to DER format:
+
+ .. code:: bash
+
+ openssl x509 -outform der -in ftp.crt -out ftp.der
+
+2. Then, import it into the keystore:
+
+ .. code:: bash
+
+ keytool -import -alias ftp -keystore ftp.jks -file ftp.der
+
+**For KeyManager:**
+
+1. First, create a jks keystore:
+
+ .. code:: bash
+
+ keytool -keystore dfc.jks -genkey -alias dfc
+
+2. Second, import dfc.crt and dfc.key into dfc.jks. This is a bit involved and takes two steps.
+
+ 1). Step one: Convert x509 Cert and Key to a pkcs12 file
+
+ .. code:: bash
+
+ openssl pkcs12 -export -in dfc.crt -inkey dfc.key -out dfc.p12 -name [some-alias]
+
+ Note: Make sure you put a password on the p12 file - otherwise you'll get a null reference exception when you try to import it.
+
+   Note 2: You might want to add the ``-chain`` option to preserve the full certificate chain.
+
+ 2). Step two: Convert the pkcs12 file to a java keystore:
+
+ .. code:: bash
+
+ keytool -importkeystore -deststorepass [changeit] -destkeypass [changeit] -destkeystore dfc.jks -srckeystore dfc.p12 -srcstoretype PKCS12 -srcstorepass [some-password] -alias [some-alias]
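+
+   3). Optional sanity check: list the keystore content to verify that the key pair was imported under the expected alias (passwords and aliases as chosen in the commands above):
+
+   .. code:: bash
+
+      keytool -list -v -keystore dfc.jks -storepass [changeit]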
+
+3. Finished: dfc.jks now contains the DFC key and certificate.
+
+4. Configure vsftpd:
+--------------------
+ Update /etc/vsftpd/vsftpd.conf:
+
+ .. code-block:: bash
+
+ rsa_cert_file=/etc/ssl/private/ftp.crt
+ rsa_private_key_file=/etc/ssl/private/ftp.key
+ ssl_enable=YES
+ allow_anon_ssl=NO
+ force_local_data_ssl=YES
+ force_local_logins_ssl=YES
+
+ ssl_tlsv1=YES
+ ssl_sslv2=YES
+ ssl_sslv3=YES
+
+ require_ssl_reuse=NO
+ ssl_ciphers=HIGH
+
+ require_cert=YES
+ ssl_request_cert=YES
+ ca_certs_file=/home/vsftpd/myuser/dfc.crt
+
+5. Configure config/datafile_endpoints.json:
+--------------------------------------------
+ Update the file accordingly:
+
+ .. code-block:: javascript
+
+ "ftpesConfiguration": {
+ "keyCert": "/config/dfc.jks",
+ "keyPassword": "[yourpassword]",
+ "trustedCA": "/config/ftp.jks",
+ "trustedCAPassword": "[yourpassword]"
+ }
+
+6. This has been tested with vsftpd and dfc, with self-signed certificates.
+---------------------------------------------------------------------------
+ In a real deployment, an ONAP-CA-signed certificate should be used for DFC, and a vendor-CA-signed certificate for the xNF.
diff --git a/docs/sections/services/dfc/configuration.rst b/docs/sections/services/dfc/configuration.rst
index 22f50eeb..b8d0df95 100644
--- a/docs/sections/services/dfc/configuration.rst
+++ b/docs/sections/services/dfc/configuration.rst
@@ -7,6 +7,16 @@ Configuration
**datafile** configuration is controlled via a single JSON file called datafile_endpoints.json.
This is located under datafile-app-server/config.
+Compiling DFC
+=============
+
+The whole project (from the top level of the DFC directory) and each module (from its submodule directory) can be
+compiled using the `mvn clean install` command.
+
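+For example, from a local clone of the DFC repository (paths are illustrative):
+
+.. code-block:: bash
+
+   # build everything from the top level of the DFC directory ...
+   mvn clean install
+   # ... or a single module from its own directory
+   cd datafile-app-server && mvn clean install
+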
+Configuration file: Config/datafile_endpoints.json
+
+
+
JSON CONFIGURATION EXPLAINED
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/sections/services/dfc/consumedapis.rst b/docs/sections/services/dfc/consumedapis.rst
new file mode 100644
index 00000000..0ab10498
--- /dev/null
+++ b/docs/sections/services/dfc/consumedapis.rst
@@ -0,0 +1,72 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+Paths
+=====
+
+GET /events/unauthenticated.VES_NOTIFICATION_OUTPUT
+---------------------------------------------------
+
+Description
+~~~~~~~~~~~
+
+Reads fileReady events from DMaaP (Data Movement as a Platform)
+
+
+Responses
+~~~~~~~~~
+
++-----------+---------------------+
+| HTTP Code | Description |
++===========+=====================+
+| **200** | successful response |
++-----------+---------------------+
+
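+Example (hypothetical Message Router address; the consumer group and id shown are illustrative, added as the extra path segments DMaaP MR expects):
+
+.. code-block:: bash
+
+   curl "http://message-router:3904/events/unauthenticated.VES_NOTIFICATION_OUTPUT/myGroup/C1"
+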
+GET /FEEDLOG_TOPIC/DEFAULT_FEED_ID?type=pub&filename=FILENAME
+-------------------------------------------------------------
+
+Description
+~~~~~~~~~~~
+
+Queries the Data Router to check whether a file has been published previously.
+
+Responses
+~~~~~~~~~
+
++-----------+------------+-----------------------+
+| HTTP Code | Body | Description |
++===========+============+=======================+
+| **400** | NA | error in query |
++-----------+------------+-----------------------+
+| **200** | [] | Not published yet |
++-----------+------------+-----------------------+
+| **200** | [$FILENAME]| Already published |
++-----------+------------+-----------------------+
+
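+Example (placeholders as in the path above; the Data Router host is hypothetical):
+
+.. code-block:: bash
+
+   curl -k "https://<datarouter-host>/<FEEDLOG_TOPIC>/<DEFAULT_FEED_ID>?type=pub&filename=<FILENAME>"
+   # []             -> not published yet
+   # ["<FILENAME>"] -> already published
+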
+POST /publish
+-------------
+
+Description
+~~~~~~~~~~~
+
+Publishes the collected file(s) as a stream to the Data Router:
+ - file as stream
+ - compression
+ - fileFormatType
+ - fileFormatVersion
+ - productName
+ - vendorName
+ - lastEpochMicrosec
+ - sourceName
+ - startEpochMicrosec
+ - timeZoneOffset
+
+
+Responses
+~~~~~~~~~
+
++-----------+---------------------+
+| HTTP Code | Description |
++===========+=====================+
+| **200** | successful response |
++-----------+---------------------+ \ No newline at end of file
diff --git a/docs/sections/services/dfc/index.rst b/docs/sections/services/dfc/index.rst
index 176c403c..780d63fc 100644
--- a/docs/sections/services/dfc/index.rst
+++ b/docs/sections/services/dfc/index.rst
@@ -14,6 +14,8 @@ DATAFILE COLLECTOR MS (DFC)
./delivery.rst
./logging.rst
./installation.rst
+ ./certificates.rst
./configuration.rst
+ ./consumedapis.rst
./administration.rst
./release-notes.rst
diff --git a/docs/sections/services/ves-hv/architecture.rst b/docs/sections/services/ves-hv/architecture.rst
index 986e8bb3..1812f4ee 100644
--- a/docs/sections/services/ves-hv/architecture.rst
+++ b/docs/sections/services/ves-hv/architecture.rst
@@ -12,7 +12,7 @@ High Volume Collector is deployed with DCAEGEN2 via OOM Helm charts and Cloudify
Input messages come from TCP interface and Wire Transfer Protocol. Each frame includes Google Protocol Buffers (GPB) encoded payload.
Based on information provided in CommonEventHeader, domain messages are validated and published to specific Kafka topic in DMaaP.
-.. image:: ONAP_VES_HV_Architecture.png
+.. image:: resources/ONAP_VES_HV_Architecture.png
Messages published in DMaaP's Kafka topic will be consumed by DCAE analytics application or other ONAP component that consumes messages from DMaaP/Kafka.
DMaaP serves direct access to Kafka allowing other analytics applications to utilize its data.
diff --git a/docs/sections/services/ves-hv/deployment.rst b/docs/sections/services/ves-hv/deployment.rst
index 07d26b94..3a14eb18 100644
--- a/docs/sections/services/ves-hv/deployment.rst
+++ b/docs/sections/services/ves-hv/deployment.rst
@@ -7,83 +7,106 @@
Deployment
============
-To run HV-VES Collector container, you need to specify required parameters by passing them as command
-line arguments either by using long form (--long-form) or short form (-s) followed by argument if needed.
+To run the HV-VES Collector container, you need to specify the required command line options and environment variables.
-All parameters can also be configured by specifying environment variables. These variables have to be named after command line option name
-rewritten using `UPPER_SNAKE_CASE` and prepended with `VESHV_` prefix, for example `VESHV_LISTEN_PORT`.
+Command line parameters can be specified either in long form (--long-form) or short form (-s), followed by an argument if needed (see the `Arg` column in the table below). These parameters can be omitted if the corresponding environment variables are set. These variables are named after the command line option name rewritten in `UPPER_SNAKE_CASE` and prepended with the `VESHV_` prefix, for example `VESHV_CONFIGURATION_FILE`.
-Command line options have precedence over environment variables.
+Command line options take precedence over environment variables when both are present.
-+-------------+------------+-------------------+----------+-----+-------------------------------------------------+
-| Long form | Short form | Env form | Required | Arg | Description |
-+=============+============+===================+==========+=====+=================================================+
-| listen-port | p | VESHV_LISTEN_PORT | yes | yes | Port on which HV-VES listens internally |
-+-------------+------------+-------------------+----------+-----+-------------------------------------------------+
-| config-url | c | VESHV_CONFIG_URL | yes | yes | URL of HV-VES configuration on Consul service |
-+-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+Currently HV-VES requires a single command line parameter, which points to the base configuration file.
-HV-VES requires also to specify if SSL should be used when handling incoming TCP connections.
-This can be done by passing the flag below to the command line.
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header: Long form , Short form , Arg , Env form , Description
-+-------------+------------+-------------------+----------+-----+-------------------------------------------------+
-| Long form | Short form | Env form | Required | Arg | Description |
-+=============+============+===================+==========+=====+=================================================+
-| ssl-disable | l | VESHV_SSL_DISABLE | no | no | Disables SSL encryption |
-+-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+ configuration-file ; c ; yes ; VESHV_CONFIGURATION_FILE ; Path to JSON file containing HV-VES configuration
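+
+For example (image coordinates follow the ONAP nexus registry convention; the file path is illustrative):
+
+.. code-block:: bash
+
+   docker run nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main \
+       --configuration-file /etc/ves-hv/configuration/base.json
+
+   # equivalent, using the environment variable form:
+   # VESHV_CONFIGURATION_FILE=/etc/ves-hv/configuration/base.json
+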
+The environment variables required by HV-VES are used by the collector to provision its run-time configuration and are usually provided by the DCAE platform.
-Minimal command for running the container:
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header: Environment variable name , Description
-.. code-block:: bash
+ CONSUL_HOST ; Hostname under which Consul service is available
+ CONFIG_BINDING_SERVICE ; Hostname under which Config Binding Service is available
+ HOSTNAME ; Configuration key of HV-VES as seen by CBS, usually *dcae-hv-ves-collector*
+
+There is also an optional command line parameter which configures the container-internal port for the Healthcheck Server API (see :ref:`healthcheck_and_monitoring`).
+
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header: Long form , Short form , Arg , Env form , Description
+
+   health-check-api-port ; H ; yes ; VESHV_HEALTH_CHECK_API_PORT ; Health check REST API listen port
+
+.. _configuration_file:
+
+Configuration file
+------------------
+
+The file must provide the base configuration for the HV-VES Collector in JSON format.
+
+Some entries in the configuration can also be obtained from the Config Binding Service (see :ref:`run_time_configuration`). **Every entry defined in the configuration file will be OVERRIDDEN if it is also present in the run-time configuration.**
+
+The following JSON shows every possible configuration option. The default file shipped with the HV-VES container can be found in the collector's repository (see :ref:`repositories`).
+
+.. literalinclude:: resources/base-configuration.json
+ :language: json
+
+
+The configuration is split into smaller sections.
+The tables below give a short description of each field in the file configuration and the restrictions on it.
+
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header-rows: 2
+
+ Server
+ Key ; Value type ; Description
+ server.listenPort ; number ; Port on which HV-VES listens internally
+   server.idleTimeoutSec ; number ; Idle timeout for remote hosts. After the given time without any data exchange, the connection might be closed
+
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header-rows: 2
+
+ Config Binding Service
+ Key ; Value type ; Description
+ cbs.firstRequestDelaySec ; number ; Delay of first request to Config Binding Service in seconds
+ cbs.requestIntervalSec ; number ; Interval of configuration requests in seconds
+
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header-rows: 2
+
+ Security
+ Key ; Value type ; Description
+ security.sslDisable ; boolean ; Disables SSL encryption
+ security.keys.keyStoreFile ; String ; Key store path used in HV-VES incoming connections
+ security.keys.keyStorePasswordFile ; String ; Key store password file used in HV-VES incoming connections
+ security.keys.trustStoreFile ; String ; Path to file with trusted certificates bundle used in HV-VES incoming connections
+ security.keys.trustStorePasswordFile ; String ; Trust store password file used in HV-VES incoming connections
+
+All security entries are mandatory when `security.sslDisable` is set to `false`. Otherwise only `security.sslDisable` needs to be specified. If the `security.sslDisable` flag is missing, it is interpreted the same as if it were set to `false`.
+
+.. csv-table::
+ :widths: auto
+ :delim: ;
+ :header-rows: 2
+
+ Uncategorized
+ Key ; Value type ; Description
+ logLevel ; String ; Log level on which HV-VES publishes all log messages. Valid argument values are (case insensitive): ERROR, WARN, INFO, DEBUG, TRACE.
- docker run nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main --listen-port 6061 --config-url http://consul:8500/v1/kv/dcae-hv-ves-collector --ssl-disable
-
-Optional configuration parameters:
-
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-| Long form | Short form | Env form | Required | Arg | Default | Description |
-+=======================+============+============================+==========+=====+=================+=======================================================+
-| health-check-api-port | H | VESHV_HEALTHCHECK_API_PORT | no | yes | 6060 | Health check REST API listen port |
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-| first-request-delay | d | VESHV_FIRST_REQUEST_DELAY | no | yes | 10 | Delay of first request to Consul service in seconds |
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-| request-interval | I | VESHV_REQUEST_INTERVAL | no | yes | 5 | Interval of Consul configuration requests in seconds |
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-| idle-timeout-sec | i | VESHV_IDLE_TIMEOUT_SEC | no | yes | 60 | Idle timeout for remote hosts. After given time |
-| | | | | | | without any data exchange, the connection |
-| | | | | | | might be closed. |
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-| max-payload-size | m | VESHV_MAX_PAYLOAD_SIZE | no | yes | 1048576 (1 MiB) | Maximum supported payload size in bytes |
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-| log-level | ll | VESHV_LOG_LEVEL | no | yes | INFO | Log level on which HV-VES publishes all log messages |
-| | | | | | | Valid argument values are (case insensitive): ERROR, |
-| | | | | | | WARN, INFO, DEBUG, TRACE. |
-+-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
-
-As part of experimental API if you do not specify `ssl-disable` flag, there is need to specify additional
-parameters for security configuration.
-
-+-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
-| Long form | Short form | Env form | Required | Arg | Default | Description |
-+=======================+============+============================+==========+=====+========================+==============================================================+
-| key-store | k | VESHV_KEY_STORE | no | yes | /etc/ves-hv/server.p12 | Key store in PKCS12 format path |
-+-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
-| key-store-password | kp | VESHV_KEY_STORE_PASSWORD | no | yes | | Key store password |
-+-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
-| trust-store | t | VESHV_TRUST_STORE | no | yes | /etc/ves-hv/trust.p12 | File with trusted certificate bundle in PKCS12 format path |
-+-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
-| trust-store-password | tp | VESHV_TRUST_STORE_PASSWORD | no | yes | | Trust store password |
-+-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
-
-Passwords are mandatory without ssl-disable flag. If key-store or trust-store location is not specified, HV-VES will try to read them from default locations.
-
-These parameters can be configured either by passing command line option during `docker run` call or
-by specifying environment variables named after command line option name
-rewritten using `UPPER_SNAKE_CASE` and prepended with `VESHV_` prefix e.g. `VESHV_LISTEN_PORT`.
Horizontal Scaling
-==================
+------------------
Kubernetes command line tool (`kubectl`) is recommended for manual horizontal scaling of HV-VES Collector.
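+
+For example (a sketch; the actual deployment name and namespace depend on the OOM installation):
+
+.. code-block:: bash
+
+    kubectl -n onap scale deployment dep-dcae-hv-ves-collector --replicas=2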
diff --git a/docs/sections/services/ves-hv/design.rst b/docs/sections/services/ves-hv/design.rst
index a6c2b864..fb4fa2c7 100644
--- a/docs/sections/services/ves-hv/design.rst
+++ b/docs/sections/services/ves-hv/design.rst
@@ -33,13 +33,13 @@ The proto file (with the VES CommonHeader) comes with a binary-type **Payload**
Domain-specific data are encoded as well with GPB. A domain-specific proto file is required to decode the data.
This domain-specific proto has to be shared with analytics applications - HV-VES does not analyze domain-specific data.
-In order to support the RT-PM use-case, HV-VES includes a **perf3gpp** domain proto file. Within this domain, high volume data are expected to be reported to HV-VES collector.
+In order to support the RT-PM use-case, HV-VES uses a **perf3gpp** domain proto file. Within this domain, high volume data are expected to be reported to HV-VES collector.
Additional domains can be defined based on existing VES domains (like Fault, Heartbeat) or completely new domains. New domains can be added when needed.
GPB proto files are backwards compatible, and a new domain can be added without affecting existing systems.
Analytics applications have to be equipped with the new domain-specific proto file as well.
-Currently, these additional, domain specific proto files can be added to respective repos of HV-VES collector.
+Currently, these additional, domain specific proto files can be added to hv-ves-client protobuf library repository (artifactId: hvvesclient-protobuf).
Implementation details
----------------------
diff --git a/docs/sections/services/ves-hv/example-event.rst b/docs/sections/services/ves-hv/example-event.rst
index 3a335395..a413d401 100644
--- a/docs/sections/services/ves-hv/example-event.rst
+++ b/docs/sections/services/ves-hv/example-event.rst
@@ -11,5 +11,5 @@ The message consists of several parts. Each part is encoded in a different way.
Values of fields can be changed according to types specified in noted definition files.
-.. literalinclude:: WTP.yaml
+.. literalinclude:: resources/WTP.yaml
:language: yaml
diff --git a/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst b/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst
index 18333778..9d35e1ef 100644
--- a/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst
+++ b/docs/sections/services/ves-hv/healthcheck-and-monitoring.rst
@@ -9,7 +9,7 @@ Healthcheck and Monitoring
Healthcheck
-----------
Inside the HV-VES docker container runs a small HTTP service for healthchecks. The port for healthchecks can be configured
-at deployment using ``--health-check-api-port`` command line option or via `VESHV_HEALTHCHECK_API_PORT` environment variable (for details see :ref:`deployment`).
+at deployment using the command line (for details see :ref:`deployment`).
This service exposes the endpoint **GET /health/ready**, which returns an **HTTP 200 OK** when HV-VES is healthy
and ready for connections. Otherwise it returns an **HTTP 503 Service Unavailable** message with a short reason for the unhealthiness.
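+
+For example (6060 is an illustrative value for the port configured at deployment):
+
+.. code-block:: bash
+
+    curl -i http://localhost:6060/health/ready
+    # HTTP/1.1 200 OK                   -> healthy and ready for connections
+    # HTTP/1.1 503 Service Unavailable  -> unhealthy, reason in the response body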
@@ -90,4 +90,4 @@ JVM metrics:
Sample response for **GET monitoring/prometheus**:
-.. literalinclude:: metrics_sample_response.txt
+.. literalinclude:: resources/metrics_sample_response.txt
diff --git a/docs/sections/services/ves-hv/index.rst b/docs/sections/services/ves-hv/index.rst
index 5bb83ddc..144f557e 100644
--- a/docs/sections/services/ves-hv/index.rst
+++ b/docs/sections/services/ves-hv/index.rst
@@ -29,11 +29,11 @@ High Volume VES Collector overview and functions
architecture
design
- run-time-configuration
repositories
deployment
- troubleshooting
+ run-time-configuration
HV-VES Offered APIs <../../apis/ves-hv/index>
authorization
example-event
healthcheck-and-monitoring
+ troubleshooting
diff --git a/docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png b/docs/sections/services/ves-hv/resources/ONAP_VES_HV_Architecture.png
index 7652b970..7652b970 100644
--- a/docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png
+++ b/docs/sections/services/ves-hv/resources/ONAP_VES_HV_Architecture.png
Binary files differ
diff --git a/docs/sections/services/ves-hv/WTP.yaml b/docs/sections/services/ves-hv/resources/WTP.yaml
index 835ab309..835ab309 100644
--- a/docs/sections/services/ves-hv/WTP.yaml
+++ b/docs/sections/services/ves-hv/resources/WTP.yaml
diff --git a/docs/sections/services/ves-hv/resources/base-configuration.json b/docs/sections/services/ves-hv/resources/base-configuration.json
new file mode 100644
index 00000000..6580287d
--- /dev/null
+++ b/docs/sections/services/ves-hv/resources/base-configuration.json
@@ -0,0 +1,12 @@
+{
+ "logLevel": "INFO",
+ "server.listenPort": 6061,
+ "server.idleTimeoutSec": 60,
+ "cbs.firstRequestDelaySec": 10,
+ "cbs.requestIntervalSec": 5,
+ "security.sslDisable": false,
+ "security.keys.keyStoreFile": "/etc/ves-hv/ssl/server.p12",
+ "security.keys.keyStorePasswordFile": "/etc/ves-hv/ssl/server.pass",
+ "security.keys.trustStoreFile": "/etc/ves-hv/ssl/trust.p12",
+ "security.keys.trustStorePasswordFile": "/etc/ves-hv/ssl/trust.pass"
+} \ No newline at end of file
diff --git a/docs/sections/services/ves-hv/resources/blueprint-snippet.yaml b/docs/sections/services/ves-hv/resources/blueprint-snippet.yaml
new file mode 100644
index 00000000..912c0c5a
--- /dev/null
+++ b/docs/sections/services/ves-hv/resources/blueprint-snippet.yaml
@@ -0,0 +1,24 @@
+node_templates:
+ hv-ves:
+ properties:
+ application_config:
+ logLevel: "INFO"
+ server.listenPort: 6061
+ server.idleTimeoutSec: 60
+ cbs.requestIntervalSec: 5
+ security.sslDisable: false
+ security.keys.keyStoreFile: "/etc/ves-hv/ssl/cert.jks"
+ security.keys.keyStorePasswordFile: "/etc/ves-hv/ssl/jks.pass"
+ security.keys.trustStoreFile: "/etc/ves-hv/ssl/trust.jks"
+ security.keys.trustStorePasswordFile: "/etc/ves-hv/ssl/trust.pass"
+ stream_publishes:
+ perf3gpp:
+ type: "kafka"
+ kafka_info:
+ bootstrap_servers: "message-router-kafka:9092"
+ topic_name: "HV_VES_PERF3GPP"
+ heartbeat:
+ type: "kafka"
+ kafka_info:
+ bootstrap_servers: "message-router-kafka:9092"
+ topic_name: "HV_VES_HEARTBEAT"
diff --git a/docs/sections/services/ves-hv/resources/dynamic-configuration.json b/docs/sections/services/ves-hv/resources/dynamic-configuration.json
new file mode 100644
index 00000000..0a1cd89d
--- /dev/null
+++ b/docs/sections/services/ves-hv/resources/dynamic-configuration.json
@@ -0,0 +1,28 @@
+{
+ "logLevel": "INFO",
+ "server.listenPort": 6061,
+ "server.idleTimeoutSec": 60,
+ "cbs.requestIntervalSec": 5,
+ "security.sslDisable": false,
+ "security.keys.keyStoreFile": "/etc/ves-hv/ssl/cert.jks",
+ "security.keys.keyStorePasswordFile": "/etc/ves-hv/ssl/jks.pass",
+ "security.keys.trustStoreFile": "/etc/ves-hv/ssl/trust.jks",
+ "security.keys.trustStorePasswordFile": "/etc/ves-hv/ssl/trust.pass",
+ "streams_publishes": {
+ "perf3gpp": {
+ "type": "kafka",
+ "kafka_info": {
+ "bootstrap_servers": "message-router-kafka:9092",
+ "topic_name": "HV_VES_PERF3GPP"
+ }
+ },
+ "heartbeat": {
+ "type": "kafka",
+ "kafka_info": {
+ "bootstrap_servers": "message-router-kafka:9092",
+ "topic_name": "HV_VES_HEARTBEAT"
+ }
+ }
+ }
+}
+
diff --git a/docs/sections/services/ves-hv/metrics_sample_response.txt b/docs/sections/services/ves-hv/resources/metrics_sample_response.txt
index da54e3ea..da54e3ea 100644
--- a/docs/sections/services/ves-hv/metrics_sample_response.txt
+++ b/docs/sections/services/ves-hv/resources/metrics_sample_response.txt
diff --git a/docs/sections/services/ves-hv/run-time-configuration.rst b/docs/sections/services/ves-hv/run-time-configuration.rst
index 76d622c6..95bad674 100644
--- a/docs/sections/services/ves-hv/run-time-configuration.rst
+++ b/docs/sections/services/ves-hv/run-time-configuration.rst
@@ -6,54 +6,42 @@
Run-Time configuration
======================
-(see :ref:`deployment`)
+HV-VES dynamic configuration is primarily meant to provide DMaaP Connection Objects (see :ref:`dmaap-connection-objects`).
+These objects contain the information necessary to route received VES Events to the correct Kafka topic. This metadata is referred to below as the Routing definition.
-HV-VES can fetch configuration directly from Consul service in the following JSON format:
+The collector internally uses the DCAE SDK to fetch its configuration from the Config Binding Service.
-.. code-block:: json
+HV-VES waits 10 seconds (default, configurable at deployment with the **cbs.firstRequestDelaySec** option, see :ref:`configuration_file`) before the first attempt to retrieve configuration from CBS. This is to prevent possible synchronization issues. During that time HV-VES declines any connection attempts from xNFs (VNF/PNF).
- {
- "dmaap.kafkaBootstrapServers": "message-router-kafka:9093",
- "collector.routing": [
- {
- "fromDomain": "perf3gpp",
- "toTopic": "HV_VES_PERF3GPP"
- },
- {
- "fromDomain": "heartbeat",
- "toTopic": "HV_VES_HEARTBEAT"
- },
- ...
- ]
- }
+After the first request, HV-VES asks for configuration at fixed intervals, configurable in the file configuration (**cbs.requestIntervalSec**). By default the interval is set to 5 seconds.
-HV-VES does not verify the correctness of configuration data and uses them as is, in particular:
+If the configuration cannot be retrieved, the collector retries. After five unsuccessful attempts, the container becomes unhealthy and cannot recover. HV-VES in this state is unusable and the container should be restarted.
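+
+To inspect the configuration that the collector would receive, the CBS endpoint can be queried directly (a sketch; hostname, port and component name are typical values provided by the platform as the environment variables described in :ref:`deployment`):
+
+.. code-block:: bash
+
+    curl "http://${CONFIG_BINDING_SERVICE}:10000/service_component/${HOSTNAME}"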
-- **KafkaBootstrapServers** is used as host name and port for publishing events to Kafka service.
-- Every **routing** array object specifies one event publishing route.
- - **fromDomain** node should be a case-sensitive string of single domain taken from VES Common Event Header specification.
- - **toTopic** should be a case-sensitive string of Kafka topic.
- - When HV-VES receives VES Event, it checks the domain contained in it. If the route from that domain to any topic exists in configuration, then HV-VES publishes that event to topic in that route.
- - If there are two routes from the same domain to different topics, then it is undefined which route will be used.
+Configuration format
+--------------------
-The configuration is created from HV-VES Cloudify blueprint by specifying **application_config** node during ONAP OOM/Kubernetes deployment. Example of the node specification:
+The following JSON presents the dynamic configuration options recognized by the HV-VES Collector.
+Note that the data is not verified for correctness (e.g. whether the specified security files are present on the machine), so invalid data can result in service malfunction or even container shutdown.
+
+.. literalinclude:: resources/dynamic-configuration.json
+ :language: json
+
+Fields have the same meaning as in the file configuration, the only difference being the Routing definition.
-.. code-block:: YAML
+Routing
+-------
- node_templates:
- hv-ves:
- properties:
- application_config:
- dmaap.kafkaBootstrapServers: message-router-kafka:9092
- collector.routing:
- fromDomain: perf3gpp
- toTopic: HV_VES_PERF3GPP
+For every JSON key-object pair defined in **"streams_publishes"**, the key is used as the domain and the related object is used to set up the Kafka bootstrap servers and the Kafka topic **for this domain**.
-Endpoint on which HV-VES seeks configuration can be set during deployment as described in :ref:`deployment`.
+When the collector receives a VES Event from a client, it checks whether the event's domain matches any route in the Routing definition and publishes the event to the related topic (with the sample configuration above, a **perf3gpp** event is published to **HV_VES_PERF3GPP**). If there is no match, the event is dropped. If there are two routes from the same domain to different topics, it is undefined which route will be used.
-HV-VES waits 10 seconds (default, configurable during deplyoment with **first-request-delay** option, see :ref:`deployment`) before the first attempt to retrieve configuration from Consul. This is to prevent possible synchronization issues. During that time HV-VES declines any connection attempts from xNF (VNF/PNF).
+For more information, see :ref:`supported_domains`.
-After first request, HV-VES asks for configuration in fixed intervals, configurable from command line (**request-interval**). By defualt interval is set to 5 seconds.
+Providing configuration during OOM deployment
+---------------------------------------------
+
The configuration is created from the HV-VES Cloudify blueprint by specifying the **application_config** node during ONAP OOM/Kubernetes deployment. Example of the node specification:
-In case of failing to retrieve configuration, collector temporarily extends this interval and retries. After five unsuccessfull attempts, container becomes unhealthy and cannot recover. HV-VES in this state is unusable and the container should be restarted.
+.. literalinclude:: resources/blueprint-snippet.yaml
+ :language: yaml
\ No newline at end of file
diff --git a/docs/sections/services/ves-hv/troubleshooting.rst b/docs/sections/services/ves-hv/troubleshooting.rst
index 3defe7f1..5c614bbe 100644
--- a/docs/sections/services/ves-hv/troubleshooting.rst
+++ b/docs/sections/services/ves-hv/troubleshooting.rst
@@ -32,6 +32,8 @@ Error and warning logs contain also:
* exception message
* stack trace
+Also, exact exception stack traces have been omitted for readability.
+
**Do not rely on exact log messages or their presence, as they are often subject to change.**
Deployment/Installation errors
@@ -41,152 +43,38 @@ Deployment/Installation errors
::
- Unexpected error when parsing command line arguments
- usage: java org.onap.dcae.collectors.veshv.main.MainKt
- Required parameters: s, p, c
- -c,--config-url <arg> URL of ves configuration on consul
- -d,--first-request-delay <arg> Delay of first request to consul in
- seconds
- -H,--health-check-api-port <arg> Health check rest api listen port
- -I,--request-interval <arg> Interval of consul configuration
- requests in seconds
- -i,--idle-timeout-sec <arg> Idle timeout for remote hosts. After
- given time without any data exchange
- the
- connection might be closed.
- -k,--key-store <arg> Key store in PKCS12 format
- -kp,--key-store-password <arg> Key store password
- -l,--ssl-disable Disable SSL encryption
- -m,--max-payload-size <arg> Maximum supported payload size in
- bytes
- -p,--listen-port <arg> Listen port
- -s,--kafka-bootstrap-servers <arg> Comma-separated Kafka bootstrap
- servers in <host>:<port> format
- -t,--trust-store <arg> File with trusted certificate bundle
- in PKCS12 format
- -tp,--trust-store-password <arg> Trust store password
- -u,--dummy If present will start in dummy mode
- (dummy external services)
- All parameters can be specified as environment variables using
- upper-snake-case full name with prefix `VESHV_`.
-
-
-This log message is printed when you do not specify the required parameters (via command line, or in environment variables).
-As described in the above log message, there are a few required parameters:
-**listen port**, **config url**, **kafka-bootstrap-servers** and either **trust store password** and **key store password** if you want to use SSL, or only **ssl disable** if not.
-
-To get rid of this error, specify the required parameters. For example:
-
-- Via command line:
+ | org.onap.dcae.collectors.veshv.main | ERROR | Failed to create configuration: Base configuration filepath missing on command line
+ | org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | org.onap.dcae.collectors.veshv.config.api.model.MissingArgumentException: Base configuration filepath missing on command line
-::
+These log messages are printed when the single required parameter, the configuration file path, was not specified (via the command line or as an environment variable).
+Command line arguments have priority over environment variables. If you configure a parameter in both ways, **HV-VES** applies the one from the command line.
+For more information about **HV-VES** configuration parameters, see :ref:`deployment`.
- <hv-ves run command> --listen-port 6061 --config-url http://consul-url/key-path --kafka-bootstrap-servers message-router-kafka:9092 --key-store-password password --trust-store-password password
+Configuration errors
+--------------------
-- By defining environment variables:
+**Consul service not available**
::
- export VESHV_LISTEN_PORT=6061
- export VESHV_CONFIG_URL=http://consul-url/key-path
- export VESHV_KAFKA_BOOTSTRAP_SERVERS=message-router-kafka:9092
- export VESHV_KEY_STORE_PASSWORD=password
- export VESHV_TRUST_STORE_PASSWORD=password
-
-**NOTE**
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | ERROR | Failed to retrieve CBS client: consul-server: Temporary failure in name resolution
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | WARN | Exception from configuration provider client, retrying subscription | java.net.UnknownHostException: consul-server: Temporary failure in name resolution
-Command line arguments have priority over environment variables. If you configure a parameter in both ways, **HV-VES** applies the one from the command line.
-For more information about **HV-VES** configuration parameters, see :ref:`deployment`.
+**HV-VES** looks for Consul under the hostname defined in the CONSUL_HOST environment variable. If the service is down, the above logs will appear and, after a few retries, the collector will shut down.
-Configuration errors
---------------------
-**Consul service not responding**
+**Config Binding Service not available**
::
- | ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | ERROR | Failed to get resource on path: http://invalid-host:8500/v1/kv/veshv-config?raw=true (consul-server1: Temporary failure in name resolution)
- | ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | DEBUG | Nested exception: | | java.net.UnknownHostException: consul-server1: Temporary failure in name resolution
- at java.base/java.net.Inet4AddressImpl.lookupAllHostAddr(Native Method)
- at java.base/java.net.InetAddress$PlatformNameService.lookupAllHostAddr(InetAddress.java:929)
- at java.base/java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1515)
- at java.base/java.net.InetAddress$NameServiceAddresses.get(InetAddress.java:848)
- at java.base/java.net.InetAddress.getAllByName0(InetAddress.java:1505)
- at java.base/java.net.InetAddress.getAllByName(InetAddress.java:1364)
- at java.base/java.net.InetAddress.getAllByName(InetAddress.java:1298)
- at java.base/java.net.InetAddress.getByName(InetAddress.java:1248)
- at io.netty.util.internal.SocketUtils$8.run(SocketUtils.java:146)
- at io.netty.util.internal.SocketUtils$8.run(SocketUtils.java:143)
- at java.base/java.security.AccessController.doPrivileged(Native Method)
- at io.netty.util.internal.SocketUtils.addressByName(SocketUtils.java:143)
- at io.netty.resolver.DefaultNameResolver.doResolve(DefaultNameResolver.java:43)
- at io.netty.resolver.SimpleNameResolver.resolve(SimpleNameResolver.java:63)
- at io.netty.resolver.SimpleNameResolver.resolve(SimpleNameResolver.java:55)
- at io.netty.resolver.InetSocketAddressResolver.doResolve(InetSocketAddressResolver.java:57)
- at io.netty.resolver.InetSocketAddressResolver.doResolve(InetSocketAddressResolver.java:32)
- at io.netty.resolver.AbstractAddressResolver.resolve(AbstractAddressResolver.java:108)
- at io.netty.bootstrap.Bootstrap.doResolveAndConnect0(Bootstrap.java:208)
- at io.netty.bootstrap.Bootstrap.access$000(Bootstrap.java:49)
- at io.netty.bootstrap.Bootstrap$1.operationComplete(Bootstrap.java:188)
- at io.netty.bootstrap.Bootstrap$1.operationComplete(Bootstrap.java:174)
- at io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:511)
- at io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:485)
- at io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:424)
- at io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:103)
- at io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84)
- at io.netty.channel.AbstractChannel$AbstractUnsafe.safeSetSuccess(AbstractChannel.java:978)
- at io.netty.channel.AbstractChannel$AbstractUnsafe.register0(AbstractChannel.java:512)
- at io.netty.channel.AbstractChannel$AbstractUnsafe.access$200(AbstractChannel.java:423)
- at io.netty.channel.AbstractChannel$AbstractUnsafe$1.run(AbstractChannel.java:482)
- at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
- at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:404)
- at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:315)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | rs.veshv.impl.adapters.ConsulConfigurationProvider | WARN | Could not load fresh configuration | java.net.UnknownHostException: consul-server1: Temporary failure in name resolution
- at java.base/java.net.Inet4AddressImpl.lookupAllHostAddr(Native Method)
- at java.base/java.net.InetAddress$PlatformNameService.lookupAllHostAddr(InetAddress.java:929)
- at java.base/java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1515)
- at java.base/java.net.InetAddress$NameServiceAddresses.get(InetAddress.java:848)
- at java.base/java.net.InetAddress.getAllByName0(InetAddress.java:1505)
- at java.base/java.net.InetAddress.getAllByName(InetAddress.java:1364)
- at java.base/java.net.InetAddress.getAllByName(InetAddress.java:1298)
- at java.base/java.net.InetAddress.getByName(InetAddress.java:1248)
- at io.netty.util.internal.SocketUtils$8.run(SocketUtils.java:146)
- at io.netty.util.internal.SocketUtils$8.run(SocketUtils.java:143)
- at java.base/java.security.AccessController.doPrivileged(Native Method)
- at io.netty.util.internal.SocketUtils.addressByName(SocketUtils.java:143)
- at io.netty.resolver.DefaultNameResolver.doResolve(DefaultNameResolver.java:43)
- at io.netty.resolver.SimpleNameResolver.resolve(SimpleNameResolver.java:63)
- at io.netty.resolver.SimpleNameResolver.resolve(SimpleNameResolver.java:55)
- at io.netty.resolver.InetSocketAddressResolver.doResolve(InetSocketAddressResolver.java:57)
- at io.netty.resolver.InetSocketAddressResolver.doResolve(InetSocketAddressResolver.java:32)
- at io.netty.resolver.AbstractAddressResolver.resolve(AbstractAddressResolver.java:108)
- at io.netty.bootstrap.Bootstrap.doResolveAndConnect0(Bootstrap.java:208)
- at io.netty.bootstrap.Bootstrap.access$000(Bootstrap.java:49)
- at io.netty.bootstrap.Bootstrap$1.operationComplete(Bootstrap.java:188)
- at io.netty.bootstrap.Bootstrap$1.operationComplete(Bootstrap.java:174)
- at io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:511)
- at io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:485)
- at io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:424)
- at io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:103)
- at io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84)
- at io.netty.channel.AbstractChannel$AbstractUnsafe.safeSetSuccess(AbstractChannel.java:978)
- at io.netty.channel.AbstractChannel$AbstractUnsafe.register0(AbstractChannel.java:512)
- at io.netty.channel.AbstractChannel$AbstractUnsafe.access$200(AbstractChannel.java:423)
- at io.netty.channel.AbstractChannel$AbstractUnsafe$1.run(AbstractChannel.java:482)
- at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
- at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:404)
- at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:315)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | ors.veshv.healthcheck.factory.HealthCheckApiServer | DEBUG | HV-VES status: OUT_OF_SERVICE, Consul configuration not available. Retrying.
-
-
-
-The above three logs indicate that **HV-VES** cannot connect to the Consul service under url given in ``--consul-url`` parameter.
-Make sure Consul is up and running and the **ip + port** combination is correct.
+ | org.onap.dcae.services.sdk.rest.services.cbs.client.impl.CbsLookup | INFO | Config Binding Service address: config-binding-service:10000
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | INFO | CBS client successfully created
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | ERROR | Error while creating configuration: config-binding-service: Temporary failure in name resolution
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | WARN | Exception from configuration provider client, retrying subscription
+
+The logs indicate that **HV-VES** successfully retrieved the Config Binding Service (later referred to as CBS) connection string from Consul, but the address was either incorrect or CBS is down.
+Make sure CBS is up and running and that the connection string stored in Consul is correct.
====
@@ -194,73 +82,13 @@ Make sure Consul is up and running and the **ip + port** combination is correct.
::
- | ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | ERROR | Failed to get resource on path: http://consul-server:8500/v1/kv/invalid-resource?raw=true (http://consul-server:8500/v1/kv/invalid-resource?raw=true 404 Not Found)
- | ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | DEBUG | Nested exception: | java.lang.IllegalStateException: http://consul-server:8500/v1/kv/invalid-resource?raw=true 404 Not Found
- at org.onap.dcae.collectors.veshv.impl.adapters.HttpAdapter$get$2.apply(HttpAdapter.kt:46)
- at org.onap.dcae.collectors.veshv.impl.adapters.HttpAdapter$get$2.apply(HttpAdapter.kt:34)
- at reactor.netty.http.client.HttpClientFinalizer.lambda$responseSingle$7(HttpClientFinalizer.java:95)
- at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:118)
- at reactor.core.publisher.FluxRetryPredicate$RetryPredicateSubscriber.onNext(FluxRetryPredicate.java:81)
- at reactor.core.publisher.MonoCreate$DefaultMonoSink.success(MonoCreate.java:147)
- at reactor.netty.http.client.HttpClientConnect$HttpObserver.onStateChange(HttpClientConnect.java:383)
- at reactor.netty.resources.PooledConnectionProvider$DisposableAcquire.onStateChange(PooledConnectionProvider.java:501)
- at reactor.netty.resources.PooledConnectionProvider$PooledConnection.onStateChange(PooledConnectionProvider.java:443)
- at reactor.netty.http.client.HttpClientOperations.onInboundNext(HttpClientOperations.java:494)
- at reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:141)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelRead(CombinedChannelDuplexHandler.java:438)
- at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)
- at io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:253)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:808)
- at io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:410)
- at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:310)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | rs.veshv.impl.adapters.ConsulConfigurationProvider | WARN | Could not load fresh configuration | java.lang.IllegalStateException: http://consul-server:8500/v1/kv/invalid-resource?raw=true 404 Not Found
- at org.onap.dcae.collectors.veshv.impl.adapters.HttpAdapter$get$2.apply(HttpAdapter.kt:46)
- at org.onap.dcae.collectors.veshv.impl.adapters.HttpAdapter$get$2.apply(HttpAdapter.kt:34)
- at reactor.netty.http.client.HttpClientFinalizer.lambda$responseSingle$7(HttpClientFinalizer.java:95)
- at reactor.core.publisher.MonoFlatMap$FlatMapMain.onNext(MonoFlatMap.java:118)
- at reactor.core.publisher.FluxRetryPredicate$RetryPredicateSubscriber.onNext(FluxRetryPredicate.java:81)
- at reactor.core.publisher.MonoCreate$DefaultMonoSink.success(MonoCreate.java:147)
- at reactor.netty.http.client.HttpClientConnect$HttpObserver.onStateChange(HttpClientConnect.java:383)
- at reactor.netty.resources.PooledConnectionProvider$DisposableAcquire.onStateChange(PooledConnectionProvider.java:501)
- at reactor.netty.resources.PooledConnectionProvider$PooledConnection.onStateChange(PooledConnectionProvider.java:443)
- at reactor.netty.http.client.HttpClientOperations.onInboundNext(HttpClientOperations.java:494)
- at reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:141)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelRead(CombinedChannelDuplexHandler.java:438)
- at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)
- at io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:253)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:808)
- at io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:410)
- at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:310)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | ors.veshv.healthcheck.factory.HealthCheckApiServer | DEBUG | HV-VES status: OUT_OF_SERVICE, Consul configuration not available. Retrying.
-
-
-**HV-VES** logs this information when connected to Consul, but cannot find any JSON configuration under given key which in this case is **invalid-resource**.
+ | org.onap.dcae.services.sdk.rest.services.cbs.client.impl.CbsLookup | INFO | Config Binding Service address: config-binding-service:10000
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | INFO | CBS client successfully created
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | ERROR | Error while creating configuration: Request failed for URL 'http://config-binding-service:10000/service_component/invalid-resource'. Response code: 404 Not Found
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | WARN | Exception from configuration provider client, retrying subscription | | org.onap.dcaegen2.services.sdk.rest.services.adapters.http.exceptions.HttpException: Request failed for URL 'http://config-binding-service:10000/service_component/dcae-hv-ves-collector'. Response code: 404 Not Found
+
+
+**HV-VES** logs this information when it has connected to Consul and CBS, but no JSON configuration can be found under the given service component name, which in this case is **invalid-resource**.
For more information, see :ref:`run_time_configuration`
====
@@ -268,73 +96,17 @@ For more information, see :ref:`run_time_configuration`
**Invalid configuration format**
::
+ | org.onap.dcae.services.sdk.rest.services.cbs.client.impl.CbsLookup | INFO | Config Binding Service address: config-binding-service:10000
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | INFO | CBS client successfully created
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | INFO | Received new configuration:
+ | {"streams_publishes":{"perf3gpp":{"typo":"kafka","kafka_info":{"bootstrap_servers":"message-router-kafka:9092","topic_name":"HV_VES_PERF3GPP"}}}}
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | ERROR | Error while creating configuration: Could not find sub-node 'type'. Actual sub-nodes: typo, kafka_info
+ | org.onap.dcae.collectors.veshv.config.impl.CbsConfigurationProvider | WARN | Exception from configuration provider client, retrying subscription | org.onap.dcaegen2.services.sdk.rest.services.cbs.client.api.exceptions.StreamParsingException: Could not find sub-node 'type'. Actual sub-nodes: typo, kafka_info
+
- | rs.veshv.impl.adapters.ConsulConfigurationProvider | INFO | Obtained new configuration from consul:
- { "invalidKey": "value" }
- | 2018-12-20T15:38:14.543Z | rs.veshv.impl.adapters.ConsulConfigurationProvider | WARN | Could not load fresh configuration | org.onap.dcae.collectors.veshv.impl.adapters.ParsingException: Failed to parse consul configuration
- at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider.createCollectorConfiguration(ConsulConfigurationProvider.kt:125)
- at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider.access$createCollectorConfiguration(ConsulConfigurationProvider.kt:48)
- at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider$invoke$4.invoke(ConsulConfigurationProvider.kt:80)
- at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider$invoke$4.invoke(ConsulConfigurationProvider.kt:48)
- at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider$sam$java_util_function_Function$0.apply(ConsulConfigurationProvider.kt)
- at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:100)
- at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:114)
- at reactor.core.publisher.FluxFlatMap$FlatMapMain.tryEmitScalar(FluxFlatMap.java:449)
- at reactor.core.publisher.FluxFlatMap$FlatMapMain.onNext(FluxFlatMap.java:384)
- at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.innerNext(FluxConcatMap.java:275)
- at reactor.core.publisher.FluxConcatMap$ConcatMapInner.onNext(FluxConcatMap.java:849)
- at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:121)
- at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onNext(FluxPeekFuseable.java:204)
- at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1476)
- at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241)
- at reactor.core.publisher.FluxDoFinally$DoFinallySubscriber.onNext(FluxDoFinally.java:123)
- at reactor.core.publisher.FluxHandle$HandleSubscriber.onNext(FluxHandle.java:113)
- at reactor.core.publisher.FluxMapFuseable$MapFuseableConditionalSubscriber.onNext(FluxMapFuseable.java:287)
- at reactor.core.publisher.FluxUsing$UsingFuseableSubscriber.onNext(FluxUsing.java:350)
- at reactor.core.publisher.FluxFilterFuseable$FilterFuseableSubscriber.onNext(FluxFilterFuseable.java:113)
- at reactor.core.publisher.FluxPeekFuseable$PeekFuseableConditionalSubscriber.onNext(FluxPeekFuseable.java:486)
- at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1476)
- at reactor.core.publisher.MonoReduceSeed$ReduceSeedSubscriber.onComplete(MonoReduceSeed.java:156)
- at reactor.core.publisher.FluxMap$MapSubscriber.onComplete(FluxMap.java:136)
- at reactor.netty.channel.FluxReceive.terminateReceiver(FluxReceive.java:378)
- at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:202)
- at reactor.netty.channel.FluxReceive.onInboundComplete(FluxReceive.java:343)
- at reactor.netty.channel.ChannelOperations.onInboundComplete(ChannelOperations.java:325)
- at reactor.netty.channel.ChannelOperations.terminate(ChannelOperations.java:372)
- at reactor.netty.http.client.HttpClientOperations.onInboundNext(HttpClientOperations.java:522)
- at reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:141)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelRead(CombinedChannelDuplexHandler.java:438)
- at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)
- at io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:253)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:808)
- at io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:410)
- at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:310)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | ors.veshv.healthcheck.factory.HealthCheckApiServer | DEBUG | HV-VES status: OUT_OF_SERVICE, Consul configuration not available. Retrying.
-
-
-This log is printed when you upload a configuration in an invalid format (for example, with missing fields). In the first log you can see that configuration on Consul is:
-
-.. code-block:: json
-
- {
- "invalidKey": "value"
- }
-
-The above is not a valid **HV-VES** configuration, therefore **HV-VES** does not apply it and becomes **unhealthy**.
-For more information on **Consul configuration**, see :ref:`run_time_configuration`.
+This log is printed when you upload a configuration in an invalid format.
+The received JSON contains an invalid Streams configuration, therefore **HV-VES** does not apply it and becomes **unhealthy**.
+For more information on dynamic configuration, see :ref:`run_time_configuration`.
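+
+For comparison, the entry from the log above becomes valid once the misspelled ``"typo"`` key is renamed to ``"type"``:
+
+.. code-block:: json
+
+    {"streams_publishes":{"perf3gpp":{"type":"kafka","kafka_info":{"bootstrap_servers":"message-router-kafka:9092","topic_name":"HV_VES_PERF3GPP"}}}}
+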
Message handling errors
@@ -344,165 +116,39 @@ Message handling errors
::
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | DEBUG | Client connection request received
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new connection
- | org.apache.kafka.clients.ClientUtils | WARN | Removing server invalid-kafka-host:9092 from bootstrap.servers as DNS resolution failed for invalid-kafka-host
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | WARN | Error while handling message stream: org.apache.kafka.common.KafkaException (Failed to construct kafka producer)
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | DEBUG | Detailed stack trace | org.apache.kafka.common.config.ConfigException: No resolvable bootstrap urls given in bootstrap.servers
- at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:66)
- at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:406)
- ... 49 common frames omitted
- Wrapped by: org.apache.kafka.common.KafkaException: Failed to construct kafka producer
- at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:457)
- at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:289)
- at reactor.kafka.sender.internals.ProducerFactory.createProducer(ProducerFactory.java:33)
- at reactor.kafka.sender.internals.DefaultKafkaSender.lambda$new$0(DefaultKafkaSender.java:96)
- at reactor.core.publisher.MonoCallable.subscribe(MonoCallable.java:56)
- at reactor.core.publisher.MonoPeekFuseable.subscribe(MonoPeekFuseable.java:74)
- at reactor.core.publisher.Mono.subscribe(Mono.java:3590)
- at reactor.core.publisher.MonoProcessor.add(MonoProcessor.java:531)
- at reactor.core.publisher.MonoProcessor.subscribe(MonoProcessor.java:444)
- at reactor.core.publisher.MonoFlatMapMany.subscribe(MonoFlatMapMany.java:49)
- at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
- at reactor.core.publisher.FluxMap.subscribe(FluxMap.java:62)
- at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
- at reactor.core.publisher.FluxDefer.subscribe(FluxDefer.java:54)
- at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
- at reactor.core.publisher.FluxOnErrorResume.subscribe(FluxOnErrorResume.java:47)
- at reactor.core.publisher.FluxDoFinally.subscribe(FluxDoFinally.java:73)
- at reactor.core.publisher.MonoIgnoreElements.subscribe(MonoIgnoreElements.java:37)
- at reactor.netty.tcp.TcpServerHandle.onStateChange(TcpServerHandle.java:64)
- at reactor.netty.tcp.TcpServerBind$ChildObserver.onStateChange(TcpServerBind.java:226)
- at reactor.netty.channel.ChannelOperationsHandler.channelActive(ChannelOperationsHandler.java:112)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelActive(AbstractChannelHandlerContext.java:213)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelActive(AbstractChannelHandlerContext.java:199)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelActive(AbstractChannelHandlerContext.java:192)
- at reactor.netty.tcp.SslProvider$SslReadHandler.userEventTriggered(SslProvider.java:720)
- at io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:329)
- at io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:315)
- at io.netty.channel.AbstractChannelHandlerContext.fireUserEventTriggered(AbstractChannelHandlerContext.java:307)
- at io.netty.handler.ssl.SslHandler.setHandshakeSuccess(SslHandler.java:1530)
- at io.netty.handler.ssl.SslHandler.wrapNonAppData(SslHandler.java:937)
- at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1360)
- at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1199)
- at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1243)
- at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:489)
- at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:428)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:265)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163)
- at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:628)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:528)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:482)
- at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | DEBUG | Released buffer memory after handling message stream
-
-
-**HV-VES** responds with the above when it handles a message and specified DmaaP MR Kafka bootstrap server is invalid.
-Restart with different ``--kafka-bootstrap-servers`` command line option value is required.
-For more information, see: :ref:`deployment`
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new client connection
+ | org.apache.kafka.clients.ClientUtils | WARN | Removing server invalid-message-router-kafka:9092 from bootstrap.servers as DNS resolution failed for invalid-message-router-kafka
+ | org.apache.kafka.clients.producer.KafkaProducer | INFO | [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 0 ms.
+ | org.onap.dcae.collectors.veshv.impl.HvVesCollector | WARN | Error while handling message stream: org.apache.kafka.common.KafkaException (Failed to construct kafka producer)
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Connection has been closed
+
+
+**HV-VES** responds with the above when it handles a message whose domain has invalid bootstrap_servers specified in the **streams_publishes** configuration.
+To fix this problem, correct the **streams_publishes** configuration stored in Consul.
+For more information, see: :ref:`run_time_configuration`.
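+
+For example, if the invalid host from the log above came from an entry like ``"bootstrap_servers": "invalid-message-router-kafka:9092"``, pointing it back at a resolvable Kafka address fixes the problem (host and topic below are the example values used throughout this page):
+
+.. code-block:: json
+
+    "kafka_info": {
+      "bootstrap_servers": "message-router-kafka:9092",
+      "topic_name": "HV_VES_PERF3GPP"
+    }
+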
====
**Kafka service became unavailable after producer has been created**
-**HV-VES** lazily creates Kafka consumer after first successfully handled event.
-If Kafka service becomes unreachable after consumer initialization, it is removed from bootstrap.servers list on next connection.
-
-Following information is logged:
+**HV-VES** lazily creates a Kafka producer for each domain.
+If the Kafka service becomes unreachable after producer initialization, appropriate logs will be shown and **HV-VES** will fail to deliver subsequent messages to that Kafka service.
::
- | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
- | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
- | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
- | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
- | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Error connecting to node message-router-kafka:9092 (id: 1001 rack: null) | | java.nio.channels.UnresolvedAddressException: null
- at java.base/sun.nio.ch.Net.checkAddress(Net.java:130)
- at java.base/sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:675)
- at org.apache.kafka.common.network.Selector.doConnect(Selector.java:233)
- ... 9 common frames omitted
- Wrapped by: java.io.IOException: Can't resolve address: message-router-kafka:9092
- at org.apache.kafka.common.network.Selector.doConnect(Selector.java:235)
- at org.apache.kafka.common.network.Selector.connect(Selector.java:214)
- at org.apache.kafka.clients.NetworkClient.initiateConnect(NetworkClient.java:864)
- at org.apache.kafka.clients.NetworkClient.access$700(NetworkClient.java:64)
- at org.apache.kafka.clients.NetworkClient$DefaultMetadataUpdater.maybeUpdate(NetworkClient.java:1035)
- at org.apache.kafka.clients.NetworkClient$DefaultMetadataUpdater.maybeUpdate(NetworkClient.java:920)
- at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:508)
- at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:239)
- at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:163)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | | kafka-producer-network-thread | producer-1
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new connection
- | org.apache.kafka.clients.ClientUtils | WARN | Removing server message-router-kafka:9092 from bootstrap.servers as DNS resolution failed for message-router-kafka
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | WARN | Error while handling message stream: org.apache.kafka.common.KafkaException (Failed to construct kafka producer)
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | DEBUG | Detailed stack trace
- at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:66)
- at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:406)
- ... 48 common frames omitted
- Wrapped by: org.apache.kafka.common.KafkaException: Failed to construct kafka producer
- at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:457)
- at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:289)
- at reactor.kafka.sender.internals.ProducerFactory.createProducer(ProducerFactory.java:33)
- at reactor.kafka.sender.internals.DefaultKafkaSender.lambda$new$0(DefaultKafkaSender.java:96)
- at reactor.core.publisher.MonoCallable.subscribe(MonoCallable.java:56)
- at reactor.core.publisher.MonoPeekFuseable.subscribe(MonoPeekFuseable.java:74)
- at reactor.core.publisher.Mono.subscribe(Mono.java:3590)
- at reactor.core.publisher.MonoProcessor.add(MonoProcessor.java:531)
- at reactor.core.publisher.MonoProcessor.subscribe(MonoProcessor.java:444)
- at reactor.core.publisher.MonoFlatMapMany.subscribe(MonoFlatMapMany.java:49)
- at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
- at reactor.core.publisher.FluxMap.subscribe(FluxMap.java:62)
- at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
- at reactor.core.publisher.FluxDefer.subscribe(FluxDefer.java:54)
- at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
- at reactor.core.publisher.FluxOnErrorResume.subscribe(FluxOnErrorResume.java:47)
- at reactor.core.publisher.FluxDoFinally.subscribe(FluxDoFinally.java:73)
- at reactor.core.publisher.MonoIgnoreElements.subscribe(MonoIgnoreElements.java:37)
- at reactor.netty.tcp.TcpServerHandle.onStateChange(TcpServerHandle.java:64)
- at reactor.netty.tcp.TcpServerBind$ChildObserver.onStateChange(TcpServerBind.java:226)
- at reactor.netty.channel.ChannelOperationsHandler.channelActive(ChannelOperationsHandler.java:112)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelActive(AbstractChannelHandlerContext.java:213)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelActive(AbstractChannelHandlerContext.java:199)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelActive(AbstractChannelHandlerContext.java:192)
- at reactor.netty.tcp.SslProvider$SslReadHandler.userEventTriggered(SslProvider.java:720)
- at io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:329)
- at io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:315)
- at io.netty.channel.AbstractChannelHandlerContext.fireUserEventTriggered(AbstractChannelHandlerContext.java:307)
- at io.netty.handler.ssl.SslHandler.setHandshakeSuccess(SslHandler.java:1530)
- at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1368)
- at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1199)
- at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1243)
- at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:489)
- at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:428)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:265)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163)
- at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:628)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:528)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:482)
- at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | DEBUG | Released buffer memory after handling message stream
-
-
-To resolve this issue, you can either wait for that Kafka service to be available, or just like in previous paragraph, restart **HV-VES** with different value of ``--kafka-bootstrap-servers`` option.
+ | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
+ | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
+ | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
+ | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available.
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new client connection
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Connection has been closed
+ | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available
+ | org.onap.dcae.collectors.veshv.impl.adapters.kafka.KafkaPublisher | WARN | Failed to send message to Kafka. Reason: Expiring 1 record(s) for HV_VES_PERF3GPP-0: 30007 ms has passed since batch creation plus linger time
+ | org.onap.dcae.collectors.veshv.impl.HvVesCollector | WARN | Error while handling message stream: org.apache.kafka.common.errors.TimeoutException (Expiring 1 record(s) for HV_VES_PERF3GPP-0: 30007 ms has passed since batch creation plus linger time)
+ | org.apache.kafka.clients.NetworkClient | WARN | [Producer clientId=producer-1] Error connecting to node message-router-kafka:9092 (id: 1001 rack: null)
+
+
+To resolve this issue, you can either wait for the Kafka service to become available or, just as in the previous paragraph, provide an alternative Kafka bootstrap server via the dynamic configuration (see :ref:`run_time_configuration`).
====
@@ -510,127 +156,12 @@ To resolve this issue, you can either wait for that Kafka service to be availabl
::
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | DEBUG | Client connection request received
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new connection
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | Got message with total size of 16384 B
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | WARN | Error while handling message stream: org.onap.dcae.collectors.veshv.impl.wire.WireFrameException (PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes))
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | DEBUG | Detailed stack trace | org.onap.dcae.collectors.veshv.impl.wire.WireFrameException: PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$onError$1$1.invoke(WireChunkDecoder.kt:72)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$onError$1$1.invoke(WireChunkDecoder.kt:41)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:33)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:27)
- at arrow.effects.IORunLoop.step(IORunLoop.kt:49)
- at arrow.effects.IO.unsafeRunTimed(IO.kt:115)
- at arrow.effects.IO.unsafeRunSync(IO.kt:112)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$generateFrames$1.accept(WireChunkDecoder.kt:66)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$generateFrames$1.accept(WireChunkDecoder.kt:41)
- at reactor.core.publisher.FluxGenerate.lambda$new$1(FluxGenerate.java:56)
- at reactor.core.publisher.FluxGenerate$GenerateSubscription.slowPath(FluxGenerate.java:262)
- at reactor.core.publisher.FluxGenerate$GenerateSubscription.request(FluxGenerate.java:204)
- at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:1849)
- at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onSubscribe(FluxOnErrorResume.java:68)
- at reactor.core.publisher.FluxGenerate.subscribe(FluxGenerate.java:83)
- at reactor.core.publisher.FluxOnErrorResume.subscribe(FluxOnErrorResume.java:47)
- at reactor.core.publisher.FluxDoFinally.subscribe(FluxDoFinally.java:73)
- at reactor.core.publisher.FluxDefer.subscribe(FluxDefer.java:54)
- at reactor.core.publisher.Flux.subscribe(Flux.java:7734)
- at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:442)
- at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onNext(FluxConcatMap.java:244)
- at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:192)
- at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:192)
- at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:114)
- at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:211)
- at reactor.netty.channel.FluxReceive.onInboundNext(FluxReceive.java:327)
- at reactor.netty.channel.ChannelOperations.onInboundNext(ChannelOperations.java:310)
- at reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:141)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1429)
- at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1199)
- at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1243)
- at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:489)
- at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:428)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:265)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163)
- at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:628)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:528)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:482)
- at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | WARN | Error while handling message stream: org.onap.dcae.collectors.veshv.impl.wire.WireFrameException (PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes))
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | DEBUG | Detailed stack trace | org.onap.dcae.collectors.veshv.impl.wire.WireFrameException: PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$onError$1$1.invoke(WireChunkDecoder.kt:72)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$onError$1$1.invoke(WireChunkDecoder.kt:41)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:33)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:27)
- at arrow.effects.IORunLoop.step(IORunLoop.kt:49)
- at arrow.effects.IO.unsafeRunTimed(IO.kt:115)
- at arrow.effects.IO.unsafeRunSync(IO.kt:112)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$generateFrames$1.accept(WireChunkDecoder.kt:66)
- at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$generateFrames$1.accept(WireChunkDecoder.kt:41)
- at reactor.core.publisher.FluxGenerate.lambda$new$1(FluxGenerate.java:56)
- at reactor.core.publisher.FluxGenerate$GenerateSubscription.slowPath(FluxGenerate.java:262)
- at reactor.core.publisher.FluxGenerate$GenerateSubscription.request(FluxGenerate.java:204)
- at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:1849)
- at reactor.core.publisher.FluxOnErrorResume$ResumeSubscriber.onSubscribe(FluxOnErrorResume.java:68)
- at reactor.core.publisher.FluxGenerate.subscribe(FluxGenerate.java:83)
- at reactor.core.publisher.FluxOnErrorResume.subscribe(FluxOnErrorResume.java:47)
- at reactor.core.publisher.FluxDoFinally.subscribe(FluxDoFinally.java:73)
- at reactor.core.publisher.FluxDefer.subscribe(FluxDefer.java:54)
- at reactor.core.publisher.Flux.subscribe(Flux.java:7734)
- at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:442)
- at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onNext(FluxConcatMap.java:244)
- at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:192)
- at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:192)
- at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:114)
- at reactor.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:211)
- at reactor.netty.channel.FluxReceive.onInboundNext(FluxReceive.java:327)
- at reactor.netty.channel.ChannelOperations.onInboundNext(ChannelOperations.java:310)
- at reactor.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:141)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1429)
- at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1199)
- at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1243)
- at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:489)
- at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:428)
- at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:265)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
- at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
- at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
- at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
- at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163)
- at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:628)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:528)
- at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:482)
- at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
- at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
- at java.base/java.lang.Thread.run(Thread.java:834)
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | DEBUG | Released buffer memory after handling message stream
-
-
-The above log is printed when the message payload size is too big. **HV-VES** does not handle messages that exceed specified payload size. Default value is **1048576 bytes (1MiB)**, but it can be configured via cmd or by environment variables.
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new client connection
+ | org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder | WARN | Error while handling message stream: org.onap.dcae.collectors.veshv.impl.wire.WireFrameException (PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes))
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Connection has been closed
+
+
+The above log is printed when the message payload size is too big. **HV-VES** does not handle messages that exceed the maximum payload size specified under the streams_publishes configuration (see :ref:`dmaap-connection-objects`).
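+
+As a sketch, the limit could be raised per domain in the Kafka connection object; the ``max_payload_size_bytes`` field name is an assumption here, taken from the DMaaP connection objects referenced above, and 1048576 bytes (1 MiB) is the limit shown in the log:
+
+.. code-block:: json
+
+    "kafka_info": {
+      "bootstrap_servers": "message-router-kafka:9092",
+      "topic_name": "HV_VES_PERF3GPP",
+      "max_payload_size_bytes": 1048576
+    }
+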
====
@@ -640,13 +171,9 @@ Messages with invalid Google Protocol Buffers data encoded are omitted. **HV-VES
::
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | DEBUG | Client connection request received
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new connection
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | Got message with total size of 28 B
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | Wire payload size: 16 B
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | TRACE | Wire frame header is valid
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | WARN | Failed to decode ves event header, reason: Protocol message tag had invalid wire type.
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | End of data in current TCP buffer
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new client connection
+ | org.onap.dcae.collectors.veshv.impl.HvVesCollector | WARN | Failed to decode ves event header, reason: Protocol message tag had invalid wire type.
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Connection has been closed
====
@@ -656,13 +183,9 @@ Messages with invalid Wire Frame, just like those containing invalid GPB data, w
::
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | DEBUG | Client connection request received
- | p.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new connection
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | Got message with total size of 322 B
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | Wire payload size: 310 B
- | org.onap.dcae.collectors.veshv.impl.VesHvCollector | WARN | Invalid wire frame header, reason: Invalid major version in wire frame header. Expected 1 but was 2
- | p.dcae.collectors.veshv.impl.wire.WireChunkDecoder | TRACE | End of data in current TCP buffer
-
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Handling new client connection
+ | org.onap.dcae.collectors.veshv.impl.HvVesCollector | WARN | Invalid wire frame header, reason: Invalid major version in wire frame header. Expected 1 but was 2
+ | org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer | INFO | Connection has been closed
====
@@ -673,44 +196,19 @@ For more information, see the :ref:`hv_ves_behaviors` section.
Authorization related errors
----------------------------
-**WARNING: SSL/TLS authorization is a part of an experimental feature for ONAP Casablanca release and should be treated as unstable and subject to change in future releases.**
+**WARNING: SSL/TLS authorization is part of an experimental feature in the ONAP Dublin release and should be treated as unstable and subject to change in future releases.**
**For more information, see** :ref:`ssl_tls_authorization`.
**Key or trust store missing**
::
- | org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | java.io.FileNotFoundException: /etc/ves-hv/server.p12 (No such file or directory)
- at java.io.FileInputStream.open0(Native Method)
- at java.io.FileInputStream.open(FileInputStream.java:195)
- at java.io.FileInputStream.<init>(FileInputStream.java:138)
- at org.onap.dcae.collectors.veshv.ssl.boundary.UtilsKt$streamFromFile$1.invoke(utils.kt:79)
- at org.onap.dcae.collectors.veshv.ssl.boundary.UtilsKt$streamFromFile$1.invoke(utils.kt)
- at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.loadKeyStoreFromFile(SslFactories.kt:50)
- at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.keyManagerFactory(SslFactories.kt:43)
- at org.onap.dcae.collectors.veshv.ssl.boundary.ServerSslContextFactory.jdkContext(ServerSslContextFactory.kt:42)
- at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContextWithConfiguredCerts(SslContextFactory.kt:49)
- at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContext(SslContextFactory.kt:39)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.configureServer(NettyTcpServer.kt:61)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.access$configureServer(NettyTcpServer.kt:46)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:52)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:46)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$sam$java_util_function_Consumer$0.accept(NettyTcpServer.kt)
- at reactor.ipc.netty.tcp.TcpServer.<init>(TcpServer.java:149)
- at reactor.ipc.netty.tcp.TcpServer$Builder.build(TcpServer.java:278)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:53)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:46)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
- at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
- at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
- at arrow.effects.IO.unsafeRunSync(IO.kt:106)
- at org.onap.dcae.collectors.veshv.utils.arrow.EffectsKt.unsafeRunEitherSync(effects.kt:50)
- at org.onap.dcae.collectors.veshv.main.MainKt.main(main.kt:41)
+ | org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | java.io.FileNotFoundException: /etc/ves-hv/server.p12
+
The above error is logged when the key store is not provided. Similarly, when the trust store is not provided, a missing **/etc/ves-hv/trust.p12** file is reported.
-**server.p12** and **trust.p12** are default names of key and trust stores. They can be changed by specifying ``--trust-store`` or ``--key-store`` command line arguments on deployment.
+These paths can be changed via the ``security.keys.trustStore`` and ``security.keys.keyStore`` file configuration entries; a sketch follows below.
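+
+A minimal sketch of the relevant security entries, assuming a flat JSON configuration file; the password-file entry names are illustrative:
+
+::
+
+  {
+    "security.sslDisable": false,
+    "security.keys.keyStore": "/etc/ves-hv/server.p12",
+    "security.keys.keyStorePasswordFile": "/etc/ves-hv/server.pass",
+    "security.keys.trustStore": "/etc/ves-hv/trust.p12",
+    "security.keys.trustStorePasswordFile": "/etc/ves-hv/trust.pass"
+  }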
====
@@ -719,71 +217,28 @@ The above error is logged when key store is not provided. Similarly, when trust
::
| org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | java.security.UnrecoverableKeyException: failed to decrypt safe contents entry: javax.crypto.BadPaddingException: Given final block not properly padded. Such issues can arise if a bad key is used during decryption.
- ... 23 common frames omitted
- Wrapped by: java.io.IOException: keystore password was incorrect
- at sun.security.pkcs12.PKCS12KeyStore.engineLoad(PKCS12KeyStore.java:2059)
- at java.security.KeyStore.load(KeyStore.java:1445)
- at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.loadKeyStoreFromFile(SslFactories.kt:51)
- at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.keyManagerFactory(SslFactories.kt:43)
- at org.onap.dcae.collectors.veshv.ssl.boundary.ServerSslContextFactory.jdkContext(ServerSslContextFactory.kt:42)
- at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContextWithConfiguredCerts(SslContextFactory.kt:49)
- at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContext(SslContextFactory.kt:39)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.configureServer(NettyTcpServer.kt:61)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.access$configureServer(NettyTcpServer.kt:46)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:52)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:46)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$sam$java_util_function_Consumer$0.accept(NettyTcpServer.kt)
- at reactor.ipc.netty.tcp.TcpServer.<init>(TcpServer.java:149)
- at reactor.ipc.netty.tcp.TcpServer$Builder.build(TcpServer.java:278)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:53)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:46)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
- at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
- at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
- at arrow.effects.IO.unsafeRunSync(IO.kt:106)
- at org.onap.dcae.collectors.veshv.utils.arrow.EffectsKt.unsafeRunEitherSync(effects.kt:50)
- at org.onap.dcae.collectors.veshv.main.MainKt.main(main.kt:41)
The key or trust store password provided in the configuration is invalid.
====
-**Invalid key store file**
+**Empty line at the end of password file**
::
+ | org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | java.security.UnrecoverableKeyException: failed to decrypt safe contents entry: java.io.IOException: getSecretKey failed: Password is not ASCII
- | org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | java.io.IOException: DerInputStream.getLength(): lengthTag=111, too big.
- at sun.security.util.DerInputStream.getLength(DerInputStream.java:599)
- at sun.security.util.DerValue.init(DerValue.java:391)
- at sun.security.util.DerValue.<init>(DerValue.java:332)
- at sun.security.util.DerValue.<init>(DerValue.java:345)
- at sun.security.pkcs12.PKCS12KeyStore.engineLoad(PKCS12KeyStore.java:1938)
- at java.security.KeyStore.load(KeyStore.java:1445)
- at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.loadKeyStoreFromFile(SslFactories.kt:51)
- at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.keyManagerFactory(SslFactories.kt:43)
- at org.onap.dcae.collectors.veshv.ssl.boundary.ServerSslContextFactory.jdkContext(ServerSslContextFactory.kt:42)
- at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContextWithConfiguredCerts(SslContextFactory.kt:49)
- at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContext(SslContextFactory.kt:39)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.configureServer(NettyTcpServer.kt:61)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.access$configureServer(NettyTcpServer.kt:46)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:52)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:46)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$sam$java_util_function_Consumer$0.accept(NettyTcpServer.kt)
- at reactor.ipc.netty.tcp.TcpServer.<init>(TcpServer.java:149)
- at reactor.ipc.netty.tcp.TcpServer$Builder.build(TcpServer.java:278)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:53)
- at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:46)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
- at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
- at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
- at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
- at arrow.effects.IO.unsafeRunSync(IO.kt:106)
- at org.onap.dcae.collectors.veshv.utils.arrow.EffectsKt.unsafeRunEitherSync(effects.kt:50)
- at org.onap.dcae.collectors.veshv.main.MainKt.main(main.kt:41)
-The above is logged when provided keystore has invalid or corrupted content.
-This log also appears when you try to use key store/trust store in archive format other than **PKCS12** (the only supported by **HV-VES** store type).
+The password file must not contain an empty line at the end; otherwise, server startup will fail.
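+
+One way to avoid the trailing newline is to write the password file with ``printf``, which, unlike ``echo``, does not append one (the path and variable name are illustrative):
+
+::
+
+  # write the password without a trailing newline
+  printf '%s' "$KEYSTORE_PASSWORD" > /etc/ves-hv/server.pass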
+====
+
+**Invalid key store file**
+
+::
+
+ | org.onap.dcae.collectors.veshv.main | ERROR | Failed to start a server | java.io.EOFException: Detect premature EOF
+
+The above is logged when the provided key store has invalid or corrupted content.
+This log also appears when the key store/trust store is in an archive format other than the one inferred from its file extension.
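+
+Before deployment, you can sanity-check a store file with the JDK's ``keytool`` (file path illustrative); a corrupted store, or one in an unexpected format, will fail to list:
+
+::
+
+  keytool -list -keystore /etc/ves-hv/server.p12 -storetype PKCS12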
diff --git a/platformdoc/docs/components/dcae-cli/blueprint-generator/blueprint_generator.rst b/platformdoc/docs/components/dcae-cli/blueprint-generator/blueprint_generator.rst
new file mode 100644
index 00000000..06ed5347
--- /dev/null
+++ b/platformdoc/docs/components/dcae-cli/blueprint-generator/blueprint_generator.rst
@@ -0,0 +1,54 @@
+
+
+Blueprint Generator (DCAE)
+=============================================
+
+What is the Blueprint Generator?
+++++++++++++++++++++++++++++++++
+The Blueprint Generator is a Java rewrite of the Tosca Lab Python tool. It takes a component spec for a given microservice and translates it into a blueprint YAML file that can be used during deployment.
+
+Steps to run the blueprint generator:
++++++++++++++++++++++++++++++++++++++
+
+1. Download the tar file from Maven by clicking `here <https://nexus.onap.org/content/repositories/snapshots/org/onap/dcaegen2/platform/cli/blueprint-generator/1.0.0-SNAPSHOT/blueprint-generator-1.0.0-20190410.212437-1-bundle.tar.gz/>`_ or by going to https://nexus.onap.org/content/repositories/snapshots/org/onap/dcaegen2/platform/cli/blueprint-generator/1.0.0-SNAPSHOT/ and clicking "blueprint-generator-1.0.0-20190410.212437-1-bundle.tar.gz"
+
+2. Extract the tar file
+
+3. ``cd`` into the folder that gets created (you should see a ``lib`` folder in this directory)
+
+4. If you are on Linux, run the following command: ``java -cp blueprint-generator/lib/blueprint-generator-1.0.0-SNAPSHOT.jar:blueprint-generator/lib/* org.onap.blueprintgenerator.core.BlueprintGenerator``
+
+5. If you are on Windows, run this command: ``java -cp "lib/blueprint-generator-1.0.0-SNAPSHOT.jar;lib/*" org.onap.blueprintgenerator.core.BlueprintGenerator``
+
+6. These commands run Java with a classpath that contains the tool's jar and all of its dependencies, followed by the fully qualified name of the main class. If done correctly, you should see a list of all of the flags you can add.
+
+7. When ready, run the program again with the required flags.
+
+8. OPTIONS:
+
+  -p: The path where the final blueprint YAML file will be created (required)
+
+  -i: The path to the JSON component spec file (required)
+
+  -n: The name of the blueprint (optional)
+
+  -t: The path to the import YAML file (optional)
+
+9. An example of running this program on Windows: ``java -cp "lib/blueprint-generator-onap-0.0.1-SNAPSHOT.jar;lib/*" org.onap.blueprintgenerator.core.BlueprintGenerator -p Blueprints -i ComponentSpecs/TestComponentSpec.json -n HelloWorld``
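+
+On Linux, the same invocation would use ``:`` instead of ``;`` as the classpath separator (flags and paths unchanged):
+
+::
+
+  java -cp "lib/blueprint-generator-onap-0.0.1-SNAPSHOT.jar:lib/*" org.onap.blueprintgenerator.core.BlueprintGenerator -p Blueprints -i ComponentSpecs/TestComponentSpec.json -n HelloWorld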
+
+
+Extra information
+-----------------
+
+1. The component spec must follow the format described on the ONAP `readthedocs <https://onap.readthedocs.io/en/latest/submodules/dcaegen2.git/docs/sections/components/component-specification/common-specification.html#working-with-component-specs>`_ page
+
+2. Flags marked as required must be supplied; the program will not run without them
+
+3. Flags marked as optional may be omitted
+
+4. If you do not add the ``-n`` flag, the blueprint name defaults to the name given in the component spec
+
+5. If the directory specified with the ``-p`` flag does not already exist, it will be created for you
+
+6. The ``-t`` flag overrides the default imports set for the blueprints. For an example of how the import YAML file should be structured, see the testImports.yaml file under the TestCases folder; a sketch follows below
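+
+A minimal sketch of such an import file, assuming the usual structure of blueprint imports (the URLs are placeholders):
+
+::
+
+  imports:
+    - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+    - <url-of-your-plugin-types.yaml>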