Diffstat (limited to 'docs/sections')
-rw-r--r--   docs/sections/apis/ves-hv/MeasDataCollection.proto          |  78
-rw-r--r--   docs/sections/apis/ves-hv/Perf3gppFields.proto               |  37
-rw-r--r--   docs/sections/apis/ves-hv/index.rst                          |  84
-rw-r--r--   docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png   | bin 0 -> 46724 bytes
-rw-r--r--   docs/sections/services/ves-hv/WTP.yaml                       |  45
-rw-r--r--   docs/sections/services/ves-hv/architecture.rst               |  18
-rw-r--r--   docs/sections/services/ves-hv/authorization.rst              |  26
-rw-r--r--   docs/sections/services/ves-hv/deployment.rst                 |  88
-rw-r--r--   docs/sections/services/ves-hv/design.rst                     |  47
-rw-r--r--   docs/sections/services/ves-hv/example-event.rst              |  15
-rw-r--r--   docs/sections/services/ves-hv/index.rst                      |  15
-rw-r--r--   docs/sections/services/ves-hv/repositories.rst               |  22
-rw-r--r--   docs/sections/services/ves-hv/run-time-configuration.rst     |  59
-rw-r--r--   docs/sections/services/ves-hv/troubleshooting.rst            | 506
14 files changed, 998 insertions, 42 deletions
diff --git a/docs/sections/apis/ves-hv/MeasDataCollection.proto b/docs/sections/apis/ves-hv/MeasDataCollection.proto
new file mode 100644
index 00000000..978cb28a
--- /dev/null
+++ b/docs/sections/apis/ves-hv/MeasDataCollection.proto
@@ -0,0 +1,78 @@
+/*
+ * ============LICENSE_START=======================================================
+ * dcaegen2-collectors-veshv
+ * ================================================================================
+ * Copyright (C) 2018 NOKIA
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+syntax = "proto3";
+package org.onap.ves;
+
+// Definition for RTPM, structure aligned with 3GPP PM format optimized for RTPM delivery pre-standard TS 28.550 V2.0.0 (2018-09).
+// Some field details are taken from 3GPP TS 32.436 V15.0.0 (2018-06) ASN.1 file.
+// Note (2018-09): work is in progress for 3GPP TS 28.550. Changes will be made, if needed, to align with final version.
+// Differences/additions to 3GPP TS 28.550 are marked with "%%".
+
+message MeasDataCollection // top-level message
+{
+ // %% Combined messageFileHeader, measData (single instance), messageFileFooter (not needed: timestamp = collectionBeginTime + granularityPeriod).
+ string formatVersion = 1; // required, current value "28.550 2.0"
+ uint32 granularityPeriod = 2; // required, duration in seconds, %% moved from MeasInfo (single reporting period per event)
+ string measuredEntityUserName = 3; // network function user definable name ("userLabel") defined for the measured entity in 3GPP TS 28.622
+ string measuredEntityDn = 4; // DN as per 3GPP TS 32.300
+ string measuredEntitySoftwareVersion = 5;
+ repeated string measObjInstIdList = 6; // %%: optional, monitored object LDNs as per 3GPP TS 32.300 and 3GPP TS 32.432
+ repeated MeasInfo measInfo = 7;
+}
+
+message MeasInfo
+{
+ oneof MeasInfoId { // measurement group identifier
+ uint32 iMeasInfoId = 1; // identifier as integer (%%: more compact)
+ string measInfoId = 2; // identifier as string (more generic)
+ }
+
+ oneof MeasTypes { // measurement identifiers associated with the measurement results
+ IMeasTypes iMeasTypes = 3; // identifiers as integers (%%: more compact)
+ SMeasTypes measTypes = 4; // identifiers as strings (more generic)
+ }
+ // Needed only because GPB does not support repeated fields directly inside 'oneof'
+ message IMeasTypes { repeated uint32 iMeasType = 1; }
+ message SMeasTypes { repeated string measType = 1; }
+
+ string jobId = 5;
+ repeated MeasValue measValues = 6; // performance measurements grouped by measurement object
+}
+
+message MeasValue
+{
+ oneof MeasObjInstId { // monitored object LDN as per 3GPP TS 32.300 and 3GPP TS 32.432
+ string measObjInstId = 1; // LDN itself
+ uint32 measObjInstIdListIdx = 2; // %%: index into measObjInstIdList (zero-based)
+ }
+ repeated MeasResult measResults = 3;
+ bool suspectFlag = 4;
+ map<string, string> measObjAddlFlds = 5; // %%: optional per-object data (name/value HashMap)
+}
+
+message MeasResult
+{
+ uint32 p = 1; // Index in the MeasTypes array (zero-based), needed only if measResults has fewer elements than MeasTypes
+ oneof xValue {
+ sint64 iValue = 2;
+ double rValue = 3;
+ bool isNull = 4;
+ }
+}
diff --git a/docs/sections/apis/ves-hv/Perf3gppFields.proto b/docs/sections/apis/ves-hv/Perf3gppFields.proto
new file mode 100644
index 00000000..453d1062
--- /dev/null
+++ b/docs/sections/apis/ves-hv/Perf3gppFields.proto
@@ -0,0 +1,37 @@
+/*
+ * ============LICENSE_START=======================================================
+ * dcaegen2-collectors-veshv
+ * ================================================================================
+ * Copyright (C) 2018 NOKIA
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+syntax = "proto3";
+package org.onap.ves;
+import "MeasDataCollection.proto"; // for 3GPP PM format
+
+message Perf3gppFields
+{
+ string perf3gppFieldsVersion = 1; // required, current value "1.0"
+ MeasDataCollection measDataCollection = 2; // required
+ // Based on 3GPP TS 28.550
+ // Logical mapping from 3GPP to ONAP header fields:
+ // 3GPP MeasFileHeader ONAP/VES CommonEventHeader
+ // senderName sourceName
+ // senderType nfNamingCode + nfcNamingCode
+ // vendorName nfVendorName
+ // collectionBeginTime startEpochMicrosec
+ // timestamp lastEpochMicrosec
+ map<string, string> eventAddlFlds = 3; // optional per-event data (name/value HashMap)
+}
diff --git a/docs/sections/apis/ves-hv/index.rst b/docs/sections/apis/ves-hv/index.rst
index 799f92d4..b707d9fd 100644
--- a/docs/sections/apis/ves-hv/index.rst
+++ b/docs/sections/apis/ves-hv/index.rst
@@ -8,7 +8,7 @@ HV-VES (High Volume VES)
:Date: 2018-10-05
.. contents::
- :depth: 3
+ :depth: 4
..
Overview
@@ -18,42 +18,98 @@ Component description can be found under `HV-VES Collector`_.
.. _HV-VES Collector: ../../services/ves-hv/index.html
+.. _tcp_endpoint:
TCP Endpoint
============
HV-VES is exposed as NodePort service on Kubernetes cluster on port 30222/tcp.
-It uses plain TCP connections tunneled in SSL/TLS or can be run in insecure manner without data encryption on the socket.
+It uses a plain, insecure TCP connection without socket data encryption. In the Casablanca release, there is an experimental option to enable SSL/TLS (see :ref:`authorization`).
Without TLS, client authentication/authorization is not possible.
-Connections are stream-based (as opposed to request-based) and long running.
+Connections are stream-based (as opposed to request-based) and long-running.
Communication is wrapped with thin Wire Transfer Protocol, which mainly provides delimitation.
.. literalinclude:: WTP.asn
:language: asn
-Payload is binary-encoded, currently using Google Protocol Buffers representation of the VES Common Header.
+The payload is binary-encoded, using a Google Protocol Buffers (GPB) representation of the VES Event.
.. literalinclude:: VesEvent.proto
:language: protobuf
-The PROTO file, which contains the VES CommonHeader, comes with a binary-type Payload parameter, where domain-specific
-data shall be placed. Domain-specific data are encoded as well with GPB, and they do require a domain-specific
-PROTO file to decode the data.
+HV-VES makes routing decisions based mostly on the content of the **Domain** parameter in the VES Common Event Header.
-HV-VES makes routing decisions based mostly on the content of the **Domain** parameter in VES Common Header.
+The PROTO file, which contains the VES CommonEventHeader, comes with a binary-type payload (eventFields) parameter where domain-specific
+data should be placed. Domain-specific data are also encoded with GPB, and a domain-specific PROTO file is required to decode them.
+Domain **perf3gpp**
+===================
-Healthcheck
-===========
+The purpose of the **perf3gpp** domain is to deliver performance measurements from a network function (NF) to ONAP in 3GPP format.
+The first application of this domain is the frequent, periodic delivery of structured RAN PM data, commonly referred to as Real Time PM (RTPM).
+The equipment sends an event right after collecting the PM data for a granularity period.
-Inside HV-VES docker container runs small http service for healthcheck - exact port for this service can be configured
-at deployment using `--health-check-api-port` command line option.
+The characteristics of each event in the **perf3gpp** domain:
-This service exposes single endpoint **GET /health/ready** which returns **HTTP 200 OK** in case HV-VES is healthy
-and ready for connections. Otherwise it returns **HTTP 503 Service Unavailable** with short reason of unhealthiness.
+- Single measured entity, for example, a BTS
+- Single granularity period (collection *begin time* and *duration*)
+- Optional top-level grouping into one or more PM groups
+- Grouping into one or more measured objects, for example, cells
+- One or more reported PM values for each measured object
+Due to the single granularity period per event, a single piece of equipment supporting multiple concurrent granularity periods might send more than one event at a given reporting time.
+The **perf3gpp** domain is based on 3GPP specifications:
+- `3GPP TS 28.550 <http://www.3gpp.org/ftp//Specs/archive/28_series/28.550/>`_
+- `3GPP TS 32.431 <http://www.3gpp.org/ftp//Specs/archive/32_series/32.431/>`_
+- `3GPP TS 32.436 <http://www.3gpp.org/ftp//Specs/archive/32_series/32.436/>`_
+The event structure differs from the one presented in the 3GPP technical specifications: the 3GPP structure is enhanced to support efficient transport.
+Definitions for the **perf3gpp** domain are stored in Perf3gppFields.proto and MeasDataCollection.proto, listed below:
+
+.. literalinclude:: Perf3gppFields.proto
+ :language: protobuf
+
+.. literalinclude:: MeasDataCollection.proto
+ :language: protobuf
+
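+As an illustration of how these definitions are used, the Kotlin sketch below builds a minimal ``Perf3gppFields`` message with the builders that ``protoc`` generates from the files above. The outer class names, the sample values and the omitted optional fields are assumptions made for this example and depend on the ``protoc`` options used in a particular build.
+
+.. code-block:: kotlin
+
+    // Assumed names of the classes generated from the .proto files above.
+    import org.onap.ves.MeasDataCollectionOuterClass.MeasDataCollection
+    import org.onap.ves.MeasDataCollectionOuterClass.MeasInfo
+    import org.onap.ves.MeasDataCollectionOuterClass.MeasResult
+    import org.onap.ves.MeasDataCollectionOuterClass.MeasValue
+    import org.onap.ves.Perf3gppFieldsOuterClass.Perf3gppFields
+
+    fun samplePerf3gppFields(): Perf3gppFields =
+        Perf3gppFields.newBuilder()
+            .setPerf3gppFieldsVersion("1.0")
+            .setMeasDataCollection(MeasDataCollection.newBuilder()
+                .setFormatVersion("28.550 2.0")
+                .setGranularityPeriod(900)                      // reporting period in seconds
+                .setMeasuredEntityDn("sampleEntityDn")
+                .addMeasInfo(MeasInfo.newBuilder()
+                    .setIMeasInfoId(1)                          // PM group identifier (integer form)
+                    .addMeasValues(MeasValue.newBuilder()
+                        .setMeasObjInstId("sampleLdn")          // measured object LDN
+                        .addMeasResults(MeasResult.newBuilder().setIValue(63888L)))))
+            .build()
+
+    // samplePerf3gppFields().toByteArray() is what goes into the eventFields payload of a VesEvent.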
+
+API towards DMaaP
+=================
+
+HV-VES Collector forwards incoming messages to a particular DMaaP Kafka topic based on the domain and the configuration. Every Kafka record comprises a key and a value. In the case of HV-VES:
+
+- **Kafka record key** is a GPB-encoded `CommonEventHeader`.
+- **Kafka record value** is a GPB-encoded `VesEvent` (`CommonEventHeader` and domain-specific `eventFields`).
+
+In both cases, the raw bytes can be extracted using ``org.apache.kafka.common.serialization.ByteArrayDeserializer`` and then passed to the ``parseFrom`` methods of the classes generated from the GPB definitions. WTP is not used here; it is only used in the communication between the PNF/VNF and the collector.
+
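+A minimal Kotlin sketch of such a consumer is shown below. It assumes classes generated by ``protoc`` from the GPB definitions; the outer class name (``VesEventOuterClass``), the field accessors, the topic name and the bootstrap server address are assumptions that depend on the actual build options and deployment.
+
+.. code-block:: kotlin
+
+    import org.apache.kafka.clients.consumer.ConsumerConfig
+    import org.apache.kafka.clients.consumer.KafkaConsumer
+    import org.apache.kafka.common.serialization.ByteArrayDeserializer
+    import java.time.Duration
+    // Assumed names of the classes generated from VesEvent.proto:
+    import org.onap.ves.VesEventOuterClass.CommonEventHeader
+    import org.onap.ves.VesEventOuterClass.VesEvent
+
+    fun main() {
+        val props = mapOf<String, Any>(
+            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG to "message-router-kafka:9092",
+            ConsumerConfig.GROUP_ID_CONFIG to "sample-hv-ves-reader",
+            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG to ByteArrayDeserializer::class.java,
+            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG to ByteArrayDeserializer::class.java)
+        KafkaConsumer<ByteArray, ByteArray>(props).use { consumer ->
+            consumer.subscribe(listOf("HV_VES_PERF3GPP"))
+            while (true) {
+                consumer.poll(Duration.ofSeconds(1)).forEach { record ->
+                    // Key: CommonEventHeader, value: whole VesEvent (header + raw eventFields)
+                    val header = CommonEventHeader.parseFrom(record.key())
+                    val event = VesEvent.parseFrom(record.value())
+                    println("domain=${header.domain}, eventFields=${event.eventFields.size()} bytes")
+                }
+            }
+        }
+    }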
+
+.. _hv_ves_behaviors:
+
+HV-VES behaviors
+================
+
+Connections with HV-VES are stream-based (as opposed to request-based) and long-running. If a connection is interrupted or closed, the collector logs this event but does not try to reconnect to the client.
+Communication is wrapped in a thin Wire Transfer Protocol, which mainly provides delimitation. A Wire Transfer Protocol frame:
+
+- is dropped after decoding and validation; only the GPB payload is used in further processing.
+- has to start with **MARKER_BYTE**, as defined in the protocol specification (see :ref:`tcp_endpoint`). If **MARKER_BYTE** is invalid, HV-VES disconnects from the client. A sketch of the frame layout follows this list.
+
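+The sketch below shows how a client might frame a GPB-encoded ``VesEvent``, following the field layout from the WTP.yaml example (the ASN.1 definition referenced in :ref:`tcp_endpoint` is authoritative); ``encodeWtpFrame`` is a hypothetical helper, not part of any HV-VES API.
+
+.. code-block:: kotlin
+
+    import java.nio.ByteBuffer
+
+    // Prepends the 12-byte WTP header (big-endian) to a GPB-encoded VesEvent.
+    fun encodeWtpFrame(gpbPayload: ByteArray): ByteArray {
+        val frame = ByteBuffer.allocate(12 + gpbPayload.size)
+        frame.put(0xAA.toByte())              // MARKER_BYTE (magic)
+        frame.put(0x01.toByte())              // versionMajor
+        frame.put(0x00.toByte())              // versionMinor
+        frame.put(byteArrayOf(0, 0, 0))       // reserved
+        frame.putShort(0x0001.toShort())      // payloadId: GPB-encoded VesEvent
+        frame.putInt(gpbPayload.size)         // payloadLength in bytes (at most 1 MiB by default)
+        frame.put(gpbPayload)                 // the GPB payload itself
+        return frame.array()
+    }
+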
+HV-VES decodes only the CommonEventHeader from the received GPB message. The collector does not decode or validate the rest of the GPB message and publishes it to the Kafka topic intact.
+The Kafka topic for publishing events of a specific domain can be configured through the Consul service, as described in :ref:`run_time_configuration`.
+If the Kafka service is unavailable, the collector drops the currently handled messages and disconnects the client.
+
+Message handling:
+
+- HV-VES Collector skips messages with an unknown or invalid GPB CommonEventHeader format.
+- HV-VES Collector skips messages with an unsupported domain. A domain is unsupported if there is no route for it in the configuration (see :ref:`run_time_configuration`).
+- HV-VES Collector skips messages with an invalid Wire Frame format, an unsupported WTP version, or inconsistent data in the frame (other than an invalid **MARKER_BYTE**).
+- HV-VES Collector interrupts the connection when it encounters a message with a GPB payload that is too big. The default maximum size and the ways to change it are described in :ref:`deployment`.
+
+.. note:: An xNF (VNF/PNF) can split messages bigger than 1 MiB and set the `sequence` field in the CommonEventHeader accordingly. Messages smaller than 1 MiB are advised for GPB encoding/decoding efficiency.
+
+Messages skipped for any of the above reasons might not leave any trace in the HV-VES logs.
diff --git a/docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png b/docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png
new file mode 100644
index 00000000..7652b970
--- /dev/null
+++ b/docs/sections/services/ves-hv/ONAP_VES_HV_Architecture.png
Binary files differ
diff --git a/docs/sections/services/ves-hv/WTP.yaml b/docs/sections/services/ves-hv/WTP.yaml
new file mode 100644
index 00000000..835ab309
--- /dev/null
+++ b/docs/sections/services/ves-hv/WTP.yaml
@@ -0,0 +1,45 @@
+WTP:
+ -- direct encoding using ASN.1 notation - WTP.asn
+ magic: 0xAA
+ versionMajor: 0x01
+ versionMinor: 0x00
+ reserved: 0x00 0x00 0x00
+ payloadId: 0x00 0x01
+ -- payloadLength set to the highest value 1MiB = 1024 * 1024 = 1048576 B
+ payloadLength: 0x00 0x10 0x00 0x00
+ payload:
+ -- GPB encoded payload - VesEvent.proto
+ commonEventHeader:
+ version: "1.0"
+ domain: "perf3gpp"
+ sequence: 0
+ priority: 1
+ eventId: "sampleEventId01"
+ eventName: "sampleEventName01"
+ lastEpochMicrosec: 120034455
+ startEpochMicrosec: 120034455
+ reportingEntityName: "sampleEntityName"
+ sourceName: "sampleSourceName"
+ vesEventListenerVersion: "anotherVersion"
+ eventFields:
+ -- GPB encoded fields for perf3gpp domain - Perf3gppFields.proto
+ perf3gppFieldsVersion: "1.0"
+ measDataCollection:
+ -- GPB encoded RTPM - MeasDataCollection.proto
+ formatVersion: "28.550 2.0"
+ granularityPeriod: 5
+ measuredEntityUserName: "sampleEntityUserName"
+ measuredEntityDn: "sampleEntityDn"
+ measuredEntitySoftwareVersion: "1.0"
+ measInfo:
+ - measInfo1:
+ iMeasInfoId: 1
+ iMeasTypes: 1
+ jobId: "sampleJobId"
+ measValues:
+ - measValue1:
+ measObjInstIdListIdx: 1
+ measResults:
+            p: 0
+            iValue: 63888
+ suspectFlag: false
diff --git a/docs/sections/services/ves-hv/architecture.rst b/docs/sections/services/ves-hv/architecture.rst
new file mode 100644
index 00000000..986e8bb3
--- /dev/null
+++ b/docs/sections/services/ves-hv/architecture.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _architecture:
+
+High-level architecture of HV-VES
+=================================
+
+HV-VES Collector is a part of DCAEGEN2. Its goal is to collect data from xNF (PNF/VNF) and publish it in DMaaP's Kafka.
+High Volume Collector is deployed with DCAEGEN2 via OOM Helm charts and Cloudify blueprints.
+
+Input messages arrive over a TCP interface, wrapped in the Wire Transfer Protocol. Each frame includes a Google Protocol Buffers (GPB) encoded payload.
+Based on the information provided in the CommonEventHeader, messages are validated and published to a domain-specific Kafka topic in DMaaP.
+
+.. image:: ONAP_VES_HV_Architecture.png
+
+Messages published to DMaaP's Kafka topics are consumed by DCAE analytics applications or other ONAP components that consume messages from DMaaP/Kafka.
+DMaaP provides direct access to Kafka, allowing other analytics applications to utilize its data.
diff --git a/docs/sections/services/ves-hv/authorization.rst b/docs/sections/services/ves-hv/authorization.rst
new file mode 100644
index 00000000..27efdf49
--- /dev/null
+++ b/docs/sections/services/ves-hv/authorization.rst
@@ -0,0 +1,26 @@
+ **WARNING: SSL/TLS authorization is part of an experimental feature for the ONAP Casablanca release and thus should be treated as unstable and subject to change in future releases.**
+
+.. _authorization:
+
+SSL/TLS authorization
+=====================
+
+HV-VES can be configured to require the use of SSL/TLS on every TCP connection. This can be done only during the deployment of the application container. For the exact commands, see :ref:`deployment`.
+
+General steps for configuring TLS for HV-VES collector:
+
+1. Create the collector's key-store in **PKCS #12** format and add HV-VES server certificate to it.
+2. Create the collector's trust-store in **PKCS #12** format with all trusted certificates and certification authorities. Every client with a certificate signed by a Certificate Authority (CA) in the chain of trust is allowed. The trust-store should not contain ONAP's root CAs.
+3. Start the collector with all required options specified.
+
+ .. code-block:: bash
+
+ docker run -v /path/to/key/and/trust/stores:/etc/hv-ves nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main --listen-port 6061 --config-url http://consul:8500/v1/kv/dcae-hv-ves-collector --key-store /etc/hv-ves/keystore.p12 --key-store-password keystorePass --trust-store /etc/hv-ves/truststore.p12 --trust-store-password truststorePass
+
+
+
+HV-VES uses the OpenJDK (version 8u181) implementation of TLS ciphers. For reference, see https://docs.oracle.com/javase/8/docs/technotes/guides/security/overview/jsoverview.html.
+
+If SSL/TLS is enabled for the HV-VES container, the service also turns on client authentication: HV-VES requires clients to provide their certificates on connection. In addition, HV-VES provides its certificate to every client during the SSL/TLS handshake to enable two-way authorization.
+
+The service rejects any connection attempt that is not secured by SSL/TLS, as well as every connection made by an unauthorized client, that is, a client whose certificate is not signed by a CA contained in the HV-VES Collector trust store. With TLS tunneling, the communication protocol does not change (see the description in :ref:`hv_ves_behaviors`); in particular, there is no change to the Wire Transfer Protocol.
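+
+For clients, the TLS setup is standard JSSE configuration. The sketch below is a minimal client-side example, assuming the client has its own PKCS #12 key store and a trust store containing the CA that signed the HV-VES certificate; all paths, passwords and the host/port are placeholders.
+
+.. code-block:: kotlin
+
+    import java.io.FileInputStream
+    import java.security.KeyStore
+    import javax.net.ssl.KeyManagerFactory
+    import javax.net.ssl.SSLContext
+    import javax.net.ssl.TrustManagerFactory
+
+    // Builds an SSLContext from client-side PKCS #12 key and trust stores.
+    fun clientSslContext(keyStorePath: String, keyStorePass: CharArray,
+                         trustStorePath: String, trustStorePass: CharArray): SSLContext {
+        val keyStore = KeyStore.getInstance("PKCS12").apply {
+            FileInputStream(keyStorePath).use { load(it, keyStorePass) }
+        }
+        val trustStore = KeyStore.getInstance("PKCS12").apply {
+            FileInputStream(trustStorePath).use { load(it, trustStorePass) }
+        }
+        val kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm())
+            .apply { init(keyStore, keyStorePass) }
+        val tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm())
+            .apply { init(trustStore) }
+        return SSLContext.getInstance("TLS").apply { init(kmf.keyManagers, tmf.trustManagers, null) }
+    }
+
+    // Usage: wrap the TCP connection to HV-VES in TLS before sending WTP frames, e.g.
+    // clientSslContext(...).socketFactory.createSocket("hv-ves-host", 6061)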
diff --git a/docs/sections/services/ves-hv/deployment.rst b/docs/sections/services/ves-hv/deployment.rst
new file mode 100644
index 00000000..55529061
--- /dev/null
+++ b/docs/sections/services/ves-hv/deployment.rst
@@ -0,0 +1,88 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+.. _deployment:
+
+Deployment
+============
+
+To run the HV-VES Collector container, you need to specify the required parameters by passing them as command
+line arguments, using either the long form (--long-form) or the short form (-s), followed by an argument if needed.
+
+All parameters can also be configured by specifying environment variables. These variables are named after the command line option,
+rewritten in `UPPER_SNAKE_CASE` and prepended with the `VESHV_` prefix, for example `VESHV_LISTEN_PORT`.
+
+Command line options have precedence over environment variables.
+
++-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+| Long form | Short form | Env form | Required | Arg | Description |
++=============+============+===================+==========+=====+=================================================+
+| listen-port | p | VESHV_LISTEN_PORT | yes | yes | Port on which HV-VES listens internally |
++-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+| config-url | c | VESHV_CONFIG_URL | yes | yes | URL of HV-VES configuration on Consul service |
++-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+
+HV-VES also requires specifying whether SSL should be used when handling incoming TCP connections.
+This can be done by passing the flag below on the command line.
+
++-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+| Long form | Short form | Env form | Required | Arg | Description |
++=============+============+===================+==========+=====+=================================================+
+| ssl-disable | l | VESHV_SSL_DISABLE | no | no | Disables SSL encryption |
++-------------+------------+-------------------+----------+-----+-------------------------------------------------+
+
+
+Minimal command for running the container:
+
+.. code-block:: bash
+
+ docker run nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main --listen-port 6061 --config-url http://consul:8500/v1/kv/dcae-hv-ves-collector --ssl-disable
+
+Optional configuration parameters:
+
++-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
+| Long form | Short form | Env form | Required | Arg | Default | Description |
++=======================+============+============================+==========+=====+=================+=======================================================+
+| health-check-api-port | H | VESHV_HEALTHCHECK_API_PORT | no | yes | 6060 | Health check REST API listen port |
++-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
+| first-request-delay | d | VESHV_FIRST_REQUEST_DELAY | no | yes | 10 | Delay of first request to Consul service in seconds |
++-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
+| request-interval | I | VESHV_REQUEST_INTERVAL | no | yes | 5 | Interval of Consul configuration requests in seconds |
++-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
+| idle-timeout-sec | i | VESHV_IDLE_TIMEOUT_SEC | no | yes | 60 | Idle timeout for remote hosts. After given time |
+| | | | | | | without any data exchange, the connection |
+| | | | | | | might be closed. |
++-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
+| max-payload-size | m | VESHV_MAX_PAYLOAD_SIZE | no | yes | 1048576 (1 MiB) | Maximum supported payload size in bytes |
++-----------------------+------------+----------------------------+----------+-----+-----------------+-------------------------------------------------------+
+
+As part of the experimental API, if you do not specify the `ssl-disable` flag, you need to specify additional
+parameters for the security configuration.
+
++-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
+| Long form | Short form | Env form | Required | Arg | Default | Description |
++=======================+============+============================+==========+=====+========================+==============================================================+
+| key-store | k | VESHV_KEY_STORE | no | yes | /etc/ves-hv/server.p12 | Key store in PKCS12 format path |
++-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
+| key-store-password | kp | VESHV_KEY_STORE_PASSWORD | no | yes | | Key store password |
++-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
+| trust-store | t | VESHV_TRUST_STORE | no | yes | /etc/ves-hv/trust.p12 | File with trusted certificate bundle in PKCS12 format path |
++-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
+| trust-store-password | tp | VESHV_TRUST_STORE_PASSWORD | no | yes | | Trust store password |
++-----------------------+------------+----------------------------+----------+-----+------------------------+--------------------------------------------------------------+
+
+Passwords are mandatory without the `ssl-disable` flag. If the key-store or trust-store location is not specified, HV-VES tries to read them from the default locations.
+
+These parameters can be configured either by passing command line options during the `docker run` call or
+by specifying environment variables named after the command line option name,
+rewritten in `UPPER_SNAKE_CASE` and prepended with the `VESHV_` prefix, e.g. `VESHV_LISTEN_PORT`.
+
+Healthcheck
+===========
+
+A small HTTP service for health checks runs inside the HV-VES docker container; the exact port for this service can be configured
+at deployment time using the `--health-check-api-port` command line option.
+
+This service exposes a single endpoint, **GET /health/ready**, which returns **HTTP 200 OK** when HV-VES is healthy
+and ready for connections. Otherwise, it returns **HTTP 503 Service Unavailable** with a short reason for the unhealthiness.
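+
+A readiness check can therefore be implemented as a plain HTTP GET. Below is a minimal Kotlin sketch; the host name is a placeholder and the port is whatever was passed via `--health-check-api-port` (6060 by default).
+
+.. code-block:: kotlin
+
+    import java.net.HttpURLConnection
+    import java.net.URL
+
+    // Returns true if HV-VES reports itself as ready (HTTP 200), false otherwise (e.g. HTTP 503).
+    fun isHvVesReady(host: String = "localhost", port: Int = 6060): Boolean {
+        val connection = URL("http://$host:$port/health/ready").openConnection() as HttpURLConnection
+        return try {
+            connection.requestMethod = "GET"
+            connection.responseCode == HttpURLConnection.HTTP_OK
+        } finally {
+            connection.disconnect()
+        }
+    }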
diff --git a/docs/sections/services/ves-hv/design.rst b/docs/sections/services/ves-hv/design.rst
index 8e7ce7ad..a6c2b864 100644
--- a/docs/sections/services/ves-hv/design.rst
+++ b/docs/sections/services/ves-hv/design.rst
@@ -1,6 +1,8 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
+.. _design:
+
Design
======
@@ -8,44 +10,43 @@ Design
Compatibility aspects (VES-JSON)
--------------------------------
-HV-VES Collector has been designed as a high-volume variant of the existing VES(JSON) collector, and not a completely new collector.
-HV-VES follows the VES-JSON schema - as much as possible
+HV-VES Collector is a high-volume variant of the existing VES (JSON) collector, and not a completely new collector.
+HV-VES follows the VES-JSON schema as much as possible.
-- It uses a Google Protocol Buffers ( GPB/PROTO ) representation of the VES Common Header
-- The PROTO files tend to use most encoding effective types defined by GPB to cover Common Header fields.
-- It makes routing decisions based mostly on the content of the "Domain" parameter
-- It allows to embed Payload of different types (by default PERF3GPP domain is included)
+- HV-VES uses a Google Protocol Buffers (GPB, proto files) representation of the VES Common Header.
+- The proto files use the most encoding-effective types defined by GPB to cover the Common Header fields.
+- HV-VES makes routing decisions based mostly on the content of the **Domain** parameter.
+- HV-VES allows embedding payloads of different types (by default, the perf3gpp domain is included).
Analytics applications impacts
-- An analytics application operating on high-volume data needs to be prepared to read directly from Kafka
-- An analytics application need to operate on GPB encoded data in order to benefit from GPB encoding efficiencies
-- It is assumed, that due to the nature of high volume data, there would have to be dedicated applications provided,
-able to operate on such volumes of data.
+- An analytics application operating on high-volume data needs to be prepared to read directly from Kafka.
+- An analytics application needs to operate on GPB encoded data in order to benefit from GPB encoding efficiencies.
+- It is assumed that, due to the nature of high-volume data, dedicated applications would have to be provided, able to operate on such volumes of data.
Extendability
-------------
-HV-VES was designed to allow for extendability - by adding new domain-specific PROTO files.
+HV-VES is designed to be extendable by adding new domain-specific proto files.
-The PROTO file, which contains the VES CommonHeader, comes with a binary-type Payload parameter, where domain-specific data shall be placed.
-Domain-specific data are encoded as well with GPB, and they do require a domain-specific PROTO file to decode the data.
-This domain-specific PROTO needs to be shared with analytics applications - HV-VES is not analyzing domain-specific data.
+The proto file (with the VES CommonHeader) comes with a binary-type **Payload** parameter, where domain-specific data should be placed.
+Domain-specific data are encoded as well with GPB. A domain-specific proto file is required to decode the data.
+This domain-specific proto has to be shared with analytics applications - HV-VES does not analyze domain-specific data.
-In order to support the RT-PM use-case, HV-VES includes a "PERF3GPP" domain PROTO file, as within this domain,
-the high volume data is expected to be reported to HV-VES collector.
-Still, there are no limitations to define additional domains, based on existing VES domains (like Fault, Heartbeat)
-or completely new domains. New domains can be added "when needed".
+In order to support the RT-PM use case, HV-VES includes a **perf3gpp** domain proto file. Within this domain, high-volume data are expected to be reported to the HV-VES collector.
+Additional domains can be defined based on existing VES domains (like Fault, Heartbeat) or completely new domains. New domains can be added when needed.
-GPB PROTO files are backwards compatible, and such a new domain could be added without affecting existing systems.
+GPB proto files are backwards compatible, and a new domain can be added without affecting existing systems.
-Analytics applications will have to be as well equipped with this new domain-specific PROTO file.
-Currently, these additional, domain specific proto files could be simply added to respective repos of HV-VES collector.
+Analytics applications have to be equipped with the new domain-specific proto file as well.
+Currently, these additional domain-specific proto files can be added to the respective repositories of the HV-VES collector.
Implementation details
----------------------
- Project Reactor is used as a backbone of the internal architecture.
- Netty is used by means of reactor-netty library.
-- We are using Kotlin so we can write very concise code with great interoperability with existing Java libraries.
-- Types defined in Λrrow library are also used when it improves readability or general cleanness of the code. \ No newline at end of file
+- Kotlin is used to write concise code with great interoperability with existing Java libraries.
+- Types defined in Λrrow library are also used when it improves readability or general cleanness of the code.
+
+
diff --git a/docs/sections/services/ves-hv/example-event.rst b/docs/sections/services/ves-hv/example-event.rst
new file mode 100644
index 00000000..3a335395
--- /dev/null
+++ b/docs/sections/services/ves-hv/example-event.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+HV-VES example event
+=====================
+
+HV-VES Collector accepts messages in the format provided in :ref:`tcp_endpoint`.
+
+This YAML file represents a message with sample values; it should be treated as an abstract example showing the structure of the message.
+The message consists of several parts, each encoded in a different way. The encoding is noted in inline comments, together with the corresponding definition file names.
+
+Field values can be changed according to the types specified in the noted definition files.
+
+.. literalinclude:: WTP.yaml
+ :language: yaml
diff --git a/docs/sections/services/ves-hv/index.rst b/docs/sections/services/ves-hv/index.rst
index 483ddbd7..8dec5693 100644
--- a/docs/sections/services/ves-hv/index.rst
+++ b/docs/sections/services/ves-hv/index.rst
@@ -26,10 +26,15 @@ High Volume VES Collector overview and functions
.. toctree::
:maxdepth: 1
- ./design.rst
- ./configuration.rst
- ./delivery.rst
- ./installation.rst
+
+ ./architecture
+ ./design
+ ./run-time-configuration
+ ./repositories
+ ./deployment
+ ./troubleshooting
`Offered APIs`_
+ ./authorization
+ ./example-event
-.. _`Offered APIs`: ../../apis/ves-hv.rst \ No newline at end of file
+.. _`Offered APIs`: ../../apis/ves-hv
diff --git a/docs/sections/services/ves-hv/repositories.rst b/docs/sections/services/ves-hv/repositories.rst
new file mode 100644
index 00000000..98034f20
--- /dev/null
+++ b/docs/sections/services/ves-hv/repositories.rst
@@ -0,0 +1,22 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _repositories:
+
+Repositories
+============
+
+**HV-VES** is delivered as a docker container and published in the ONAP Nexus repository, following the image naming convention.
+
+The full image name is `onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main`_.
+
+.. _`onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main`: https://nexus3.onap.org/#browse/search=keyword%3Dmain:7f6379d32f8dd78f1ec5ed038decc99e
+
+There are also simulators published as docker images. These simulators are used internally during CSIT tests.
+
+The full simulator names are `onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-dcae-app-simulator`_ and `onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-xnf-simulator`_.
+
+.. _`onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-dcae-app-simulator`: https://nexus3.onap.org/#browse/search=keyword%3Dapp-simulator%20AND%20version%3Dlatest:22b3686a9064fa3d301b54dedc8da8d1
+.. _`onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-xnf-simulator`: https://nexus3.onap.org/#browse/search=keyword%3Dapp-simulator%20AND%20version%3Dlatest:912d0fe7b8192392927ae1ac6dcb50ea
+
+For source code, see https://gerrit.onap.org/r/#/admin/projects/dcaegen2/collectors/hv-ves.
diff --git a/docs/sections/services/ves-hv/run-time-configuration.rst b/docs/sections/services/ves-hv/run-time-configuration.rst
new file mode 100644
index 00000000..76d622c6
--- /dev/null
+++ b/docs/sections/services/ves-hv/run-time-configuration.rst
@@ -0,0 +1,59 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _run_time_configuration:
+
+Run-Time configuration
+======================
+
+(see :ref:`deployment`)
+
+HV-VES can fetch its configuration directly from the Consul service in the following JSON format:
+
+.. code-block:: json
+
+ {
+ "dmaap.kafkaBootstrapServers": "message-router-kafka:9093",
+ "collector.routing": [
+ {
+ "fromDomain": "perf3gpp",
+ "toTopic": "HV_VES_PERF3GPP"
+ },
+ {
+ "fromDomain": "heartbeat",
+ "toTopic": "HV_VES_HEARTBEAT"
+ },
+ ...
+ ]
+ }
+
+HV-VES does not verify the correctness of the configuration data and uses it as is. In particular:
+
+- **dmaap.kafkaBootstrapServers** is used as the host name and port for publishing events to the Kafka service.
+- Every object in the **collector.routing** array specifies one event publishing route.
+
+  - The **fromDomain** node should be a case-sensitive string containing a single domain name taken from the VES Common Event Header specification.
+  - The **toTopic** node should be a case-sensitive Kafka topic name.
+  - When HV-VES receives a VES Event, it checks the domain contained in it. If a route from that domain to any topic exists in the configuration, HV-VES publishes the event to the topic from that route (see the sketch below).
+  - If there are two routes from the same domain to different topics, it is undefined which route will be used.
+
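+The routing lookup described above can be illustrated with the following Kotlin sketch; the ``Route`` data class and ``topicFor`` function are hypothetical and only mirror the documented behaviour:
+
+.. code-block:: kotlin
+
+    // One entry of the collector.routing array.
+    data class Route(val fromDomain: String, val toTopic: String)
+
+    // The first route whose fromDomain matches the event's domain decides the target Kafka topic;
+    // null means the domain is unsupported and the event is skipped.
+    fun topicFor(domain: String, routing: List<Route>): String? =
+        routing.firstOrNull { it.fromDomain == domain }?.toTopic
+
+    // Example: topicFor("perf3gpp", listOf(Route("perf3gpp", "HV_VES_PERF3GPP"))) == "HV_VES_PERF3GPP"
+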
+The configuration is created from the HV-VES Cloudify blueprint by specifying the **application_config** node during ONAP OOM/Kubernetes deployment. Example node specification:
+
+.. code-block:: YAML
+
+ node_templates:
+ hv-ves:
+ properties:
+ application_config:
+ dmaap.kafkaBootstrapServers: message-router-kafka:9092
+ collector.routing:
+ fromDomain: perf3gpp
+ toTopic: HV_VES_PERF3GPP
+
+The endpoint from which HV-VES fetches its configuration can be set during deployment, as described in :ref:`deployment`.
+
+HV-VES waits 10 seconds by default (configurable during deployment with the **first-request-delay** option, see :ref:`deployment`) before the first attempt to retrieve the configuration from Consul. This prevents possible synchronization issues. During that time, HV-VES declines any connection attempts from xNFs (VNF/PNF).
+
+After the first request, HV-VES asks for the configuration at fixed intervals, configurable from the command line (**request-interval**). By default, the interval is set to 5 seconds.
+
+If it fails to retrieve the configuration, the collector temporarily extends this interval and retries. After five unsuccessful attempts, the container becomes unhealthy and cannot recover. HV-VES in this state is unusable and the container should be restarted.
diff --git a/docs/sections/services/ves-hv/troubleshooting.rst b/docs/sections/services/ves-hv/troubleshooting.rst
new file mode 100644
index 00000000..49037738
--- /dev/null
+++ b/docs/sections/services/ves-hv/troubleshooting.rst
@@ -0,0 +1,506 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+.. _troubleshooting:
+
+Troubleshooting
+===============
+
+Deployment/Installation errors
+------------------------------
+
+**Missing required parameters**
+
+::
+
+ Unexpected error when parsing command line arguments
+ usage: java org.onap.dcae.collectors.veshv.main.MainKt
+ Required parameters: p, c
+ -c,--config-url <arg> URL of ves configuration on consul
+ -d,--first-request-delay <arg> Delay of first request to consul in
+ seconds
+ -H,--health-check-api-port <arg> Health check rest api listen port
+ -I,--request-interval <arg> Interval of consul configuration
+ requests in seconds
+ -i,--idle-timeout-sec <arg> Idle timeout for remote hosts. After
+ given time without any data exchange
+ the
+ connection might be closed.
+ -k,--key-store <arg> Key store in PKCS12 format
+ -kp,--key-store-password <arg> Key store password
+ -l,--ssl-disable Disable SSL encryption
+ -m,--max-payload-size <arg> Maximum supported payload size in
+ bytes
+ -p,--listen-port <arg> Listen port
+ -t,--trust-store <arg> File with trusted certificate bundle
+ in PKCS12 format
+ -tp,--trust-store-password <arg> Trust store password
+ -u,--dummy If present will start in dummy mode
+ (dummy external services)
+ All parameters can be specified as environment variables using
+ upper-snake-case full name with prefix `VESHV_`.
+
+
+This log message is printed when you do not specify the required parameters (via the command line or in environment variables).
+As described in the above log message, there are a few required parameters:
+**listen port**, **config url**, and either **trust store password** and **key store password** (if you want to use SSL) or just **ssl disable** (if not).
+
+To get rid of this error, specify the required parameters. For example:
+
+- Via command line:
+
+::
+
+ <hv-ves run command> --listen-port 6061 --config-url http://consul-url/key-path --key-store-password password --trust-store-password password
+
+- By defining environment variables:
+
+::
+
+ export VESHV_LISTEN_PORT=6061
+ export VESHV_CONFIG_URL=http://consul-url/key-path
+ export VESHV_KEY_STORE_PASSWORD=password
+ export VESHV_TRUST_STORE_PASSWORD=password
+
+**NOTE**
+
+Command line arguments have priority over environment variables. If you configure a parameter in both ways, **HV-VES** applies the one from the command line.
+
+For more information about **HV-VES** configuration parameters, see :ref:`deployment`.
+
+Configuration errors
+--------------------
+
+**Consul service not responding**
+
+::
+
+ ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | 2018-10-16T13:13:01.155Z | ERROR | Failed to get resource on path: http://localhost:8500/v1/hv/veshv-config (Connection refused: localhost/127.0.0.1:8500) | | reactor-http-client-epoll-8
+ ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | 2018-10-16T13:13:01.155Z | DEBUG | Nested exception: | java.net.ConnectException: Connection refused
+ ... 10 common frames omitted
+ Wrapped by: io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: localhost/127.0.0.1:8500
+ at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
+ at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
+ at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:327)
+ at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:616)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:563)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:480)
+ at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
+ at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
+ at java.lang.Thread.run(Thread.java:748)
+ | reactor-http-client-epoll-8
+ rs.veshv.impl.adapters.ConsulConfigurationProvider | 2018-10-16T13:13:01.163Z | WARN | Could not get fresh configuration | java.net.ConnectException: Connection refused
+ ... 10 common frames omitted
+ Wrapped by: io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: localhost/127.0.0.1:8500
+ at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
+ at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
+ at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:327)
+ at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:616)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:563)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:480)
+ at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
+ at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
+ at java.lang.Thread.run(Thread.java:748)
+ | reactor-http-client-epoll-8
+
+
+
+The above logs indicate that **HV-VES** cannot connect to the Consul service under the URL given in the **--config-url** parameter.
+Make sure Consul is up and running and that the **IP + port** combination is correct.
+
+====
+
+**Missing configuration on Consul**
+
+::
+
+ ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | 2018-10-16T13:52:20.585Z | ERROR | Failed to get resource on path: http://consul:8500/v1/kv/veshv-config (HTTP request failed with code: 404.
+ Failing URI: /v1/kv/veshv-config) | | reactor-http-nio-1
+ ap.dcae.collectors.veshv.impl.adapters.HttpAdapter | 2018-10-16T13:52:20.586Z | DEBUG | Nested exception: | reactor.ipc.netty.http.client.HttpClientException: HTTP request failed with code: 404.
+ Failing URI: /v1/kv/veshv-config
+ | reactor-http-nio-1
+ rs.veshv.impl.adapters.ConsulConfigurationProvider | 2018-10-16T13:52:20.591Z | WARN | Could not get fresh configuration | reactor.ipc.netty.http.client.HttpClientException: HTTP request failed with code: 404.
+ Failing URI: /v1/kv/veshv-config
+ | reactor-http-nio-1
+
+
+**HV-VES** logs this information when it is connected to Consul but cannot find any JSON configuration under the given key, which in this case is **veshv-config**.
+For more information, see :ref:`run_time_configuration`.
+
+====
+
+**Invalid configuration format**
+
+::
+
+ rs.veshv.impl.adapters.ConsulConfigurationProvider | 2018-10-16T14:06:14.792Z | INFO | Obtained new configuration from consul:
+ {
+ "invalidKey": "value"
+ } | | reactor-http-nio-1
+ rs.veshv.impl.adapters.ConsulConfigurationProvider | 2018-10-16T14:06:14.796Z | WARN | Could not get fresh configuration | java.lang.NullPointerException: null
+ at org.glassfish.json.JsonObjectBuilderImpl$JsonObjectImpl.getString(JsonObjectBuilderImpl.java:257)
+ at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider.createCollectorConfiguration(ConsulConfigurationProvider.kt:113)
+ at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider.access$createCollectorConfiguration(ConsulConfigurationProvider.kt:44)
+ at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider$invoke$6.invoke(ConsulConfigurationProvider.kt:80)
+ at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider$invoke$6.invoke(ConsulConfigurationProvider.kt:44)
+ at org.onap.dcae.collectors.veshv.impl.adapters.ConsulConfigurationProvider$sam$java_util_function_Function$0.apply(ConsulConfigurationProvider.kt)
+ at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:100)
+ at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:108)
+ at reactor.core.publisher.FluxFlatMap$FlatMapMain.tryEmitScalar(FluxFlatMap.java:432)
+ at reactor.core.publisher.FluxFlatMap$FlatMapMain.onNext(FluxFlatMap.java:366)
+ at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:108)
+ at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:108)
+ at reactor.core.publisher.FluxFlatMap$FlatMapMain.tryEmit(FluxFlatMap.java:484)
+ at reactor.core.publisher.FluxFlatMap$FlatMapInner.onNext(FluxFlatMap.java:916)
+ at reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber.onNext(FluxMapFuseable.java:115)
+ at reactor.core.publisher.Operators$MonoSubscriber.complete(Operators.java:1083)
+ at reactor.core.publisher.MonoFlatMap$FlatMapInner.onNext(MonoFlatMap.java:241)
+ at reactor.core.publisher.MonoNext$NextSubscriber.onNext(MonoNext.java:76)
+ at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:108)
+ at reactor.core.publisher.FluxFilter$FilterSubscriber.onNext(FluxFilter.java:97)
+ at reactor.ipc.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:213)
+ at reactor.ipc.netty.channel.FluxReceive.onInboundNext(FluxReceive.java:329)
+ at reactor.ipc.netty.channel.ChannelOperations.onInboundNext(ChannelOperations.java:311)
+ at reactor.ipc.netty.http.client.HttpClientOperations.onInboundNext(HttpClientOperations.java:605)
+ at reactor.ipc.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:138)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
+ at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
+ at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelRead(CombinedChannelDuplexHandler.java:438)
+ at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)
+ at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)
+ at io.netty.channel.CombinedChannelDuplexHandler.channelRead(CombinedChannelDuplexHandler.java:253)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
+ at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
+ at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
+ at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
+ at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:628)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:563)
+ at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:480)
+ at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
+ at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
+ at java.lang.Thread.run(Thread.java:748)
+ | reactor-http-nio-1
+
+
+This log is printed when you upload a configuration in an invalid format (for example, with missing fields). In the first log line you can see that the configuration on Consul is:
+
+.. code-block:: json
+
+ {
+ "invalidKey": "value"
+ }
+
+The above is not a valid **HV-VES** configuration; therefore, **HV-VES** does not apply it and becomes **unhealthy**.
+For more information on **Consul configuration**, see :ref:`run_time_configuration`.
+
+
+Message handling errors
+-----------------------
+
+**Handling messages when an invalid Kafka URL is specified**
+
+::
+
+ | reactor-tcp-server-epoll-6
+ org.apache.kafka.clients.ClientUtils | 2018-10-19T08:29:36.917Z | WARN | Removing server invalid-kafka:9093 from bootstrap.servers as DNS resolution failed for invalid-kafka | | reactor-tcp-server-epoll-6
+ org.apache.kafka.clients.producer.KafkaProducer | 2018-10-19T08:29:36.918Z | INFO | [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 0 ms. | | reactor-tcp-server-epoll-6
+ org.onap.dcae.collectors.veshv.impl.VesHvCollector | 2018-10-19T08:29:36.962Z | WARN | Error while handling message stream: org.apache.kafka.common.KafkaException (Failed to construct kafka producer) | | reactor-tcp-server-epoll-6
+ org.onap.dcae.collectors.veshv.impl.VesHvCollector | 2018-10-19T08:29:36.966Z | DEBUG | Detailed stack trace | org.apache.kafka.common.config.ConfigException: No resolvable bootstrap urls given in bootstrap.servers
+ at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:64)
+ at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:396)
+ ... 24 common frames omitted
+ Wrapped by: org.apache.kafka.common.KafkaException: Failed to construct kafka producer
+ at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:441)
+ at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:285)
+ at reactor.kafka.sender.internals.ProducerFactory.createProducer(ProducerFactory.java:33)
+ at reactor.kafka.sender.internals.DefaultKafkaSender.lambda$new$0(DefaultKafkaSender.java:90)
+ at reactor.core.publisher.MonoCallable.subscribe(MonoCallable.java:57)
+ at reactor.core.publisher.MonoPeekFuseable.subscribe(MonoPeekFuseable.java:74)
+ at reactor.core.publisher.Mono.subscribe(Mono.java:3088)
+ at reactor.core.publisher.MonoProcessor.add(MonoProcessor.java:531)
+ at reactor.core.publisher.MonoProcessor.subscribe(MonoProcessor.java:444)
+ at reactor.core.publisher.MonoFlatMapMany.subscribe(MonoFlatMapMany.java:49)
+ at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:80)
+ at reactor.core.publisher.FluxFilter.subscribe(FluxFilter.java:52)
+ at reactor.core.publisher.FluxMap.subscribe(FluxMap.java:62)
+ at reactor.core.publisher.FluxDefer.subscribe(FluxDefer.java:55)
+ at reactor.core.publisher.FluxPeek.subscribe(FluxPeek.java:83)
+ at reactor.core.publisher.FluxDoFinally.subscribe(FluxDoFinally.java:73)
+ at reactor.core.publisher.FluxOnErrorResume.subscribe(FluxOnErrorResume.java:47)
+ at reactor.core.publisher.MonoIgnoreElements.subscribe(MonoIgnoreElements.java:37)
+ at reactor.ipc.netty.channel.ChannelOperations.applyHandler(ChannelOperations.java:381)
+ at reactor.ipc.netty.channel.ChannelOperations.onHandlerStart(ChannelOperations.java:296)
+ at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
+ at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:404)
+ at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:315)
+ at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
+ at java.lang.Thread.run(Thread.java:748)
+ | reactor-tcp-server-epoll-6
+ p.dcae.collectors.veshv.impl.socket.NettyTcpServer | 2018-10-19T08:29:36.971Z | INFO | Connection from /172.26.0.6:55574 has been closed | | reactor-tcp-server-epoll-6
+
+
+**HV-VES** logs the above when it handles a message and the currently applied configuration has an invalid Kafka bootstrap server defined.
+The configuration read from Consul in this case is:
+
+.. code-block:: json
+
+ {
+ "dmaap.kafkaBootstrapServers": "invalid-kafka:9093",
+ "collector.routing": [
+ {
+ "fromDomain": "perf3gpp",
+ "toTopic": "HV_VES_PERF3GPP"
+ },
+ {
+ "fromDomain": "heartbeat",
+ "toTopic": "HV_VES_HEARTBEAT"
+ }
+ ]
+ }
+
+where **invalid-kafka:9093** is not an existing **IP + port** combination.
+
+
+====
+
+**First creation of topics on Kafka**
+
+
+On the first attempt to create and publish to a given Kafka topic, **HV-VES** logs the following warnings but creates the requested topics anyway.
+
+::
+
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:11:53.396Z | WARN | [Producer clientId=producer-1] Error while fetching metadata with correlation id 1 : {HV_VES_PERF3GPP=LEADER_NOT_AVAILABLE} | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:11:53.524Z | WARN | [Producer clientId=producer-1] Error while fetching metadata with correlation id 3 : {HV_VES_PERF3GPP=LEADER_NOT_AVAILABLE} | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:11:53.636Z | WARN | [Producer clientId=producer-1] Error while fetching metadata with correlation id 4 : {HV_VES_PERF3GPP=LEADER_NOT_AVAILABLE} | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:11:53.749Z | WARN | [Producer clientId=producer-1] Error while fetching metadata with correlation id 5 : {HV_VES_PERF3GPP=LEADER_NOT_AVAILABLE} | | kafka-producer-network-thread | producer-1
+
+====
+
+**Kafka service became unavailable after the producer for a given topic was successfully created**
+
+
+After receiving a **VES Common Event**, **HV-VES** creates a producer for the given topic and keeps it for the whole lifetime of the application.
+If the Kafka service becomes unreachable after the producer has been created, you will see the following logs when HV-VES tries to establish another connection with the Kafka server.
+
+::
+
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:04:08.604Z | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available. | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:04:11.896Z | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available. | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:04:14.968Z | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available. | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:04:18.040Z | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available. | | kafka-producer-network-thread | producer-1
+ org.apache.kafka.clients.NetworkClient | 2018-10-22T10:04:21.111Z | WARN | [Producer clientId=producer-1] Connection to node 1001 could not be established. Broker may not be available. | | kafka-producer-network-thread | producer-1
+ reactor.kafka.sender.internals.DefaultKafkaSender | 2018-10-22T10:04:23.519Z | ERROR | error {} | org.apache.kafka.common.errors.TimeoutException: Expiring 1 record(s) for HV_VES_PERF3GPP-0: 30050 ms has passed since batch creation plus linger time
+ | kafka-producer-network-thread | producer-1
+ cae.collectors.veshv.impl.adapters.kafka.KafkaSink | 2018-10-22T10:04:23.522Z | WARN | Failed to send message to Kafka | org.apache.kafka.common.errors.TimeoutException: Expiring 1 record(s) for HV_VES_PERF3GPP-0: 30050 ms has passed since batch creation plus linger time
+ | single-1
+ org.onap.dcae.collectors.veshv.impl.VesHvCollector | 2018-10-22T10:04:23.528Z | WARN | Error while handling message stream: org.apache.kafka.common.errors.TimeoutException (Expiring 1 record(s) for HV_VES_PERF3GPP-0: 30050 ms has passed since batch creation plus linger time) | | single-1
+
+To resolve this issue, an **HV-VES** restart is required.
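+
+How the restart is performed depends on how the collector was deployed; the commands below are only a sketch and the container/pod names are placeholders that have to match your environment:
+
+::
+
+    # plain Docker deployment - restart the collector container
+    docker restart <hv-ves-container-name>
+
+    # Kubernetes deployment - delete the pod and let its controller recreate it
+    kubectl -n onap delete pod <hv-ves-pod-name>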
+
+====
+
+**Message with too large a payload**
+
+::
+
+ g.onap.dcae.collectors.veshv.impl.VesHvCollector | 2018-10-19T08:53:18.349Z | WARN | Error while handling message stream: org.onap.dcae.collectors.veshv.impl.wire.WireFrameException (PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes)) | | single-1
+ org.onap.dcae.collectors.veshv.impl.VesHvCollector | 2018-10-19T08:53:18.349Z | DEBUG | Detailed stack trace | org.onap.dcae.collectors.veshv.impl.wire.WireFrameException: PayloadSizeExceeded: payload size exceeds the limit (1048576 bytes)
+ at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$onError$1$1.invoke(WireChunkDecoder.kt:67)
+ at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$onError$1$1.invoke(WireChunkDecoder.kt:38)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
+ at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
+ at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
+ at arrow.effects.IO.unsafeRunSync(IO.kt:106)
+ at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$generateFrames$1.accept(WireChunkDecoder.kt:61)
+ at org.onap.dcae.collectors.veshv.impl.wire.WireChunkDecoder$generateFrames$1.accept(WireChunkDecoder.kt:38)
+ at reactor.core.publisher.FluxGenerate.lambda$new$1(FluxGenerate.java:56)
+ at reactor.core.publisher.FluxGenerate$GenerateSubscription.slowPath(FluxGenerate.java:262)
+ at reactor.core.publisher.FluxGenerate$GenerateSubscription.request(FluxGenerate.java:204)
+ at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.request(FluxPeekFuseable.java:138)
+ at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.set(Operators.java:1454)
+ at reactor.core.publisher.Operators$MultiSubscriptionSubscriber.onSubscribe(Operators.java:1328)
+ at reactor.core.publisher.FluxPeekFuseable$PeekFuseableSubscriber.onSubscribe(FluxPeekFuseable.java:172)
+ at reactor.core.publisher.FluxGenerate.subscribe(FluxGenerate.java:83)
+ at reactor.core.publisher.FluxPeekFuseable.subscribe(FluxPeekFuseable.java:86)
+ at reactor.core.publisher.FluxDefer.subscribe(FluxDefer.java:55)
+ at reactor.core.publisher.Flux.subscribe(Flux.java:6877)
+ at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.drain(FluxConcatMap.java:418)
+ at reactor.core.publisher.FluxConcatMap$ConcatMapImmediate.onNext(FluxConcatMap.java:241)
+ at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:185)
+ at reactor.core.publisher.FluxPeek$PeekSubscriber.onNext(FluxPeek.java:185)
+ at reactor.core.publisher.FluxMap$MapSubscriber.onNext(FluxMap.java:108)
+ at reactor.ipc.netty.channel.FluxReceive.drainReceiver(FluxReceive.java:213)
+ at reactor.ipc.netty.channel.FluxReceive.onInboundNext(FluxReceive.java:329)
+ at reactor.ipc.netty.channel.ChannelOperations.onInboundNext(ChannelOperations.java:311)
+ at reactor.ipc.netty.channel.ChannelOperationsHandler.channelRead(ChannelOperationsHandler.java:138)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
+ at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
+ at io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:286)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
+ at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)
+ at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1434)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)
+ at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)
+ at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:965)
+ at io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:808)
+ at io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:410)
+ at io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:310)
+ at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:884)
+ at java.lang.Thread.run(Thread.java:748)
+ | single-1
+ p.dcae.collectors.veshv.impl.socket.NettyTcpServer | 2018-10-19T08:53:18.351Z | INFO | Connection from /172.26.0.6:56924 has been closed | | single-1
+
+
+
+The above log is printed when the message payload size is too large. **HV-VES** does not handle messages that exceed the specified payload size. The default limit is **1048576 bytes (1 MiB)**, but it can be configured via the command line or environment variables.
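+
+A minimal sketch of how the limit could be raised at deployment time; the option and environment variable names below are assumptions and should be verified against the deployment documentation of your release:
+
+::
+
+    # command line option (name assumed), value in bytes
+    <hv-ves start command> --max-payload-size 2097152
+
+    # or the corresponding environment variable (name assumed as well)
+    VESHV_MAX_PAYLOAD_SIZE=2097152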
+
+
+
+====
+
+**Other invalid messages**
+
+
+Messages with an **invalid wire frame** or **invalid GPB** data are omitted and **HV-VES** only logs the connection-related entries as follows:
+
+::
+
+ p.dcae.collectors.veshv.impl.socket.NettyTcpServer | 2018-10-19T09:03:03.345Z | INFO | Handling connection from /172.26.0.6:57432 | | reactor-tcp-server-epoll-5
+ p.dcae.collectors.veshv.impl.socket.NettyTcpServer | 2018-10-19T09:04:03.351Z | INFO | Idle timeout of 60 s reached. Closing connection from /172.26.0.6:57432... | | reactor-tcp-server-epoll-5
+ p.dcae.collectors.veshv.impl.socket.NettyTcpServer | 2018-10-19T09:04:03.353Z | INFO | Connection from /172.26.0.6:57432 has been closed | | reactor-tcp-server-epoll-5
+ p.dcae.collectors.veshv.impl.socket.NettyTcpServer | 2018-10-19T09:04:03.354Z | DEBUG | Channel (/172.26.0.6:57432) closed successfully. | | reactor-tcp-server-epoll-5
+
+
+For more information, see the :ref:`hv_ves_behaviors` section.
+
+Authorization related errors
+----------------------------
+
+**WARNING: SSL/TLS authorization is a part of an experimental feature for the ONAP Casablanca release and should be treated as unstable and subject to change in future releases.**
+**For more information, see** :ref:`authorization`.
+
+**Key or trust store missing**
+
+::
+
+ org.onap.dcae.collectors.veshv.main | 2018-10-22T06:51:54.191Z | ERROR | Failed to start a server | java.io.FileNotFoundException: /etc/ves-hv/server.p12 (No such file or directory)
+ at java.io.FileInputStream.open0(Native Method)
+ at java.io.FileInputStream.open(FileInputStream.java:195)
+ at java.io.FileInputStream.<init>(FileInputStream.java:138)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.UtilsKt$streamFromFile$1.invoke(utils.kt:79)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.UtilsKt$streamFromFile$1.invoke(utils.kt)
+ at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.loadKeyStoreFromFile(SslFactories.kt:50)
+ at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.keyManagerFactory(SslFactories.kt:43)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.ServerSslContextFactory.jdkContext(ServerSslContextFactory.kt:42)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContextWithConfiguredCerts(SslContextFactory.kt:49)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContext(SslContextFactory.kt:39)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.configureServer(NettyTcpServer.kt:61)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.access$configureServer(NettyTcpServer.kt:46)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:52)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:46)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$sam$java_util_function_Consumer$0.accept(NettyTcpServer.kt)
+ at reactor.ipc.netty.tcp.TcpServer.<init>(TcpServer.java:149)
+ at reactor.ipc.netty.tcp.TcpServer$Builder.build(TcpServer.java:278)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:53)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:46)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
+ at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
+ at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
+ at arrow.effects.IO.unsafeRunSync(IO.kt:106)
+ at org.onap.dcae.collectors.veshv.utils.arrow.EffectsKt.unsafeRunEitherSync(effects.kt:50)
+ at org.onap.dcae.collectors.veshv.main.MainKt.main(main.kt:41)
+ | main
+
+
+The above error is logged when the key store is not provided. Similarly, when the trust store is not provided, a missing **/etc/ves-hv/trust.p12** file is reported.
+**server.p12** and **trust.p12** are the default names of the key and trust stores. They can be changed by specifying the **--key-store** or **--trust-store** command line arguments on deployment, as sketched below.
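+
+For example, to point the collector at non-default store locations (the start command itself is deployment-specific and shown only as a placeholder):
+
+::
+
+    <hv-ves start command> \
+        --key-store /etc/ves-hv/custom-server.p12 \
+        --trust-store /etc/ves-hv/custom-trust.p12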
+
+====
+
+**Invalid credentials**
+
+::
+
+ org.onap.dcae.collectors.veshv.main | 2018-10-22T06:59:24.039Z | ERROR | Failed to start a server | java.security.UnrecoverableKeyException: failed to decrypt safe contents entry: javax.crypto.BadPaddingException: Given final block not properly padded. Such issues can arise if a bad key is used during decryption.
+ ... 23 common frames omitted
+ Wrapped by: java.io.IOException: keystore password was incorrect
+ at sun.security.pkcs12.PKCS12KeyStore.engineLoad(PKCS12KeyStore.java:2059)
+ at java.security.KeyStore.load(KeyStore.java:1445)
+ at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.loadKeyStoreFromFile(SslFactories.kt:51)
+ at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.keyManagerFactory(SslFactories.kt:43)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.ServerSslContextFactory.jdkContext(ServerSslContextFactory.kt:42)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContextWithConfiguredCerts(SslContextFactory.kt:49)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContext(SslContextFactory.kt:39)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.configureServer(NettyTcpServer.kt:61)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.access$configureServer(NettyTcpServer.kt:46)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:52)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:46)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$sam$java_util_function_Consumer$0.accept(NettyTcpServer.kt)
+ at reactor.ipc.netty.tcp.TcpServer.<init>(TcpServer.java:149)
+ at reactor.ipc.netty.tcp.TcpServer$Builder.build(TcpServer.java:278)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:53)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:46)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
+ at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
+ at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
+ at arrow.effects.IO.unsafeRunSync(IO.kt:106)
+ at org.onap.dcae.collectors.veshv.utils.arrow.EffectsKt.unsafeRunEitherSync(effects.kt:50)
+ at org.onap.dcae.collectors.veshv.main.MainKt.main(main.kt:41)
+ | main
+
+
+The key or trust store password provided in the configuration is invalid.
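+
+To check outside of **HV-VES** whether a password matches a store, the standard JDK **keytool** can be used; it prompts for the store password and fails with a similar "keystore password was incorrect" message on a mismatch:
+
+::
+
+    keytool -list -storetype PKCS12 -keystore /etc/ves-hv/server.p12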
+
+====
+
+**Invalid key store file**
+
+::
+
+ org.onap.dcae.collectors.veshv.main | 2018-10-22T09:11:38.200Z | ERROR | Failed to start a server | java.io.IOException: DerInputStream.getLength(): lengthTag=111, too big.
+ at sun.security.util.DerInputStream.getLength(DerInputStream.java:599)
+ at sun.security.util.DerValue.init(DerValue.java:391)
+ at sun.security.util.DerValue.<init>(DerValue.java:332)
+ at sun.security.util.DerValue.<init>(DerValue.java:345)
+ at sun.security.pkcs12.PKCS12KeyStore.engineLoad(PKCS12KeyStore.java:1938)
+ at java.security.KeyStore.load(KeyStore.java:1445)
+ at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.loadKeyStoreFromFile(SslFactories.kt:51)
+ at org.onap.dcae.collectors.veshv.ssl.impl.SslFactories.keyManagerFactory(SslFactories.kt:43)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.ServerSslContextFactory.jdkContext(ServerSslContextFactory.kt:42)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContextWithConfiguredCerts(SslContextFactory.kt:49)
+ at org.onap.dcae.collectors.veshv.ssl.boundary.SslContextFactory.createSslContext(SslContextFactory.kt:39)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.configureServer(NettyTcpServer.kt:61)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer.access$configureServer(NettyTcpServer.kt:46)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:52)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1$ctx$1.invoke(NettyTcpServer.kt:46)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$sam$java_util_function_Consumer$0.accept(NettyTcpServer.kt)
+ at reactor.ipc.netty.tcp.TcpServer.<init>(TcpServer.java:149)
+ at reactor.ipc.netty.tcp.TcpServer$Builder.build(TcpServer.java:278)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:53)
+ at org.onap.dcae.collectors.veshv.impl.socket.NettyTcpServer$start$1.invoke(NettyTcpServer.kt:46)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:28)
+ at arrow.effects.IO$Companion$invoke$1.invoke(IO.kt:22)
+ at arrow.effects.IORunLoop.step(IORunLoop.kt:50)
+ at arrow.effects.IO.unsafeRunTimed(IO.kt:109)
+ at arrow.effects.IO.unsafeRunSync(IO.kt:106)
+ at org.onap.dcae.collectors.veshv.utils.arrow.EffectsKt.unsafeRunEitherSync(effects.kt:50)
+ at org.onap.dcae.collectors.veshv.main.MainKt.main(main.kt:41)
+ | main
+
+The above is logged when the provided key store has invalid or corrupted content.
+This log also appears when you try to use a key store/trust store in an archive format other than **PKCS12** (the only store type supported by **HV-VES**).
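+
+A store in another format (for example JKS) can be converted to **PKCS12** with standard JDK tooling; the file names below are examples:
+
+::
+
+    keytool -importkeystore \
+        -srckeystore server.jks -srcstoretype JKS \
+        -destkeystore server.p12 -deststoretype PKCS12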
+
+