author | k.kedron <k.kedron@partner.samsung.com> | 2019-08-28 14:31:52 +0200
committer | Ofir Sonsino <ofir.sonsino@intl.att.com> | 2019-08-30 07:46:34 +0000
commit | 64636c2148414c1fa5da4b46a69570cdab003837 (patch)
tree | ada4dc163443427420217de5e716b532329ff255 /docker/docker_be/chef-repo/cookbooks
parent | 5a4e1827b867a2de46c14f32449b37d0ff60d1fd (diff)
Full HTTPS support in the dcaedt-be
Full HTTPS support:
- Updated the onap/base_sdc-jetty docker image version
- Updated the chef script to properly use the new docker image
- Updated the JVM configuration to support calls to
  the SDC components using HTTPS.
- Added support for switching from HTTP to HTTPS in the python script
- Added a buildRestClient method to create the CloseableHttpClient
  supporting the SSL connection (see the sketch below)
- Checkstyle fixes in the recipes
- Updated docker_run.sh:
  - Changed JAVA_OPTIONS
  - Used the secure connection for the health check
Issue-ID: SDC-2477
Signed-off-by: Krystian Kedron <k.kedron@partner.samsung.com>
Change-Id: I7bf3d307e5765fa75a37ba0a4b41fd7fa87d28ab
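The buildRestClient change itself lives in the Java backend rather than in the cookbook tree covered by this diffstat, so the snippet below is only a minimal sketch of how a CloseableHttpClient with SSL support is commonly built with Apache HttpClient 4.x. The RestClientFactory class name and the trustStorePath/trustStorePassword parameters are illustrative assumptions, not the actual dcaedt-be code.

```java
import java.io.File;

import javax.net.ssl.SSLContext;

import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.ssl.SSLContexts;

public final class RestClientFactory {

    // Builds an HTTP client that trusts the certificates found in the given
    // truststore, so REST calls to the SDC components can go over HTTPS.
    // The truststore location and password would normally come from the JVM
    // configuration set up by the chef recipes; they are hypothetical
    // parameters here.
    public static CloseableHttpClient buildRestClient(String trustStorePath,
                                                      char[] trustStorePassword) throws Exception {
        SSLContext sslContext = SSLContexts.custom()
                .loadTrustMaterial(new File(trustStorePath), trustStorePassword)
                .build();
        SSLConnectionSocketFactory sslSocketFactory =
                new SSLConnectionSocketFactory(sslContext);
        return HttpClients.custom()
                .setSSLSocketFactory(sslSocketFactory)
                .build();
    }
}
```

With a client built this way, the backend can reach the SDC BE on its HTTPS port (8443 in attributes/default.rb below) instead of the plain-HTTP port.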
Diffstat (limited to 'docker/docker_be/chef-repo/cookbooks')
17 files changed, 4613 insertions, 0 deletions
diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/README.md b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/README.md
new file mode 100644
index 0000000..5d1fd04
--- /dev/null
+++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/README.md
@@ -0,0 +1,13 @@
+# Deploy-DCAE
+
+1. default.rb = file that defines default to parameters used inside the cookbook
+2. recipes/dcae_setup.rb = create and configure application.properties and logback-spring.xml using template defined in dcae-application.properties.erb with values from defaults.rb or from environments.json
+3. recipes/jetty_setup.rb = configure the jetty (ssl)
+4. roles/dcae-fe.json = chef roles (which recipe to run)
+5. Dockerfile = SDC Base line jettystartup.sh = run all of the above and run docker-entrypoint.sh that starts the jetty
+6. pom.xml = docker profile.
+ tag for onap
+ build-helper-maven-plugin = extract version from the war
+ maven-resources-plugin - copy the war to docker/target
+ docker-maven-plugin = creates the docker - configured with the dockerFile location
+
diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/attributes/default.rb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/attributes/default.rb
new file mode 100644
index 0000000..af6ad1e
--- /dev/null
+++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/attributes/default.rb
@@ -0,0 +1,24 @@
+default['JETTY_BASE'] = "/var/lib/jetty"
+default['JETTY_HOME'] = "/usr/local/jetty"
+default['APP_LOG_DIR'] = "/opt/logs/be"
+
+default['DCAE']['consumerName'] = "dcaeDesigner"
+default['DCAE']['consumerPass'] = "Aa123456"
+
+default['DCAE']['BE']['http_port'] = 8082
+default['DCAE']['BE']['https_port'] = 8444
+default['DCAE']['TOSCA_LAB']['http_port'] = 8085
+
+default['SDC']['BE']['http_port'] = 8080
+default['SDC']['BE']['https_port'] = 8443
+default['DCAE']['TOSCA_LAB']['https_port'] = 8085
+# TO CHANGE THE TRUSTSTORE CERT THE JVM CONFIGURATION
+# MUST BE ALSO CHANGE IN THE startup.sh FILE
+default['jetty']['keystore_pwd'] = "rTIS;B4kM]2GHcNK2c3B4&Ng"
+default['jetty']['keymanager_pwd'] = "rTIS;B4kM]2GHcNK2c3B4&Ng"
+default['jetty']['truststore_pwd'] = "Y,f975ZNJfVZhV*{+Y[}pA?0"
+
+default['disableHttp'] = true
+
+default['DCAE_TOSCA_LAB_VIP'] = "localhost"
+
diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/CommonEventFormat_v4.1.json b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/CommonEventFormat_v4.1.json
new file mode 100644
index 0000000..9c3ad36
--- /dev/null
+++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/CommonEventFormat_v4.1.json
@@ -0,0 +1,1123 @@
+{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "definitions": { + "codecsInUse": { + "description": "number of times an identified codec was used over the measurementInterval", + "type": "object", + "properties": { + "codecIdentifier": { "type": "string" }, + "numberInUse": { "type": "number" } + }, + "required": [ "codecIdentifier", "numberInUse" ] + }, + "command": { + "description": "command from an event collector toward an event source", + "type": "object", + "properties": { + "commandType": { + "type": "string", + "enum": [ + "heartbeatIntervalChange", + "measurementIntervalChange", + "provideThrottlingState", + "throttlingSpecification" + ] + }, + "eventDomainThrottleSpecification": { "$ref": "#/definitions/eventDomainThrottleSpecification" }, + "measurementInterval": { "type": "number" } + }, + "required": [ "commandType" ] + }, + "commandList": { + "description": "array of commands from an event 
collector toward an event source", + "type": "array", + "items": { + "$ref": "#/definitions/commandListEntry" + }, + "minItems": 0 + }, + "commandListEntry": { + "description": "reference to a command object", + "type": "object", + "properties": { + "command": {"$ref": "#/definitions/command"} + }, + "required": [ "command" ] + }, + "commonEventHeader": { + "description": "fields common to all events", + "type": "object", + "properties": { + "domain": { + "description": "the eventing domain associated with the event", + "type": "string", + "enum": [ + "fault", + "heartbeat", + "measurementsForVfScaling", + "mobileFlow", + "other", + "stateChange", + "syslog", + "thresholdCrossingAlert" + ] + }, + "eventId": { + "description": "event key that is unique to the event source", + "type": "string" + }, + "eventType": { + "description": "unique event topic name", + "type": "string" + }, + "functionalRole": { + "description": "function of the event source e.g., eNodeB, MME, PCRF", + "type": "string" + }, + "internalHeaderFields": { "$ref": "#/definitions/internalHeaderFields" }, + "lastEpochMicrosec": { + "description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "priority": { + "description": "processing priority", + "type": "string", + "enum": [ + "High", + "Medium", + "Normal", + "Low" + ] + }, + "reportingEntityId": { + "description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process", + "type": "string" + }, + "reportingEntityName": { + "description": "name of the entity reporting the event, for example, an OAM VM", + "type": "string" + }, + "sequence": { + "description": "ordering of events communicated by an event source instance or 0 if not needed", + "type": "integer" + }, + "sourceId": { + "description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process", + "type": "string" + }, + "sourceName": { + "description": "name of the entity experiencing the event issue", + "type": "string" + }, + "startEpochMicrosec": { + "description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "version": { + "description": "version of the event header", + "type": "number" + } + }, + "required": [ "domain", "eventId", "functionalRole", "lastEpochMicrosec", + "priority", "reportingEntityName", "sequence", + "sourceName", "startEpochMicrosec" ] + }, + "counter": { + "description": "performance counter", + "type": "object", + "properties": { + "criticality": { "type": "string", "enum": [ "CRIT", "MAJ" ] }, + "name": { "type": "string" }, + "thresholdCrossed": { "type": "string" }, + "value": { "type": "string"} + }, + "required": [ "criticality", "name", "thresholdCrossed", "value" ] + }, + "cpuUsage": { + "description": "percent usage of an identified CPU", + "type": "object", + "properties": { + "cpuIdentifier": { "type": "string" }, + "percentUsage": { "type": "number" } + }, + "required": [ "cpuIdentifier", "percentUsage" ] + }, + "errors": { + "description": "receive and transmit errors for the measurements domain", + "type": "object", + "properties": { + "receiveDiscards": { "type": "number" }, + "receiveErrors": { "type": "number" }, + "transmitDiscards": { "type": "number" }, + "transmitErrors": { "type": 
"number" } + }, + "required": [ "receiveDiscards", "receiveErrors", "transmitDiscards", "transmitErrors" ] + }, + "event": { + "description": "the root level of the common event format", + "type": "object", + "properties": { + "commonEventHeader": { "$ref": "#/definitions/commonEventHeader" }, + "faultFields": { "$ref": "#/definitions/faultFields" }, + "measurementsForVfScalingFields": { "$ref": "#/definitions/measurementsForVfScalingFields" }, + "mobileFlowFields": { "$ref": "#/definitions/mobileFlowFields" }, + "otherFields": { "$ref": "#/definitions/otherFields" }, + "stateChangeFields": { "$ref": "#/definitions/stateChangeFields" }, + "syslogFields": { "$ref": "#/definitions/syslogFields" }, + "thresholdCrossingAlertFields": { "$ref": "#/definitions/thresholdCrossingAlertFields" } + }, + "required": [ "commonEventHeader" ] + }, + "eventDomainThrottleSpecification": { + "description": "specification of what information to suppress within an event domain", + "type": "object", + "properties": { + "eventDomain": { + "description": "Event domain enum from the commonEventHeader domain field", + "type": "string" + }, + "suppressedFieldNames": { + "description": "List of optional field names in the event block that should not be sent to the Event Listener", + "type": "array", + "items": { + "type": "string" + } + }, + "suppressedNvPairsList": { + "description": "Optional list of specific NvPairsNames to suppress within a given Name-Value Field", + "type": "array", + "items": { + "$ref": "#/definitions/suppressedNvPairs" + } + } + }, + "required": [ "eventDomain" ] + }, + "eventDomainThrottleSpecificationList": { + "description": "array of eventDomainThrottleSpecifications", + "type": "array", + "items": { + "$ref": "#/definitions/eventDomainThrottleSpecification" + }, + "minItems": 0 + }, + "eventList": { + "description": "array of events", + "type": "array", + "items": { + "$ref": "#/definitions/event" + } + }, + "eventThrottlingState": { + "description": "reports the throttling in force at the event source", + "type": "object", + "properties": { + "eventThrottlingMode": { + "description": "Mode the event manager is in", + "type": "string", + "enum": [ + "normal", + "throttled" + ] + }, + "eventDomainThrottleSpecificationList": { "$ref": "#/definitions/eventDomainThrottleSpecificationList" } + }, + "required": [ "eventThrottlingMode" ] + }, + "faultFields": { + "description": "fields specific to fault events", + "type": "object", + "properties": { + "alarmAdditionalInformation": { + "description": "additional alarm information", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "alarmCondition": { + "description": "alarm condition reported by the device", + "type": "string" + }, + "alarmInterfaceA": { + "description": "card, port, channel or interface name of the device generating the alarm", + "type": "string" + }, + "eventSeverity": { + "description": "event severity or priority", + "type": "string", + "enum": [ + "CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL" + ] + }, + "eventSourceType": { + "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "faultFieldsVersion": { + "description": "version of the faultFields block", + "type": "number" + }, + "specificProblem": { + "description": "short description of the alarm or problem", + "type": "string" + }, + "vfStatus": { + "description": "virtual function status enumeration", + 
"type": "string", + "enum": [ + "Active", + "Idle", + "Preparing to terminate", + "Ready to terminate", + "Requesting termination" + ] + } + }, + "required": [ "alarmCondition", "eventSeverity", + "eventSourceType", "specificProblem", "vfStatus" ] + }, + "featuresInUse": { + "description": "number of times an identified feature was used over the measurementInterval", + "type": "object", + "properties": { + "featureIdentifier": { "type": "string" }, + "featureUtilization": { "type": "number" } + }, + "required": [ "featureIdentifier", "featureUtilization" ] + }, + "field": { + "description": "name value pair", + "type": "object", + "properties": { + "name": { "type": "string" }, + "value": { "type": "string" } + }, + "required": [ "name", "value" ] + }, + "filesystemUsage": { + "description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second", + "type": "object", + "properties": { + "blockConfigured": { "type": "number" }, + "blockIops": { "type": "number" }, + "blockUsed": { "type": "number" }, + "ephemeralConfigured": { "type": "number" }, + "ephemeralIops": { "type": "number" }, + "ephemeralUsed": { "type": "number" }, + "filesystemName": { "type": "string" } + }, + "required": [ "blockConfigured", "blockIops", "blockUsed", "ephemeralConfigured", + "ephemeralIops", "ephemeralUsed", "filesystemName" ] + }, + "gtpPerFlowMetrics": { + "description": "Mobility GTP Protocol per flow metrics", + "type": "object", + "properties": { + "avgBitErrorRate": { + "description": "average bit error rate", + "type": "number" + }, + "avgPacketDelayVariation": { + "description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "avgPacketLatency": { + "description": "average delivery latency", + "type": "number" + }, + "avgReceiveThroughput": { + "description": "average receive throughput", + "type": "number" + }, + "avgTransmitThroughput": { + "description": "average transmit throughput", + "type": "number" + }, + "durConnectionFailedStatus": { + "description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful error request, over this reporting interval", + "type": "number" + }, + "durTunnelFailedStatus": { + "description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval", + "type": "number" + }, + "flowActivatedBy": { + "description": "Endpoint activating the flow", + "type": "string" + }, + "flowActivationEpoch": { + "description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available", + "type": "number" + }, + "flowActivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowActivationTime": { + "description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowDeactivatedBy": { + "description": "Endpoint deactivating the flow", + "type": "string" + }, + "flowDeactivationEpoch": { + "description": "Time for the start of the flow connection, in 
integer UTC epoch time aka UNIX time", + "type": "number" + }, + "flowDeactivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowDeactivationTime": { + "description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowStatus": { + "description": "connection status at reporting time as a working / inactive / failed indicator value", + "type": "string" + }, + "gtpConnectionStatus": { + "description": "Current connection state at reporting time", + "type": "string" + }, + "gtpTunnelStatus": { + "description": "Current tunnel state at reporting time", + "type": "string" + }, + "ipTosCountList": { + "description": "array of key: value pairs where the keys are drawn from the IP Type-of-Service identifiers which range from '0' to '255', and the values are the count of packets that had those ToS identifiers in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { "type": "string" }, + { "type": "number" } + ] + } + }, + "ipTosList": { + "description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'", + "type": "array", + "items": { + "type": "string" + } + }, + "largePacketRtt": { + "description": "large packet round trip time", + "type": "number" + }, + "largePacketThreshold": { + "description": "large packet threshold being applied", + "type": "number" + }, + "maxPacketDelayVariation": { + "description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "maxReceiveBitRate": { + "description": "maximum receive bit rate", + "type": "number" + }, + "maxTransmitBitRate": { + "description": "maximum transmit bit rate", + "type": "number" + }, + "mobileQciCosCountList": { + "description": "array of key: value pairs where the keys are drawn from LTE QCI or UMTS class of service strings, and the values are the count of packets that had those strings in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { "type": "string" }, + { "type": "number" } + ] + } + }, + "mobileQciCosList": { + "description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "numActivationFailures": { + "description": "Number of failed activation requests, as observed by the reporting node", + "type": "number" + }, + "numBitErrors": { + "description": "number of errored bits", + "type": "number" + }, + "numBytesReceived": { + "description": "number of bytes received, including retransmissions", + "type": "number" + }, + "numBytesTransmitted": { + "description": "number of bytes transmitted, including retransmissions", + "type": "number" + }, + "numDroppedPackets": { + "description": "number of received packets dropped due to errors per virtual interface", + "type": "number" + }, + "numGtpEchoFailures": { + "description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2", + "type": "number" + }, + "numGtpTunnelErrors": { + "description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 
11.1", + "type": "number" + }, + "numHttpErrors": { + "description": "Http error count", + "type": "number" + }, + "numL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, including retransmissions", + "type": "number" + }, + "numL7BytesTransmitted": { + "description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions", + "type": "number" + }, + "numLostPackets": { + "description": "number of lost packets", + "type": "number" + }, + "numOutOfOrderPackets": { + "description": "number of out-of-order packets", + "type": "number" + }, + "numPacketErrors": { + "description": "number of errored packets", + "type": "number" + }, + "numPacketsReceivedExclRetrans": { + "description": "number of packets received, excluding retransmission", + "type": "number" + }, + "numPacketsReceivedInclRetrans": { + "description": "number of packets received, including retransmission", + "type": "number" + }, + "numPacketsTransmittedInclRetrans": { + "description": "number of packets transmitted, including retransmissions", + "type": "number" + }, + "numRetries": { + "description": "number of packet retries", + "type": "number" + }, + "numTimeouts": { + "description": "number of packet timeouts", + "type": "number" + }, + "numTunneledL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, excluding retransmissions", + "type": "number" + }, + "roundTripTime": { + "description": "round trip time", + "type": "number" + }, + "tcpFlagCountList": { + "description": "array of key: value pairs where the keys are drawn from TCP Flags and the values are the count of packets that had that TCP Flag in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { "type": "string" }, + { "type": "number" } + ] + } + }, + "tcpFlagList": { + "description": "Array of unique TCP Flags observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "timeToFirstByte": { + "description": "Time in milliseconds between the connection activation and first byte received", + "type": "number" + } + }, + "required": [ "avgBitErrorRate", "avgPacketDelayVariation", "avgPacketLatency", + "avgReceiveThroughput", "avgTransmitThroughput", + "flowActivationEpoch", "flowActivationMicrosec", + "flowDeactivationEpoch", "flowDeactivationMicrosec", + "flowDeactivationTime", "flowStatus", + "maxPacketDelayVariation", "numActivationFailures", + "numBitErrors", "numBytesReceived", "numBytesTransmitted", + "numDroppedPackets", "numL7BytesReceived", + "numL7BytesTransmitted", "numLostPackets", + "numOutOfOrderPackets", "numPacketErrors", + "numPacketsReceivedExclRetrans", + "numPacketsReceivedInclRetrans", + "numPacketsTransmittedInclRetrans", + "numRetries", "numTimeouts", "numTunneledL7BytesReceived", + "roundTripTime", "timeToFirstByte" + ] + }, + "internalHeaderFields": { + "description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources", + "type": "object" + }, + "latencyBucketMeasure": { + "description": "number of counts falling within a defined latency bucket", + "type": "object", + "properties": { + "countsInTheBucket": { "type": "number" }, + "highEndOfLatencyBucket": { "type": "number" }, + "lowEndOfLatencyBucket": { "type": "number" } + }, + "required": [ "countsInTheBucket" ] + }, + "measurementGroup": { + "description": "measurement group", + "type": "object", + "properties": { + "name": { "type": "string" }, + "measurements": { + "description": "array of name value pair 
measurements", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + } + }, + "required": [ "name", "measurements" ] + }, + "measurementsForVfScalingFields": { + "description": "measurementsForVfScaling fields", + "type": "object", + "properties": { + "additionalMeasurements": { + "description": "additional measurement fields", + "type": "array", + "items": { + "$ref": "#/definitions/measurementGroup" + } + }, + "aggregateCpuUsage": { + "description": "aggregate CPU usage of the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "codecUsageArray": { + "description": "array of codecs in use", + "type": "array", + "items": { + "$ref": "#/definitions/codecsInUse" + } + }, + "concurrentSessions": { + "description": "peak concurrent sessions for the VM or VNF over the measurementInterval", + "type": "number" + }, + "configuredEntities": { + "description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the VNF", + "type": "number" + }, + "cpuUsageArray": { + "description": "usage of an array of CPUs", + "type": "array", + "items": { + "$ref": "#/definitions/cpuUsage" + } + }, + "errors": { "$ref": "#/definitions/errors" }, + "featureUsageArray": { + "description": "array of features in use", + "type": "array", + "items": { + "$ref": "#/definitions/featuresInUse" + } + }, + "filesystemUsageArray": { + "description": "filesystem usage of the VM on which the VNFC reporting the event is running", + "type": "array", + "items": { + "$ref": "#/definitions/filesystemUsage" + } + }, + "latencyDistribution": { + "description": "array of integers representing counts of requests whose latency in milliseconds falls within per-VNF configured ranges", + "type": "array", + "items": { + "$ref": "#/definitions/latencyBucketMeasure" + } + }, + "meanRequestLatency": { + "description": "mean seconds required to respond to each request for the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "measurementInterval": { + "description": "interval over which measurements are being reported in seconds", + "type": "number" + }, + "measurementsForVfScalingVersion": { + "description": "version of the measurementsForVfScaling block", + "type": "number" + }, + "memoryConfigured": { + "description": "memory in MB configured in the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "memoryUsed": { + "description": "memory usage in MB of the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "numberOfMediaPortsInUse": { + "description": "number of media ports in use", + "type": "number" + }, + "requestRate": { + "description": "peak rate of service requests per second to the VNF over the measurementInterval", + "type": "number" + }, + "vnfcScalingMetric": { + "description": "represents busy-ness of the VNF from 0 to 100 as reported by the VNFC", + "type": "number" + }, + "vNicUsageArray": { + "description": "usage of an array of virtual network interface cards", + "type": "array", + "items": { + "$ref": "#/definitions/vNicUsage" + } + } + }, + "required": [ "measurementInterval" ] + }, + "mobileFlowFields": { + "description": "mobileFlow fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional mobileFlow fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "applicationType": { + "description": "Application type 
inferred", + "type": "string" + }, + "appProtocolType": { + "description": "application protocol", + "type": "string" + }, + "appProtocolVersion": { + "description": "application protocol version", + "type": "string" + }, + "cid": { + "description": "cell id", + "type": "string" + }, + "connectionType": { + "description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc", + "type": "string" + }, + "ecgi": { + "description": "Evolved Cell Global Id", + "type": "string" + }, + "flowDirection": { + "description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow", + "type": "string" + }, + "gtpPerFlowMetrics": { "$ref": "#/definitions/gtpPerFlowMetrics" }, + "gtpProtocolType": { + "description": "GTP protocol", + "type": "string" + }, + "gtpVersion": { + "description": "GTP protocol version", + "type": "string" + }, + "httpHeader": { + "description": "HTTP request header, if the flow connects to a node referenced by HTTP", + "type": "string" + }, + "imei": { + "description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "imsi": { + "description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "ipProtocolType": { + "description": "IP protocol type e.g., TCP, UDP, RTP...", + "type": "string" + }, + "ipVersion": { + "description": "IP protocol version e.g., IPv4, IPv6", + "type": "string" + }, + "lac": { + "description": "location area code", + "type": "string" + }, + "mcc": { + "description": "mobile country code", + "type": "string" + }, + "mnc": { + "description": "mobile network code", + "type": "string" + }, + "mobileFlowFieldsVersion": { + "description": "version of the mobileFlowFields block", + "type": "number" + }, + "msisdn": { + "description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device", + "type": "string" + }, + "otherEndpointIpAddress": { + "description": "IP address for the other endpoint, as used for the flow being reported on", + "type": "string" + }, + "otherEndpointPort": { + "description": "IP Port for the reporting entity, as used for the flow being reported on", + "type": "number" + }, + "otherFunctionalRole": { + "description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...", + "type": "string" + }, + "rac": { + "description": "routing area code", + "type": "string" + }, + "radioAccessTechnology": { + "description": "Radio Access Technology e.g., 2G, 3G, LTE", + "type": "string" + }, + "reportingEndpointIpAddr": { + "description": "IP address for the reporting entity, as used for the flow being reported on", + "type": "string" + }, + "reportingEndpointPort": { + "description": "IP port for the reporting entity, as used for the flow being reported on", + "type": "number" + }, + "sac": { + "description": "service area code", + "type": "string" + }, + "samplingAlgorithm": { + "description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied", + "type": "number" + }, + "tac": { + "description": "transport area code", + "type": "string" + }, + "tunnelId": { + "description": "tunnel identifier", + "type": "string" + }, + "vlanId": { + "description": "VLAN identifier used by this flow", + "type": "string" + } + }, + 
"required": [ "flowDirection", "gtpPerFlowMetrics", "ipProtocolType", + "ipVersion", "otherEndpointIpAddress", "otherEndpointPort", + "reportingEndpointIpAddr", "reportingEndpointPort" ] + }, + "otherFields": { + "description": "additional fields not reported elsewhere", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "requestError": { + "description": "standard request error data structure", + "type": "object", + "properties": { + "messageId": { + "description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception", + "type": "string" + }, + "text": { + "description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1", + "type": "string" + }, + "url": { + "description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents", + "type": "string" + }, + "variables": { + "description": "List of zero or more strings that represent the contents of the variables used by the message text", + "type": "string" + } + }, + "required": [ "messageId", "text" ] + }, + "stateChangeFields": { + "description": "stateChange fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional stateChange fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "newState": { + "description": "new state of the entity", + "type": "string", + "enum": [ + "inService", + "maintenance", + "outOfService" + ] + }, + "oldState": { + "description": "previous state of the entity", + "type": "string", + "enum": [ + "inService", + "maintenance", + "outOfService" + ] + }, + "stateChangeFieldsVersion": { + "description": "version of the stateChangeFields block", + "type": "number" + }, + "stateInterface": { + "description": "card or port name of the entity that changed state", + "type": "string" + } + }, + "required": [ "newState", "oldState", "stateInterface" ] + }, + "suppressedNvPairs": { + "description": "List of specific NvPairsNames to suppress within a given Name-Value Field for event Throttling", + "type": "object", + "properties": { + "nvPairFieldName": { + "description": "Name of the field within which are the nvpair names to suppress", + "type": "string" + }, + "suppressedNvPairNames": { + "description": "Array of nvpair names to suppress within the nvpairFieldName", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ "nvPairFieldName", "suppressedNvPairNames" ] + }, + "syslogFields": { + "description": "sysLog fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional syslog fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "eventSourceHost": { + "description": "hostname of the device", + "type": "string" + }, + "eventSourceType": { + "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "syslogFacility": { + "description": "numeric code from 0 to 23 for facility--see table in documentation", + "type": "number" + }, + "syslogFieldsVersion": { + "description": "version of the syslogFields block", + "type": "number" + }, + "syslogMsg": { + "description": "syslog message", + "type": "string" + }, + "syslogPri": { + "description": "0-192 combined severity and facility", + "type": "number" + }, + 
"syslogProc": { + "description": "identifies the application that originated the message", + "type": "string" + }, + "syslogProcId": { + "description": "a change in the value of this field indicates a discontinuity in syslog reporting", + "type": "number" + }, + "syslogSData": { + "description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs", + "type": "string" + }, + "syslogSdId": { + "description": "0-32 char in format name@number for example ourSDID@32473", + "type": "string" + }, + "syslogSev": { + "description": "numerical Code for severity derived from syslogPri as remaider of syslogPri / 8", + "type": "string" + }, + "syslogTag": { + "description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided", + "type": "string" + }, + "syslogVer": { + "description": "IANA assigned version of the syslog protocol specification - typically 1", + "type": "number" + } + }, + "required": [ "eventSourceType", "syslogMsg", "syslogTag" ] + }, + "thresholdCrossingAlertFields": { + "description": "fields specific to threshold crossing alert events", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional threshold crossing alert fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalParameters": { + "description": "performance counters", + "type": "array", + "items": { + "$ref": "#/definitions/counter" + } + }, + "alertAction": { + "description": "Event action", + "type": "string", + "enum": [ + "CLEAR", + "CONT", + "SET" + ] + }, + "alertDescription": { + "description": "Unique short alert description such as IF-SHUB-ERRDROP", + "type": "string" + }, + "alertType": { + "description": "Event type", + "type": "string", + "enum": [ + "CARD-ANOMALY", + "ELEMENT-ANOMALY", + "INTERFACE-ANOMALY", + "SERVICE-ANOMALY" + ] + }, + "alertValue": { + "description": "Calculated API value (if applicable)", + "type": "string" + }, + "associatedAlertIdList": { + "description": "List of eventIds associated with the event being reported", + "type": "array", + "items": { "type": "string" } + }, + "collectionTimestamp": { + "description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "dataCollector": { + "description": "Specific performance collector instance used", + "type": "string" + }, + "elementType": { + "description": "type of network element - internal ATT field", + "type": "string" + }, + "eventSeverity": { + "description": "event severity or priority", + "type": "string", + "enum": [ + "CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL" + ] + }, + "eventStartTimestamp": { + "description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "interfaceName": { + "description": "Physical or logical port or card (if applicable)", + "type": "string" + }, + "networkService": { + "description": "network name - internal ATT field", + "type": "string" + }, + "possibleRootCause": { + "description": "Reserved for future use", + "type": "string" + }, + "thresholdCrossingFieldsVersion": { + "description": "version of the thresholdCrossingAlertFields block", + "type": "number" + } + }, + "required": [ + "additionalParameters", + "alertAction", + "alertDescription", + "alertType", + "collectionTimestamp", + "eventSeverity", + 
"eventStartTimestamp" + ] + }, + "vNicUsage": { + "description": "usage of identified virtual network interface card", + "type": "object", + "properties": { + "broadcastPacketsIn": { "type": "number" }, + "broadcastPacketsOut": { "type": "number" }, + "bytesIn": { "type": "number" }, + "bytesOut": { "type": "number" }, + "multicastPacketsIn": { "type": "number" }, + "multicastPacketsOut": { "type": "number" }, + "packetsIn": { "type": "number" }, + "packetsOut": { "type": "number" }, + "unicastPacketsIn": { "type": "number" }, + "unicastPacketsOut": { "type": "number" }, + "vNicIdentifier": { "type": "string" } + }, + "required": [ "bytesIn", "bytesOut", "packetsIn", "packetsOut", "vNicIdentifier"] + } + }, + "title": "Event Listener", + "type": "object", + "properties": { + "event": {"$ref": "#/definitions/event"} + } +} diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/CommonEventFormat_v5.3.json b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/CommonEventFormat_v5.3.json new file mode 100644 index 0000000..3fd8138 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/CommonEventFormat_v5.3.json @@ -0,0 +1,1962 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "definitions": { + "codecsInUse": { + "description": "number of times an identified codec was used over the measurementInterval", + "type": "object", + "properties": { + "codecIdentifier": { + "type": "string" + }, + "numberInUse": { + "type": "integer" + } + }, + "required": ["codecIdentifier", + "numberInUse"] + }, + "command": { + "description": "command from an event collector toward an event source", + "type": "object", + "properties": { + "commandType": { + "type": "string", + "enum": ["heartbeatIntervalChange", + "measurementIntervalChange", + "provideThrottlingState", + "throttlingSpecification"] + }, + "eventDomainThrottleSpecification": { + "$ref": "#/definitions/eventDomainThrottleSpecification" + }, + "heartbeatInterval": { + "type": "integer" + }, + "measurementInterval": { + "type": "integer" + } + }, + "required": ["commandType"] + }, + "commandList": { + "description": "array of commands from an event collector toward an event source", + "type": "array", + "items": { + "$ref": "#/definitions/command" + }, + "minItems": 0 + }, + "commonEventHeader": { + "description": "fields common to all events", + "type": "object", + "properties": { + "domain": { + "description": "the eventing domain associated with the event", + "type": "string", + "enum": ["fault", + "heartbeat", + "measurementsForVfScaling", + "mobileFlow", + "other", + "sipSignaling", + "stateChange", + "syslog", + "thresholdCrossingAlert", + "voiceQuality"] + }, + "eventId": { + "description": "event key that is unique to the event source", + "type": "string" + }, + "eventName": { + "description": "unique event name", + "type": "string" + }, + "eventType": { + "description": "for example - applicationVnf, guestOS, hostOS, platform", + "type": "string" + }, + "internalHeaderFields": { + "$ref": "#/definitions/internalHeaderFields" + }, + "lastEpochMicrosec": { + "description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "nfcNamingCode": { + "description": "3 character network function component type, aligned with vfc naming standards", + "type": "string" + }, + "nfNamingCode": { + "description": "4 character network function type, aligned with 
vnf naming standards", + "type": "string" + }, + "priority": { + "description": "processing priority", + "type": "string", + "enum": ["High", + "Medium", + "Normal", + "Low"] + }, + "reportingEntityId": { + "description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process", + "type": "string" + }, + "reportingEntityName": { + "description": "name of the entity reporting the event, for example, an EMS name; may be the same as sourceName", + "type": "string" + }, + "sequence": { + "description": "ordering of events communicated by an event source instance or 0 if not needed", + "type": "integer" + }, + "sourceId": { + "description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process", + "type": "string" + }, + "sourceName": { + "description": "name of the entity experiencing the event issue", + "type": "string" + }, + "startEpochMicrosec": { + "description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "version": { + "description": "version of the event header", + "type": "number" + } + }, + "required": ["domain", + "eventId", + "eventName", + "lastEpochMicrosec", + "priority", + "reportingEntityName", + "sequence", + "sourceName", + "startEpochMicrosec", + "version"] + }, + "counter": { + "description": "performance counter", + "type": "object", + "properties": { + "criticality": { + "type": "string", + "enum": ["CRIT", + "MAJ"] + }, + "name": { + "type": "string" + }, + "thresholdCrossed": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": ["criticality", + "name", + "thresholdCrossed", + "value"] + }, + "cpuUsage": { + "description": "usage of an identified CPU", + "type": "object", + "properties": { + "cpuIdentifier": { + "description": "cpu identifer", + "type": "string" + }, + "cpuIdle": { + "description": "percentage of CPU time spent in the idle task", + "type": "number" + }, + "cpuUsageInterrupt": { + "description": "percentage of time spent servicing interrupts", + "type": "number" + }, + "cpuUsageNice": { + "description": "percentage of time spent running user space processes that have been niced", + "type": "number" + }, + "cpuUsageSoftIrq": { + "description": "percentage of time spent handling soft irq interrupts", + "type": "number" + }, + "cpuUsageSteal": { + "description": "percentage of time spent in involuntary wait which is neither user, system or idle time and is effectively time that went missing", + "type": "number" + }, + "cpuUsageSystem": { + "description": "percentage of time spent on system tasks running the kernel", + "type": "number" + }, + "cpuUsageUser": { + "description": "percentage of time spent running un-niced user space processes", + "type": "number" + }, + "cpuWait": { + "description": "percentage of CPU time spent waiting for I/O operations to complete", + "type": "number" + }, + "percentUsage": { + "description": "aggregate cpu usage of the virtual machine on which the VNFC reporting the event is running", + "type": "number" + } + }, + "required": ["cpuIdentifier", + "percentUsage"] + }, + "diskUsage": { + "description": "usage of an identified disk", + "type": "object", + "properties": { + "diskIdentifier": { + "description": "disk identifier", + "type": "string" + }, + "diskIoTimeAvg": { + "description": "milliseconds spent doing input/output operations over 1 sec; 
treat this metric as a device load percentage where 1000ms matches 100% load; provide the average over the measurement interval", + "type": "number" + }, + "diskIoTimeLast": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskIoTimeMax": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskIoTimeMin": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadAvg": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadLast": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadMax": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadMin": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteAvg": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteLast": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteMax": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteMin": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadAvg": { + "description": "number of octets per second read from a disk or partition; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadLast": { + "description": "number of octets per second read from a disk or 
partition; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadMax": { + "description": "number of octets per second read from a disk or partition; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadMin": { + "description": "number of octets per second read from a disk or partition; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteAvg": { + "description": "number of octets per second written to a disk or partition; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteLast": { + "description": "number of octets per second written to a disk or partition; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteMax": { + "description": "number of octets per second written to a disk or partition; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteMin": { + "description": "number of octets per second written to a disk or partition; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadAvg": { + "description": "number of read operations per second issued to the disk; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadLast": { + "description": "number of read operations per second issued to the disk; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadMax": { + "description": "number of read operations per second issued to the disk; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadMin": { + "description": "number of read operations per second issued to the disk; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteAvg": { + "description": "number of write operations per second issued to the disk; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteLast": { + "description": "number of write operations per second issued to the disk; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteMax": { + "description": "number of write operations per second issued to the disk; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteMin": { + "description": "number of write operations per second issued to the disk; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsAvg": { + "description": "queue size of pending I/O operations per second; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsLast": { + "description": "queue size of pending I/O operations per second; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsMax": { + "description": "queue size of pending I/O operations per second; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsMin": { + "description": "queue size of pending I/O operations per second; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadAvg": { + "description": 
"milliseconds a read operation took to complete; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadLast": { + "description": "milliseconds a read operation took to complete; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadMax": { + "description": "milliseconds a read operation took to complete; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadMin": { + "description": "milliseconds a read operation took to complete; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteAvg": { + "description": "milliseconds a write operation took to complete; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteLast": { + "description": "milliseconds a write operation took to complete; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteMax": { + "description": "milliseconds a write operation took to complete; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteMin": { + "description": "milliseconds a write operation took to complete; provide the minimum measurement within the measurement interval", + "type": "number" + } + }, + "required": ["diskIdentifier"] + }, + "endOfCallVqmSummaries": { + "description": "provides end of call voice quality metrics", + "type": "object", + "properties": { + "adjacencyName": { + "description": " adjacency name", + "type": "string" + }, + "endpointDescription": { + "description": "Either Caller or Callee", + "type": "string", + "enum": ["Caller", + "Callee"] + }, + "endpointJitter": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsDiscarded": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsReceived": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsSent": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsDiscarded": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsReceived": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsSent": { + "description": "", + "type": "number" + }, + "localJitter": { + "description": "", + "type": "number" + }, + "localRtpOctetsDiscarded": { + "description": "", + "type": "number" + }, + "localRtpOctetsReceived": { + "description": "", + "type": "number" + }, + "localRtpOctetsSent": { + "description": "", + "type": "number" + }, + "localRtpPacketsDiscarded": { + "description": "", + "type": "number" + }, + "localRtpPacketsReceived": { + "description": "", + "type": "number" + }, + "localRtpPacketsSent": { + "description": "", + "type": "number" + }, + "mosCqe": { + "description": "1-5 1dp", + "type": "number" + }, + "packetsLost": { + "description": "", + "type": "number" + }, + "packetLossPercent": { + "description": "Calculated percentage packet loss based on Endpoint RTP packets lost (as reported in RTCP) and Local RTP packets sent. Direction is based on Endpoint description (Caller, Callee). 
Decimal (2 dp)", + "type": "number" + }, + "rFactor": { + "description": "0-100", + "type": "number" + }, + "roundTripDelay": { + "description": "millisecs", + "type": "number" + } + }, + "required": ["adjacencyName", + "endpointDescription"] + }, + "event": { + "description": "the root level of the common event format", + "type": "object", + "properties": { + "commonEventHeader": { + "$ref": "#/definitions/commonEventHeader" + }, + "faultFields": { + "$ref": "#/definitions/faultFields" + }, + "heartbeatFields": { + "$ref": "#/definitions/heartbeatFields" + }, + "measurementsForVfScalingFields": { + "$ref": "#/definitions/measurementsForVfScalingFields" + }, + "mobileFlowFields": { + "$ref": "#/definitions/mobileFlowFields" + }, + "otherFields": { + "$ref": "#/definitions/otherFields" + }, + "sipSignalingFields": { + "$ref": "#/definitions/sipSignalingFields" + }, + "stateChangeFields": { + "$ref": "#/definitions/stateChangeFields" + }, + "syslogFields": { + "$ref": "#/definitions/syslogFields" + }, + "thresholdCrossingAlertFields": { + "$ref": "#/definitions/thresholdCrossingAlertFields" + }, + "voiceQualityFields": { + "$ref": "#/definitions/voiceQualityFields" + } + }, + "required": ["commonEventHeader"] + }, + "eventDomainThrottleSpecification": { + "description": "specification of what information to suppress within an event domain", + "type": "object", + "properties": { + "eventDomain": { + "description": "Event domain enum from the commonEventHeader domain field", + "type": "string" + }, + "suppressedFieldNames": { + "description": "List of optional field names in the event block that should not be sent to the Event Listener", + "type": "array", + "items": { + "type": "string" + } + }, + "suppressedNvPairsList": { + "description": "Optional list of specific NvPairsNames to suppress within a given Name-Value Field", + "type": "array", + "items": { + "$ref": "#/definitions/suppressedNvPairs" + } + } + }, + "required": ["eventDomain"] + }, + "eventDomainThrottleSpecificationList": { + "description": "array of eventDomainThrottleSpecifications", + "type": "array", + "items": { + "$ref": "#/definitions/eventDomainThrottleSpecification" + }, + "minItems": 0 + }, + "eventList": { + "description": "array of events", + "type": "array", + "items": { + "$ref": "#/definitions/event" + } + }, + "eventThrottlingState": { + "description": "reports the throttling in force at the event source", + "type": "object", + "properties": { + "eventThrottlingMode": { + "description": "Mode the event manager is in", + "type": "string", + "enum": ["normal", + "throttled"] + }, + "eventDomainThrottleSpecificationList": { + "$ref": "#/definitions/eventDomainThrottleSpecificationList" + } + }, + "required": ["eventThrottlingMode"] + }, + "faultFields": { + "description": "fields specific to fault events", + "type": "object", + "properties": { + "alarmAdditionalInformation": { + "description": "additional alarm information", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "alarmCondition": { + "description": "alarm condition reported by the device", + "type": "string" + }, + "alarmInterfaceA": { + "description": "card, port, channel or interface name of the device generating the alarm", + "type": "string" + }, + "eventCategory": { + "description": "Event category, for example: license, link, routing, security, signaling", + "type": "string" + }, + "eventSeverity": { + "description": "event severity", + "type": "string", + "enum": ["CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + 
"NORMAL"] + }, + "eventSourceType": { + "description": "type of event source; examples: card, host, other, port, portThreshold, router, slotThreshold, switch, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "faultFieldsVersion": { + "description": "version of the faultFields block", + "type": "number" + }, + "specificProblem": { + "description": "short description of the alarm or problem", + "type": "string" + }, + "vfStatus": { + "description": "virtual function status enumeration", + "type": "string", + "enum": ["Active", + "Idle", + "Preparing to terminate", + "Ready to terminate", + "Requesting termination"] + } + }, + "required": ["alarmCondition", + "eventSeverity", + "eventSourceType", + "faultFieldsVersion", + "specificProblem", + "vfStatus"] + }, + "featuresInUse": { + "description": "number of times an identified feature was used over the measurementInterval", + "type": "object", + "properties": { + "featureIdentifier": { + "type": "string" + }, + "featureUtilization": { + "type": "integer" + } + }, + "required": ["featureIdentifier", + "featureUtilization"] + }, + "field": { + "description": "name value pair", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": ["name", + "value"] + }, + "filesystemUsage": { + "description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second", + "type": "object", + "properties": { + "blockConfigured": { + "type": "number" + }, + "blockIops": { + "type": "number" + }, + "blockUsed": { + "type": "number" + }, + "ephemeralConfigured": { + "type": "number" + }, + "ephemeralIops": { + "type": "number" + }, + "ephemeralUsed": { + "type": "number" + }, + "filesystemName": { + "type": "string" + } + }, + "required": ["blockConfigured", + "blockIops", + "blockUsed", + "ephemeralConfigured", + "ephemeralIops", + "ephemeralUsed", + "filesystemName"] + }, + "gtpPerFlowMetrics": { + "description": "Mobility GTP Protocol per flow metrics", + "type": "object", + "properties": { + "avgBitErrorRate": { + "description": "average bit error rate", + "type": "number" + }, + "avgPacketDelayVariation": { + "description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "avgPacketLatency": { + "description": "average delivery latency", + "type": "number" + }, + "avgReceiveThroughput": { + "description": "average receive throughput", + "type": "number" + }, + "avgTransmitThroughput": { + "description": "average transmit throughput", + "type": "number" + }, + "durConnectionFailedStatus": { + "description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful error request, over this reporting interval", + "type": "number" + }, + "durTunnelFailedStatus": { + "description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval", + "type": "number" + }, + "flowActivatedBy": { + "description": "Endpoint activating the flow", + "type": "string" + }, + "flowActivationEpoch": { + "description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available", + "type": "number" + }, + 
"flowActivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowActivationTime": { + "description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowDeactivatedBy": { + "description": "Endpoint deactivating the flow", + "type": "string" + }, + "flowDeactivationEpoch": { + "description": "Time for the start of the flow connection, in integer UTC epoch time aka UNIX time", + "type": "number" + }, + "flowDeactivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowDeactivationTime": { + "description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowStatus": { + "description": "connection status at reporting time as a working / inactive / failed indicator value", + "type": "string" + }, + "gtpConnectionStatus": { + "description": "Current connection state at reporting time", + "type": "string" + }, + "gtpTunnelStatus": { + "description": "Current tunnel state at reporting time", + "type": "string" + }, + "ipTosCountList": { + "description": "array of key: value pairs where the keys are drawn from the IP Type-of-Service identifiers which range from '0' to '255', and the values are the count of packets that had those ToS identifiers in the flow", + "type": "array", + "items": { + "type": "array", + "items": [{ + "type": "string" + }, + { + "type": "number" + }] + } + }, + "ipTosList": { + "description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'", + "type": "array", + "items": { + "type": "string" + } + }, + "largePacketRtt": { + "description": "large packet round trip time", + "type": "number" + }, + "largePacketThreshold": { + "description": "large packet threshold being applied", + "type": "number" + }, + "maxPacketDelayVariation": { + "description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "maxReceiveBitRate": { + "description": "maximum receive bit rate", + "type": "number" + }, + "maxTransmitBitRate": { + "description": "maximum transmit bit rate", + "type": "number" + }, + "mobileQciCosCountList": { + "description": "array of key: value pairs where the keys are drawn from LTE QCI or UMTS class of service strings, and the values are the count of packets that had those strings in the flow", + "type": "array", + "items": { + "type": "array", + "items": [{ + "type": "string" + }, + { + "type": "number" + }] + } + }, + "mobileQciCosList": { + "description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "numActivationFailures": { + "description": "Number of failed activation requests, as observed by the reporting node", + "type": "number" + }, + "numBitErrors": { + "description": "number of errored bits", + "type": "number" + }, + "numBytesReceived": { + "description": "number of bytes received, including retransmissions", + "type": "number" + }, + "numBytesTransmitted": { + "description": "number of bytes 
transmitted, including retransmissions", + "type": "number" + }, + "numDroppedPackets": { + "description": "number of received packets dropped due to errors per virtual interface", + "type": "number" + }, + "numGtpEchoFailures": { + "description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2", + "type": "number" + }, + "numGtpTunnelErrors": { + "description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 11.1", + "type": "number" + }, + "numHttpErrors": { + "description": "Http error count", + "type": "number" + }, + "numL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, including retransmissions", + "type": "number" + }, + "numL7BytesTransmitted": { + "description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions", + "type": "number" + }, + "numLostPackets": { + "description": "number of lost packets", + "type": "number" + }, + "numOutOfOrderPackets": { + "description": "number of out-of-order packets", + "type": "number" + }, + "numPacketErrors": { + "description": "number of errored packets", + "type": "number" + }, + "numPacketsReceivedExclRetrans": { + "description": "number of packets received, excluding retransmission", + "type": "number" + }, + "numPacketsReceivedInclRetrans": { + "description": "number of packets received, including retransmission", + "type": "number" + }, + "numPacketsTransmittedInclRetrans": { + "description": "number of packets transmitted, including retransmissions", + "type": "number" + }, + "numRetries": { + "description": "number of packet retries", + "type": "number" + }, + "numTimeouts": { + "description": "number of packet timeouts", + "type": "number" + }, + "numTunneledL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, excluding retransmissions", + "type": "number" + }, + "roundTripTime": { + "description": "round trip time", + "type": "number" + }, + "tcpFlagCountList": { + "description": "array of key: value pairs where the keys are drawn from TCP Flags and the values are the count of packets that had that TCP Flag in the flow", + "type": "array", + "items": { + "type": "array", + "items": [{ + "type": "string" + }, + { + "type": "number" + }] + } + }, + "tcpFlagList": { + "description": "Array of unique TCP Flags observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "timeToFirstByte": { + "description": "Time in milliseconds between the connection activation and first byte received", + "type": "number" + } + }, + "required": ["avgBitErrorRate", + "avgPacketDelayVariation", + "avgPacketLatency", + "avgReceiveThroughput", + "avgTransmitThroughput", + "flowActivationEpoch", + "flowActivationMicrosec", + "flowDeactivationEpoch", + "flowDeactivationMicrosec", + "flowDeactivationTime", + "flowStatus", + "maxPacketDelayVariation", + "numActivationFailures", + "numBitErrors", + "numBytesReceived", + "numBytesTransmitted", + "numDroppedPackets", + "numL7BytesReceived", + "numL7BytesTransmitted", + "numLostPackets", + "numOutOfOrderPackets", + "numPacketErrors", + "numPacketsReceivedExclRetrans", + "numPacketsReceivedInclRetrans", + "numPacketsTransmittedInclRetrans", + "numRetries", + "numTimeouts", + "numTunneledL7BytesReceived", + "roundTripTime", + "timeToFirstByte"] + }, + "heartbeatFields": { + "description": "optional field block for fields specific to heartbeat events", + "type": "object", + 
"properties": { + "additionalFields": { + "description": "additional heartbeat fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "heartbeatFieldsVersion": { + "description": "version of the heartbeatFields block", + "type": "number" + }, + "heartbeatInterval": { + "description": "current heartbeat interval in seconds", + "type": "integer" + } + }, + "required": ["heartbeatFieldsVersion", + "heartbeatInterval"] + }, + "internalHeaderFields": { + "description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources", + "type": "object" + }, + "jsonObject": { + "description": "json object schema, name and other meta-information along with one or more object instances", + "type": "object", + "properties": { + "objectInstances": { + "description": "one or more instances of the jsonObject", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObjectInstance" + } + }, + "objectName": { + "description": "name of the JSON Object", + "type": "string" + }, + "objectSchema": { + "description": "json schema for the object", + "type": "string" + }, + "objectSchemaUrl": { + "description": "Url to the json schema for the object", + "type": "string" + }, + "nfSubscribedObjectName": { + "description": "name of the object associated with the nfSubscriptonId", + "type": "string" + }, + "nfSubscriptionId": { + "description": "identifies an openConfig telemetry subscription on a network function, which configures the network function to send complex object data associated with the jsonObject", + "type": "string" + } + }, + "required": ["objectInstances", + "objectName"] + }, + "jsonObjectInstance": { + "description": "meta-information about an instance of a jsonObject along with the actual object instance", + "type": "object", + "properties": { + "objectInstance": { + "description": "an instance conforming to the jsonObject schema", + "type": "object" + }, + "objectInstanceEpochMicrosec": { + "description": "the unix time aka epoch time associated with this objectInstance--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "objectKeys": { + "description": "an ordered set of keys that identifies this particular instance of jsonObject", + "type": "array", + "items": { + "$ref": "#/definitions/key" + } + } + }, + "required": ["objectInstance"] + }, + "key": { + "description": "tuple which provides the name of a key along with its value and relative order", + "type": "object", + "properties": { + "keyName": { + "description": "name of the key", + "type": "string" + }, + "keyOrder": { + "description": "relative sequence or order of the key with respect to other keys", + "type": "integer" + }, + "keyValue": { + "description": "value of the key", + "type": "string" + } + }, + "required": ["keyName"] + }, + "latencyBucketMeasure": { + "description": "number of counts falling within a defined latency bucket", + "type": "object", + "properties": { + "countsInTheBucket": { + "type": "number" + }, + "highEndOfLatencyBucket": { + "type": "number" + }, + "lowEndOfLatencyBucket": { + "type": "number" + } + }, + "required": ["countsInTheBucket"] + }, + "measurementsForVfScalingFields": { + "description": "measurementsForVfScaling fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional name-value-pair fields", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalMeasurements": { + "description": "array of named 
name-value-pair arrays", + "type": "array", + "items": { + "$ref": "#/definitions/namedArrayOfFields" + } + }, + "additionalObjects": { + "description": "array of JSON objects described by name, schema and other meta-information", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObject" + } + }, + "codecUsageArray": { + "description": "array of codecs in use", + "type": "array", + "items": { + "$ref": "#/definitions/codecsInUse" + } + }, + "concurrentSessions": { + "description": "peak concurrent sessions for the VM or VNF over the measurementInterval", + "type": "integer" + }, + "configuredEntities": { + "description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the VNF", + "type": "integer" + }, + "cpuUsageArray": { + "description": "usage of an array of CPUs", + "type": "array", + "items": { + "$ref": "#/definitions/cpuUsage" + } + }, + "diskUsageArray": { + "description": "usage of an array of disks", + "type": "array", + "items": { + "$ref": "#/definitions/diskUsage" + } + }, + "featureUsageArray": { + "description": "array of features in use", + "type": "array", + "items": { + "$ref": "#/definitions/featuresInUse" + } + }, + "filesystemUsageArray": { + "description": "filesystem usage of the VM on which the VNFC reporting the event is running", + "type": "array", + "items": { + "$ref": "#/definitions/filesystemUsage" + } + }, + "latencyDistribution": { + "description": "array of integers representing counts of requests whose latency in milliseconds falls within per-VNF configured ranges", + "type": "array", + "items": { + "$ref": "#/definitions/latencyBucketMeasure" + } + }, + "meanRequestLatency": { + "description": "mean seconds required to respond to each request for the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "measurementInterval": { + "description": "interval over which measurements are being reported in seconds", + "type": "number" + }, + "measurementsForVfScalingVersion": { + "description": "version of the measurementsForVfScaling block", + "type": "number" + }, + "memoryUsageArray": { + "description": "memory usage of an array of VMs", + "type": "array", + "items": { + "$ref": "#/definitions/memoryUsage" + } + }, + "numberOfMediaPortsInUse": { + "description": "number of media ports in use", + "type": "integer" + }, + "requestRate": { + "description": "peak rate of service requests per second to the VNF over the measurementInterval", + "type": "number" + }, + "vnfcScalingMetric": { + "description": "represents busy-ness of the VNF from 0 to 100 as reported by the VNFC", + "type": "integer" + }, + "vNicPerformanceArray": { + "description": "usage of an array of virtual network interface cards", + "type": "array", + "items": { + "$ref": "#/definitions/vNicPerformance" + } + } + }, + "required": ["measurementInterval", + "measurementsForVfScalingVersion"] + }, + "memoryUsage": { + "description": "memory usage of an identified virtual machine", + "type": "object", + "properties": { + "memoryBuffered": { + "description": "kibibytes of temporary storage for raw disk blocks", + "type": "number" + }, + "memoryCached": { + "description": "kibibytes of memory used for cache", + "type": "number" + }, + "memoryConfigured": { + "description": "kibibytes of memory configured in the virtual machine on which the VNFC reporting the event is running", + "type": "number" + }, + "memoryFree": { + "description": "kibibytes of physical RAM left 
unused by the system", + "type": "number" + }, + "memorySlabRecl": { + "description": "the part of the slab that can be reclaimed such as caches measured in kibibytes", + "type": "number" + }, + "memorySlabUnrecl": { + "description": "the part of the slab that cannot be reclaimed even when lacking memory measured in kibibytes", + "type": "number" + }, + "memoryUsed": { + "description": "total memory minus the sum of free, buffered, cached and slab memory measured in kibibytes", + "type": "number" + }, + "vmIdentifier": { + "description": "virtual machine identifier associated with the memory metrics", + "type": "string" + } + }, + "required": ["memoryFree", + "memoryUsed", + "vmIdentifier"] + }, + "mobileFlowFields": { + "description": "mobileFlow fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional mobileFlow fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "applicationType": { + "description": "Application type inferred", + "type": "string" + }, + "appProtocolType": { + "description": "application protocol", + "type": "string" + }, + "appProtocolVersion": { + "description": "application protocol version", + "type": "string" + }, + "cid": { + "description": "cell id", + "type": "string" + }, + "connectionType": { + "description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc", + "type": "string" + }, + "ecgi": { + "description": "Evolved Cell Global Id", + "type": "string" + }, + "flowDirection": { + "description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow", + "type": "string" + }, + "gtpPerFlowMetrics": { + "$ref": "#/definitions/gtpPerFlowMetrics" + }, + "gtpProtocolType": { + "description": "GTP protocol", + "type": "string" + }, + "gtpVersion": { + "description": "GTP protocol version", + "type": "string" + }, + "httpHeader": { + "description": "HTTP request header, if the flow connects to a node referenced by HTTP", + "type": "string" + }, + "imei": { + "description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "imsi": { + "description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "ipProtocolType": { + "description": "IP protocol type e.g., TCP, UDP, RTP...", + "type": "string" + }, + "ipVersion": { + "description": "IP protocol version e.g., IPv4, IPv6", + "type": "string" + }, + "lac": { + "description": "location area code", + "type": "string" + }, + "mcc": { + "description": "mobile country code", + "type": "string" + }, + "mnc": { + "description": "mobile network code", + "type": "string" + }, + "mobileFlowFieldsVersion": { + "description": "version of the mobileFlowFields block", + "type": "number" + }, + "msisdn": { + "description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device", + "type": "string" + }, + "otherEndpointIpAddress": { + "description": "IP address for the other endpoint, as used for the flow being reported on", + "type": "string" + }, + "otherEndpointPort": { + "description": "IP Port for the reporting entity, as used for the flow being reported on", + "type": "integer" + }, + "otherFunctionalRole": { + "description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...", + "type": "string" + }, + "rac": { + "description": "routing area code", + 
"type": "string" + }, + "radioAccessTechnology": { + "description": "Radio Access Technology e.g., 2G, 3G, LTE", + "type": "string" + }, + "reportingEndpointIpAddr": { + "description": "IP address for the reporting entity, as used for the flow being reported on", + "type": "string" + }, + "reportingEndpointPort": { + "description": "IP port for the reporting entity, as used for the flow being reported on", + "type": "integer" + }, + "sac": { + "description": "service area code", + "type": "string" + }, + "samplingAlgorithm": { + "description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied", + "type": "integer" + }, + "tac": { + "description": "transport area code", + "type": "string" + }, + "tunnelId": { + "description": "tunnel identifier", + "type": "string" + }, + "vlanId": { + "description": "VLAN identifier used by this flow", + "type": "string" + } + }, + "required": ["flowDirection", + "gtpPerFlowMetrics", + "ipProtocolType", + "ipVersion", + "mobileFlowFieldsVersion", + "otherEndpointIpAddress", + "otherEndpointPort", + "reportingEndpointIpAddr", + "reportingEndpointPort"] + }, + "namedArrayOfFields": { + "description": "an array of name value pairs along with a name for the array", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "arrayOfFields": { + "description": "array of name value pairs", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + } + }, + "required": ["name", + "arrayOfFields"] + }, + "otherFields": { + "description": "fields for events belonging to the 'other' domain of the commonEventHeader domain enumeration", + "type": "object", + "properties": { + "hashOfNameValuePairArrays": { + "description": "array of named name-value-pair arrays", + "type": "array", + "items": { + "$ref": "#/definitions/namedArrayOfFields" + } + }, + "jsonObjects": { + "description": "array of JSON objects described by name, schema and other meta-information", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObject" + } + }, + "nameValuePairs": { + "description": "array of name-value pairs", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "otherFieldsVersion": { + "description": "version of the otherFields block", + "type": "number" + } + }, + "required": ["otherFieldsVersion"] + }, + "requestError": { + "description": "standard request error data structure", + "type": "object", + "properties": { + "messageId": { + "description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception", + "type": "string" + }, + "text": { + "description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1", + "type": "string" + }, + "url": { + "description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents", + "type": "string" + }, + "variables": { + "description": "List of zero or more strings that represent the contents of the variables used by the message text", + "type": "string" + } + }, + "required": ["messageId", + "text"] + }, + "sipSignalingFields": { + "description": "sip signaling fields", + "type": "object", + "properties": { + "additionalInformation": { + "description": "additional sip signaling fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + 
"compressedSip": { + "description": "the full SIP request/response including headers and bodies", + "type": "string" + }, + "correlator": { + "description": "this is the same for all events on this call", + "type": "string" + }, + "localIpAddress": { + "description": "IP address on VNF", + "type": "string" + }, + "localPort": { + "description": "port on VNF", + "type": "string" + }, + "remoteIpAddress": { + "description": "IP address of peer endpoint", + "type": "string" + }, + "remotePort": { + "description": "port of peer endpoint", + "type": "string" + }, + "sipSignalingFieldsVersion": { + "description": "version of the sipSignalingFields block", + "type": "number" + }, + "summarySip": { + "description": "the SIP Method or Response (�INVITE�, �200 OK�, �BYE�, etc)", + "type": "string" + }, + "vendorVnfNameFields": { + "$ref": "#/definitions/vendorVnfNameFields" + } + }, + "required": ["correlator", + "localIpAddress", + "localPort", + "remoteIpAddress", + "remotePort", + "sipSignalingFieldsVersion", + "vendorVnfNameFields"] + }, + "stateChangeFields": { + "description": "stateChange fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional stateChange fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "newState": { + "description": "new state of the entity", + "type": "string", + "enum": ["inService", + "maintenance", + "outOfService"] + }, + "oldState": { + "description": "previous state of the entity", + "type": "string", + "enum": ["inService", + "maintenance", + "outOfService"] + }, + "stateChangeFieldsVersion": { + "description": "version of the stateChangeFields block", + "type": "number" + }, + "stateInterface": { + "description": "card or port name of the entity that changed state", + "type": "string" + } + }, + "required": ["newState", + "oldState", + "stateChangeFieldsVersion", + "stateInterface"] + }, + "suppressedNvPairs": { + "description": "List of specific NvPairsNames to suppress within a given Name-Value Field for event Throttling", + "type": "object", + "properties": { + "nvPairFieldName": { + "description": "Name of the field within which are the nvpair names to suppress", + "type": "string" + }, + "suppressedNvPairNames": { + "description": "Array of nvpair names to suppress within the nvpairFieldName", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["nvPairFieldName", + "suppressedNvPairNames"] + }, + "syslogFields": { + "description": "sysLog fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional syslog fields if needed provided as name=value delimited by a pipe �|� symbol, for example: 'name1=value1|name2=value2|�'", + "type": "string" + }, + "eventSourceHost": { + "description": "hostname of the device", + "type": "string" + }, + "eventSourceType": { + "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "syslogFacility": { + "description": "numeric code from 0 to 23 for facility--see table in documentation", + "type": "integer" + }, + "syslogFieldsVersion": { + "description": "version of the syslogFields block", + "type": "number" + }, + "syslogMsg": { + "description": "syslog message", + "type": "string" + }, + "syslogPri": { + "description": "0-192 combined severity and facility", + "type": "integer" + }, + "syslogProc": { + "description": "identifies the application that 
originated the message", + "type": "string" + }, + "syslogProcId": { + "description": "a change in the value of this field indicates a discontinuity in syslog reporting", + "type": "number" + }, + "syslogSData": { + "description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs", + "type": "string" + }, + "syslogSdId": { + "description": "0-32 char in format name@number for example ourSDID@32473", + "type": "string" + }, + "syslogSev": { + "description": "numerical Code for severity derived from syslogPri as remainder of syslogPri / 8", + "type": "string", + "enum": ["Alert", + "Critical", + "Debug", + "Emergency", + "Error", + "Info", + "Notice", + "Warning"] + }, + "syslogTag": { + "description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided", + "type": "string" + }, + "syslogVer": { + "description": "IANA assigned version of the syslog protocol specification - typically 1", + "type": "number" + } + }, + "required": ["eventSourceType", + "syslogFieldsVersion", + "syslogMsg", + "syslogTag"] + }, + "thresholdCrossingAlertFields": { + "description": "fields specific to threshold crossing alert events", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional threshold crossing alert fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalParameters": { + "description": "performance counters", + "type": "array", + "items": { + "$ref": "#/definitions/counter" + } + }, + "alertAction": { + "description": "Event action", + "type": "string", + "enum": ["CLEAR", + "CONT", + "SET"] + }, + "alertDescription": { + "description": "Unique short alert description such as IF-SHUB-ERRDROP", + "type": "string" + }, + "alertType": { + "description": "Event type", + "type": "string", + "enum": ["CARD-ANOMALY", + "ELEMENT-ANOMALY", + "INTERFACE-ANOMALY", + "SERVICE-ANOMALY"] + }, + "alertValue": { + "description": "Calculated API value (if applicable)", + "type": "string" + }, + "associatedAlertIdList": { + "description": "List of eventIds associated with the event being reported", + "type": "array", + "items": { + "type": "string" + } + }, + "collectionTimestamp": { + "description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "dataCollector": { + "description": "Specific performance collector instance used", + "type": "string" + }, + "elementType": { + "description": "type of network element - internal ATT field", + "type": "string" + }, + "eventSeverity": { + "description": "event severity or priority", + "type": "string", + "enum": ["CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL"] + }, + "eventStartTimestamp": { + "description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "interfaceName": { + "description": "Physical or logical port or card (if applicable)", + "type": "string" + }, + "networkService": { + "description": "network name - internal ATT field", + "type": "string" + }, + "possibleRootCause": { + "description": "Reserved for future use", + "type": "string" + }, + "thresholdCrossingFieldsVersion": { + "description": "version of the thresholdCrossingAlertFields block", + "type": "number" + } + }, + "required": ["additionalParameters", + "alertAction", + "alertDescription", + "alertType", 
+ "collectionTimestamp", + "eventSeverity", + "eventStartTimestamp", + "thresholdCrossingFieldsVersion"] + }, + "vendorVnfNameFields": { + "description": "provides vendor, vnf and vfModule identifying information", + "type": "object", + "properties": { + "vendorName": { + "description": "VNF vendor name", + "type": "string" + }, + "vfModuleName": { + "description": "ASDC vfModuleName for the vfModule generating the event", + "type": "string" + }, + "vnfName": { + "description": "ASDC modelName for the VNF generating the event", + "type": "string" + } + }, + "required": ["vendorName"] + }, + "vNicPerformance": { + "description": "describes the performance and errors of an identified virtual network interface card", + "type": "object", + "properties": { + "receivedBroadcastPacketsAccumulated": { + "description": "Cumulative count of broadcast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedBroadcastPacketsDelta": { + "description": "Count of broadcast packets received within the measurement interval", + "type": "number" + }, + "receivedDiscardedPacketsAccumulated": { + "description": "Cumulative count of discarded packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedDiscardedPacketsDelta": { + "description": "Count of discarded packets received within the measurement interval", + "type": "number" + }, + "receivedErrorPacketsAccumulated": { + "description": "Cumulative count of error packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedErrorPacketsDelta": { + "description": "Count of error packets received within the measurement interval", + "type": "number" + }, + "receivedMulticastPacketsAccumulated": { + "description": "Cumulative count of multicast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedMulticastPacketsDelta": { + "description": "Count of multicast packets received within the measurement interval", + "type": "number" + }, + "receivedOctetsAccumulated": { + "description": "Cumulative count of octets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedOctetsDelta": { + "description": "Count of octets received within the measurement interval", + "type": "number" + }, + "receivedTotalPacketsAccumulated": { + "description": "Cumulative count of all packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedTotalPacketsDelta": { + "description": "Count of all packets received within the measurement interval", + "type": "number" + }, + "receivedUnicastPacketsAccumulated": { + "description": "Cumulative count of unicast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedUnicastPacketsDelta": { + "description": "Count of unicast packets received within the measurement interval", + "type": "number" + }, + "transmittedBroadcastPacketsAccumulated": { + "description": "Cumulative count of broadcast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedBroadcastPacketsDelta": { + "description": "Count of broadcast packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedDiscardedPacketsAccumulated": { + "description": "Cumulative count of discarded packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedDiscardedPacketsDelta": { 
+ "description": "Count of discarded packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedErrorPacketsAccumulated": { + "description": "Cumulative count of error packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedErrorPacketsDelta": { + "description": "Count of error packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedMulticastPacketsAccumulated": { + "description": "Cumulative count of multicast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedMulticastPacketsDelta": { + "description": "Count of multicast packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedOctetsAccumulated": { + "description": "Cumulative count of octets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedOctetsDelta": { + "description": "Count of octets transmitted within the measurement interval", + "type": "number" + }, + "transmittedTotalPacketsAccumulated": { + "description": "Cumulative count of all packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedTotalPacketsDelta": { + "description": "Count of all packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedUnicastPacketsAccumulated": { + "description": "Cumulative count of unicast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedUnicastPacketsDelta": { + "description": "Count of unicast packets transmitted within the measurement interval", + "type": "number" + }, + "valuesAreSuspect": { + "description": "Indicates whether vNicPerformance values are likely inaccurate due to counter overflow or other condtions", + "type": "string", + "enum": ["true", + "false"] + }, + "vNicIdentifier": { + "description": "vNic identification", + "type": "string" + } + }, + "required": ["valuesAreSuspect", + "vNicIdentifier"] + }, + "voiceQualityFields": { + "description": "provides statistics related to customer facing voice products", + "type": "object", + "properties": { + "additionalInformation": { + "description": "additional voice quality fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "calleeSideCodec": { + "description": "callee codec for the call", + "type": "string" + }, + "callerSideCodec": { + "description": "caller codec for the call", + "type": "string" + }, + "correlator": { + "description": "this is the same for all events on this call", + "type": "string" + }, + "endOfCallVqmSummaries": { + "$ref": "#/definitions/endOfCallVqmSummaries" + }, + "phoneNumber": { + "description": "phone number associated with the correlator", + "type": "string" + }, + "midCallRtcp": { + "description": "Base64 encoding of the binary RTCP data excluding Eth/IP/UDP headers", + "type": "string" + }, + "vendorVnfNameFields": { + "$ref": "#/definitions/vendorVnfNameFields" + }, + "voiceQualityFieldsVersion": { + "description": "version of the voiceQualityFields block", + "type": "number" + } + }, + "required": ["calleeSideCodec", + "callerSideCodec", + "correlator", + "midCallRtcp", + "vendorVnfNameFields", + "voiceQualityFieldsVersion"] + } + }, + "title": "Event Listener", + "type": "object", + "properties": { + "event": { + "$ref": "#/definitions/event" + } + } +} diff --git 
a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/logback-spring.xml b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/logback-spring.xml new file mode 100644 index 0000000..7c0bfb6 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/logback-spring.xml @@ -0,0 +1,342 @@ +<!-- Copyright (c) 2016 AT&T Intellectual Property. All rights reserved. --> +<configuration scan="true" scanPeriod="3 seconds"> + <include resource="org/springframework/boot/logging/logback/base.xml"/> + <!--<jmxConfigurator /> --> + + <!-- specify the component name --> + <property name="componentName" value="DCAE"></property> + + <!-- log file names --> + <property name="generalLogName" value="application" /> + <property name="securityLogName" value="security" /> + <property name="performanceLogName" value="performance" /> + <property name="serverLogName" value="server" /> + <property name="policyLogName" value="policy" /> + <property name="errorLogName" value="error" /> + <property name="metricsLogName" value="metrics" /> + <property name="auditLogName" value="audit" /> + <property name="debugLogName" value="debug" /> + + <property name="defaultPattern" value="%date{ISO8601,UTC}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Timer}| %msg%n" /> + + <property name="auditLoggerPattern" value="%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" /> + <property name="metricsLoggerPattern" value="%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{TargetVirtualEntity}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" /> + <property name="errorLoggerPattern" value="%date{ISO8601,UTC}|%X{RequestId}|%thread|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%.-5level|%X{ErrorCode}|%X{ErrorDescription}| %msg%n" /> + <property name="debugLoggerPattern" value="%date{ISO8601,UTC}|%X{RequestId}| %msg%n" /> + + <property name="logDirectory" value="${log.home}/${componentName}" /> + + + <!-- Example evaluator filter applied against console appender --> + <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + + <!-- ============================================================================ --> + <!-- EELF Appenders --> + <!-- ============================================================================ --> + + <!-- The EELFAppender is used to record events to the general application + log --> + + + <appender name="EELF" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${generalLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> 
+ <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELF" /> + </appender> + + <!-- EELF Security Appender. This appender is used to record security events + to the security log file. Security events are separate from other loggers + in EELF so that security log records can be captured and managed in a secure + way separate from the other logs. This appender is set to never discard any + events. --> + <appender name="EELFSecurity" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${securityLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <discardingThreshold>0</discardingThreshold> + <appender-ref ref="EELFSecurity" /> + </appender> + + <!-- EELF Performance Appender. This appender is used to record performance + records. --> + <appender name="EELFPerformance" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${performanceLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFPerformance" /> + </appender> + + <!-- EELF Server Appender. This appender is used to record Server related + logging events. The Server logger and appender are specializations of the + EELF application root logger and appender. This can be used to segregate Server + events from other components, or it can be eliminated to record these events + as part of the application root log. 
--> + <appender name="EELFServer" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${serverLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFServer" /> + </appender> + + + <!-- EELF Policy Appender. This appender is used to record Policy engine + related logging events. The Policy logger and appender are specializations + of the EELF application root logger and appender. This can be used to segregate + Policy engine events from other components, or it can be eliminated to record + these events as part of the application root log. --> + <appender name="EELFPolicy" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${policyLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFPolicy" /> + </appender> + + + <!-- EELF Audit Appender. This appender is used to record audit engine + related logging events. The audit logger and appender are specializations + of the EELF application root logger and appender. This can be used to segregate + Policy engine events from other components, or it can be eliminated to record + these events as part of the application root log. 
--> + + <!-- Audit log --> + <appender name="EELFAudit" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${auditLogName}.log</file> + <!-- Audit messages filter - accept audit messages --> + <filter class="ch.qos.logback.core.filter.EvaluatorFilter"> + <evaluator class="ch.qos.logback.classic.boolex.OnMarkerEvaluator"> + <marker>AUDIT_MARKER</marker> + <marker>AUDIT</marker> + </evaluator> + <onMismatch>DENY</onMismatch> + <onMatch>ACCEPT</onMatch> + </filter> + + <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip</fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>10</maxIndex> + </rollingPolicy> + + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>20MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${auditLoggerPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFAudit" /> + </appender> + +<appender name="EELFMetrics" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${metricsLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - + %msg%n"</pattern> --> + <pattern>${metricsLoggerPattern}</pattern> + </encoder> + </appender> + + + <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFMetrics"/> + </appender> + + <appender name="EELFError" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${errorLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${errorLoggerPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFError"/> + </appender> + + <appender name="EELFDebug" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${debugLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${debugLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${debugLoggerPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFDebug" /> + <includeCallerData>true</includeCallerData> + </appender> + + + <!-- 
============================================================================ --> + <!-- EELF loggers --> + <!-- ============================================================================ --> + <logger name="com.att.eelf" level="info" additivity="false"> + <appender-ref ref="asyncEELF" /> + </logger> + <logger name="com.att.eelf.security" level="info" additivity="false"> + <appender-ref ref="asyncEELFSecurity" /> + </logger> + <logger name="com.att.eelf.perf" level="info" additivity="false"> + <appender-ref ref="asyncEELFPerformance" /> + </logger> + <logger name="com.att.eelf.server" level="info" additivity="false"> + <appender-ref ref="asyncEELFServer" /> + </logger> + <logger name="com.att.eelf.policy" level="info" additivity="false"> + <appender-ref ref="asyncEELFPolicy" /> + </logger> + + <logger name="org.openecomp.sdc.common.ecomplog.EcompLoggerAudit" level="info" additivity="false"> + <appender-ref ref="asyncEELFAudit" /> + </logger> + + <logger name="com.att.eelf.metrics" level="info" additivity="false"> + <appender-ref ref="asyncEELFMetrics" /> + </logger> + + + <logger name="com.att.eelf.error" level="error" additivity="false"> + <appender-ref ref="asyncEELFError" /> + </logger> + + <logger name="com.att.eelf.debug" level="debug" additivity="false"> + <appender-ref ref="asyncEELFDebug" /> + </logger> + + + + + <root level="INFO"> + <appender-ref ref="asyncEELF" /> + </root> + +</configuration> diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/org.onap.sdc.p12 b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/org.onap.sdc.p12 Binary files differnew file mode 100644 index 0000000..ee000dc --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/org.onap.sdc.p12 diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/org.onap.sdc.trust.jks b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/org.onap.sdc.trust.jks Binary files differnew file mode 100644 index 0000000..342c4f2 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/files/default/org.onap.sdc.trust.jks diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/consumer_creation.rb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/consumer_creation.rb new file mode 100644 index 0000000..b6edb82 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/consumer_creation.rb @@ -0,0 +1,29 @@ +sdc_be_vip = node['BE_VIP'] + +if node['disableHttp'] + protocol = "https" + sdc_be_port = node['SDC']['BE'][:https_port] +else + protocol = "http" + sdc_be_port = node['SDC']['BE'][:http_port] +end + +template "/var/tmp/consumers.py" do + source "consumers.py.erb" + owner "jetty" + group "jetty" + mode "0755" + variables({ + :consumerName => node['DCAE']['consumerName'], + :protocol => protocol, + :catalog_ip => sdc_be_vip, + :catalog_port => sdc_be_port + }) +end + +bash "create dcaeTest user" do + cwd '/var/tmp' + code <<-EOH + python /var/tmp/consumers.py + EOH +end diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/dcae_setup.rb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/dcae_setup.rb new file mode 100644 index 0000000..94ce0df --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/dcae_setup.rb @@ -0,0 +1,110 @@ +sdc_be_vip = node['BE_VIP'] +dcae_tosca_lab_vip = node['DCAE_TOSCA_LAB_VIP'] + + +if node['disableHttp'] + protocol = "https" + sdc_be_port = node['SDC']['BE']['https_port'] + dcae_be_port = 
node['DCAE']['BE']['https_port'] + dcae_tosca_lab_port = node['DCAE']['TOSCA_LAB']['https_port'] +else + protocol = "http" + sdc_be_port = node['SDC']['BE'][:http_port] + dcae_be_port = node['DCAE']['BE'][:http_port] + dcae_tosca_lab_port = node['DCAE']['TOSCA_LAB']['http_port'] +end + + +printf("DEBUG: [%s]:[%s] disableHttp=[%s], protocol=[%s], sdc_be_vip=[%s], sdc_be_port=[%s] !!! \n", cookbook_name, recipe_name, node['disableHttp'], protocol, sdc_be_vip ,sdc_be_port ) +raise "[ERROR] 'SDC_BE_FQDN' is not defined" if sdc_be_vip.nil? || sdc_be_vip == "" + + +directory "DCAE BE_tempdir_creation" do + path "#{ENV['JETTY_BASE']}/temp" + owner 'jetty' + group 'jetty' + mode '0755' + action :create +end + + +directory "#{ENV['JETTY_BASE']}/config" do + owner "jetty" + group "jetty" + mode '0755' + recursive true + action :create +end + + +directory "#{ENV['JETTY_BASE']}/config/dcae-be" do + owner "jetty" + group "jetty" + mode '0755' + recursive true + action :create +end + + +directory "#{ENV['JETTY_BASE']}/config/dcae-be/ves-schema" do + owner "jetty" + group "jetty" + mode '0755' + recursive true + action :create +end + + +template "dcae-be-config" do + sensitive true + path "#{ENV['JETTY_BASE']}/config/dcae-be/application.properties" + source "dcae-application.properties.erb" + owner "jetty" + group "jetty" + mode "0755" + variables({ + :consumerAuth => node['DCAE']['consumerName'] + ":" + node['DCAE']['consumerPass'] , + :dcae_be_port => dcae_be_port, + :sdc_be_vip => sdc_be_vip, + :sdc_be_port => sdc_be_port, + :protocol => protocol, + :dcae_tosca_lab_vip => dcae_tosca_lab_vip, + :dcae_tosca_lab_port => dcae_tosca_lab_port + }) +end + + +cookbook_file "#{ENV['JETTY_BASE']}/config/dcae-be/ves-schema/CommonEventFormat_v4.1.json" do + source "CommonEventFormat_v4.1.json" + owner "jetty" + group "jetty" + mode 0755 +end + + +cookbook_file "#{ENV['JETTY_BASE']}/config/dcae-be/ves-schema/CommonEventFormat_v5.3.json" do + source "CommonEventFormat_v5.3.json" + owner "jetty" + group "jetty" + mode 0755 +end + + +template "dcae-error-configuration" do + sensitive true + path "#{ENV['JETTY_BASE']}/config/dcae-be/error-configuration.yaml" + source "dcae-error-configuration.erb" + owner "jetty" + group "jetty" + mode "0755" +end + + +template "dcae-logback-spring-config" do + sensitive true + path "#{ENV['JETTY_BASE']}/config/dcae-be/logback-spring.xml" + source "dcae-logback-spring.erb" + owner "jetty" + group "jetty" + mode "0755" +end diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/jetty_setup.rb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/jetty_setup.rb new file mode 100644 index 0000000..36f6853 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/recipes/jetty_setup.rb @@ -0,0 +1,84 @@ +#Set the http module option +if node['disableHttp'] + http_option = "#--module=http" +else + http_option = "--module=http" +end + + +printf("DEBUG: [%s]:[%s] disableHttp=[%s], http_option=[%s] !!! 
\n", cookbook_name, recipe_name, node['disableHttp'], http_option ) + + +directory "Jetty_etcdir_creation" do + path "#{ENV['JETTY_BASE']}/etc" + owner 'jetty' + group 'jetty' + mode '0755' + action :create +end + + +cookbook_file "#{ENV['JETTY_BASE']}/etc/org.onap.sdc.p12" do + source "org.onap.sdc.p12" + owner "jetty" + group "jetty" + mode 0755 +end + + +cookbook_file "#{ENV['JETTY_BASE']}/etc/org.onap.sdc.trust.jks" do + source "org.onap.sdc.trust.jks" + owner "jetty" + group "jetty" + mode 0755 +end + + +bash "create-jetty-modules" do + cwd "#{ENV['JETTY_BASE']}" + code <<-EOH + cd "#{ENV['JETTY_BASE']}" + java -jar "/#{ENV['JETTY_HOME']}"/start.jar --add-to-start=deploy + java -jar "/#{ENV['JETTY_HOME']}"/start.jar --add-to-startd=http,https,console-capture,setuid + EOH +end + + +template "http-ini" do + path "#{ENV['JETTY_BASE']}/start.d/http.ini" + source "http-ini.erb" + owner "jetty" + group "jetty" + mode "0755" + variables({ + :http_option => http_option , + :http_port => "#{node['DCAE']['BE'][:http_port]}" + }) +end + + +template "https-ini" do + path "#{ENV['JETTY_BASE']}/start.d/https.ini" + source "https-ini.erb" + owner "jetty" + group "jetty" + mode "0755" + variables({ + :https_port => "#{node['DCAE']['BE'][:https_port]}" + }) +end + + +template "ssl-ini" do + path "#{ENV['JETTY_BASE']}/start.d/ssl.ini" + source "ssl-ini.erb" + owner "jetty" + group "jetty" + mode "0755" + variables({ + :https_port => "#{node['DCAE']['BE'][:https_port]}" , + :jetty_keystore_pwd => "#{node['jetty'][:keystore_pwd]}" , + :jetty_keymanager_pwd => "#{node['jetty'][:keymanager_pwd]}" , + :jetty_truststore_pwd => "#{node['jetty'][:truststore_pwd]}" + }) +end diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/consumers.py.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/consumers.py.erb new file mode 100644 index 0000000..c6b412c --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/consumers.py.erb @@ -0,0 +1,97 @@ +#!/usr/bin/python +import sys +import subprocess +#from time import sleep +import time +from datetime import datetime + + +class BColors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + +############################## +# Functions +############################## +def check_backend(): + command="curl -s -k --cacert org.onap.sdc.key -o /dev/null -I -w \"%{http_code}\" " \ + "-i <%= @protocol %>://<%= @catalog_ip %>:<%= @catalog_port %>/sdc2/rest/v1/user/jh0003" + + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + (out, err) = proc.communicate() + result = out.strip() + return result + + +def check_consumer(consumer_name): + command="curl -s -k --cacert org.onap.sdc.key -o /dev/null -I -w \"%{http_code}\" -i -H " \ + "\"Accept: application/json; charset=UTF-8\" " \ + "-H \"Content-Type: application/json\" -H \"USER_ID: jh0003\" " \ + "\"<%= @protocol %>://<%= @catalog_ip %>:<%= @catalog_port %>/sdc2/rest/v1/consumers/" + consumer_name + + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + (out, err) = proc.communicate() + result = out.strip() + return result + + +def create_consumer(consumerName, consumerSalt, consumerPass): + print '[INFO] ' + consumerName + command="curl -s -k --cacert org.onap.sdc.key -o /dev/null -w \"%{http_code}\" -X POST -i -H " \ + "\"Accept: application/json; charset=UTF-8\" -H \"Content-Type: 
application/json\" -H \"USER_ID: jh0003\" " \ + "<%= @protocol %>://<%= @catalog_ip %>:<%= @catalog_port %>/sdc2/rest/v1/consumers/ " \ + "-d '{\"consumerName\": '" + consumerName + "', \"consumerSalt\": '" \ + + consumerSalt + "',\"consumerPassword\": '" + consumerPass + "'}'" + + proc = subprocess.Popen( command , shell=True , stdout=subprocess.PIPE) + (out, err) = proc.communicate() + result = out.strip() + return result + + +############################## +# Definitions +############################## +consumers_list = ["<%= @consumerName %>"] +salt = "67fbde1c142bb25c7d6086252d6ab08d" +password = "d6e61a6859456cf4ded84f641ae59301ebf19d56cd5fc8a8f15b7cc54d3b6429" +beStat=0 + + +############################## +# Main +############################## + +for i in range(1,10): + my_result = check_backend() + if my_result == '200': + print '[INFO]: SDC Backend is up and running' + beStat=1 + break + else: + currentTime = datetime.now() + print '[ERROR]: ' + currentTime.strftime('%Y/%m/%d %H:%M:%S') + BColors.FAIL + \ + ' SDC Backend not responding, try #' + str(i) + BColors.ENDC + time.sleep(10) + +if beStat == 0: + print '[ERROR]: ' + time.strftime('%Y/%m/%d %H:%M:%S') + BColors.FAIL + 'SDC Backend is DOWN :-(' + BColors.ENDC + sys.exit(0) + +for consumer in consumers_list: + my_result = check_consumer(consumer) + if my_result == '200': + print '[INFO]: ' + consumer + ' already exists' + else: + my_result = create_consumer(consumer, salt, password) + if my_result == '201': + print '[INFO]: ' + consumer + ' created, result: [' + my_result + ']' + else: + print '[ERROR]: ' + BColors.FAIL + consumer + BColors.ENDC + ' error creating , result: [' + my_result + ']' diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-application.properties.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-application.properties.erb new file mode 100644 index 0000000..ecfa0bc --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-application.properties.erb @@ -0,0 +1,43 @@ +# Configure the server to run with SSL/TLS and using HTTPS +server.port=<%= @dcae_be_port %> +server.contextPath=/ +##ScheduleTimer +scheduled.timer.value=5000 +asdc_catalog_url=asdc +uri=<%= @protocol %>://<%= @consumerAuth %>@<%= @sdc_be_vip %>:<%= @sdc_be_port %>#demo +toscalab.url=http://<%= @dcae_tosca_lab_vip %>:<%= @dcae_tosca_lab_port %> +blueprinter.uri=${toscalab.url}/translate +blueprinter.hcuri=${toscalab.url}/healthcheck +blueprinter.hcretrynum=3 +asdc_rootPath=/sdc/v1/catalog/ +healthpoller.fixedDelay=120000 +compositionConfig.isRuleEditorActive=true +compositionConfig.flowTypes={"Syslog":{"entryPointPhaseName":"syslog_map","lastPhaseName":"map_publish"},\ + "SNMP":{"entryPointPhaseName":"snmp_map","lastPhaseName":"map_publish"},\ + "FOI":{"entryPointPhaseName":"foi_map","lastPhaseName":"map_publish"},\ + "Guest OS":{"entryPointPhaseName":"guest_os_map","lastPhaseName":"map_publish"},\ + "Status Poller":{"entryPointPhaseName":"status_poller_map","lastPhaseName":"map_publish"},\ + "SNMP Polling":{"entryPointPhaseName":"snmp_polling_map","lastPhaseName":"map_publish"},\ + "TCA Hi Lo":{"entryPointPhaseName":"tca_hi_lo_map","lastPhaseName":"map_publish"},\ + "Syslog Collector":{"entryPointPhaseName":"syslog_map","lastPhaseName":"syslog_publish"},\ + "Syslog MSEA":{"entryPointPhaseName":"syslog_map","lastPhaseName":"syslog_publish"},\ + "Status Poller 
Collector":{"entryPointPhaseName":"status_poller_map","lastPhaseName":"status_poller_publish"},\ + "Status Poller MSE":{"entryPointPhaseName":"snmp_map","lastPhaseName":"snmp_publish"},\ + "FOI Collector":{"entryPointPhaseName":"pmossFoiPhase","lastPhaseName":"foiEventToDmaapPhase"},\ + "Docker Map":{"entryPointPhaseName":"docker_map","lastPhaseName":"docker_publish"},\ + "SNMP MSE":{"entryPointPhaseName":"snmp_map","lastPhaseName":"snmp_publish"},\ + "SAM Collector":{"entryPointPhaseName":"sam_collector_map","lastPhaseName":"sam_collector_publish"},\ + "Docker MSE":{"entryPointPhaseName":"docker_map","lastPhaseName":"docker_publish"},\ + "SNMP PM Poller":{"entryPointPhaseName":"docker_map","lastPhaseName":"docker_publilsh"},\ + "Discovery and MIB Poller":{"entryPointPhaseName":"snmp_pm_map","lastPhaseName":"snmp_pm_publish"},\ + "Nagios docker MSE":{"entryPointPhaseName":"docker_map","lastPhaseName":"docker_publish"},\ + "Discovery Poller":{"entryPointPhaseName":"snmp_pm_map","lastPhaseName":"snmp_pm_publish"},\ + "MIB Poller":{"entryPointPhaseName":"snmp_pm_map","lastPhaseName":"snmp_pm_publish"},\ + "VES Fault SE":{"entryPointPhaseName":"ves_fault_map","lastPhaseName":"ves_fault_publish"},\ + "Docker Supplement":{"entryPointPhaseName":"docker_map","lastPhaseName":"docker_publish"},\ + "Docker Enrich":{"entryPointPhaseName":"docker_map","lastPhaseName":"docker_publish"},\ + "VES Collector":{"entryPointPhaseName":"sam_collector_map","lastPhaseName":"sam_collector_publish"},\ + "VES Fault":{"entryPointPhaseName":"ves_fault_map","lastPhaseName":"ves_fault_publish"},\ + "VES Heartbeat":{"entryPointPhaseName":"ves_heartbeat_map","lastPhaseName":"ves_heartbeat_publish"},\ + "VES Measurement":{"entryPointPhaseName":"ves_measurement_map","lastPhaseName":"ves_measurement_publish"},\ + "VES Syslog":{"entryPointPhaseName":"ves_syslog_map","lastPhaseName":"ves_syslog_publish"}} diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-error-configuration.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-error-configuration.erb new file mode 100644 index 0000000..d1fab01 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-error-configuration.erb @@ -0,0 +1,296 @@ +# DCAE-D Errors +errors: + OK: { + code: 200, + message: "OK", + messageId: "200" + } + CREATED: { + code: 201, + message: "OK", + messageId: "201" + } + NO_CONTENT: { + code: 204, + message: "No Content", + messageId: "204" + } +#--------POL4050----------------------------- + NOT_ALLOWED: { + code: 405, + message: "Error: Method not allowed.", + messageId: "POL4050" + } +#--------POL5000----------------------------- + GENERAL_ERROR: { + code: 500, + message: "Error: Internal Server Error. Please try again later.", + messageId: "POL5000" + } +#--------POL5500----------------------------- + CATALOG_NOT_AVAILABLE: { + code: 502, + message: "Error - SDC Catalog currently not available. Please try again later", + messageId: "POL5500" + } +#--------POL5501----------------------------- + AUTH_ERROR: { + code: 502, + message: "Error – connection to SDC catalog could not be authenticated. 
Please contact your administrator", + messageId: "POL5501" + } + +#---------SVC4000----------------------------- + INVALID_CONTENT: { + code: 400, + message: "Error: Invalid content.", + messageId: "SVC4000" + } + +#---------SVC6001----------------------------- + NOT_FOUND: { + code: 404, + message: "No Monitoring Templates were found.", + messageId: "SVC6001" + } +#---------SVC6005----------------------------- + CONFIGURATION_ERROR: { + code: 500, + message: "Error – Failed to find configuration.", + messageId: "SVC6005" + } +#---------SVC6006----------------------------- + VES_SCHEMA_NOT_FOUND: { + code: 500, + message: "Error – Failed to find VES Schema definitions.", + messageId: "SVC6006" + } +#---------SVC6007----------------------------- +# %1 - VES Schema filename +# %2 - error reason + VES_SCHEMA_INVALID: { + code: 500, + message: "Error – Failed to parse VES Schema file '%1'. [%2]", + messageId: "SVC6007" + } +#---------SVC6008----------------------------- + FLOW_TYPES_CONFIGURATION_ERROR: { + code: 500, + message: "Error – Failed to read flow type definitions.", + messageId: "SVC6008" + } +#---------SVC6010----------------------------- + CLONE_FAILED: { + code: 409, + message: "Error – could not import Monitoring Template.", + messageId: "SVC6010" + } +#---------SVC6020----------------------------- + EMPTY_SERVICE_LIST: { + code: 404, + message: "Service List is not available.", + messageId: "SVC6020" + } +#---------SVC6021----------------------------- + MONITORING_TEMPLATE_ATTACHMENT_ERROR: { + code: 409, + message: "Error – Monitoring Template is not available for editing.", + messageId: "SVC6021" + } + +#---------SVC6031----------------------------- +# %1 - Component Name + MISSING_TOSCA_FILE: { + code: 404, + message: "Error – Could not read component %1 details.", + messageId: "SVC6031" + } +#---------SVC6032----------------------------- +# %1 - Component Name + VALIDATE_TOSCA_ERROR: { + code: 500, + message: "Error – Component %1 details could not be parsed.", + messageId: "SVC6032" + } +#---------SVC6033----------------------------- + SUBMIT_BLUEPRINT_ERROR: { + code: 500, + message: "Error –Failed to submit blueprint.", + messageId: "SVC6033" + } +#---------SVC6034----------------------------- +# %1 -VFCMT Name + GENERATE_BLUEPRINT_ERROR: { + code: 500, + message: "Error – Failed to generate blueprint file for Monitoring Template %1.", + messageId: "SVC6034" + } +#---------SVC6035----------------------------- +# %1 - the error reason + INVALID_RULE_FORMAT: { + code: 400, + message: "Error - Rule format is invalid: %1.", + messageId: "SVC6035" + } +#---------SVC6036----------------------------- + SAVE_RULE_FAILED: { + code: 409, + message: "Error - Failed to save rule. Internal persistence error", + messageId: "SVC6036" + } + +#---------SVC6038----------------------------- +# %1 - VFCMT uuid + RESOURCE_NOT_VFCMT_ERROR: { + code: 400, + message: "Resource with uuid %1 is either not of resourceType='VFCMT' or not of category='Template' or not of subCategory='Monitoring Template'", + messageId: "SVC6038" + } +#---------SVC6039----------------------------- +# %1 - serviceUuid +# %2 - vfiName + VFI_FETCH_ERROR: { + code: 404, + message: "Service %1 does not contain a VFI named %2", + messageId: "SVC6039" + } +#-----------SVC6085--------------------------- +# %1 - current user id +# %2 - resource/service name +# %3 - last modifier user id + USER_CONFLICT: { + code: 403, + message: "Error: User conflict. 
Operation not allowed for user %1 on asset %2 checked out by %3.", + messageId: "SVC6085" + } + +#---------SVC6100----------------------------- + MISSING_RULE_DESCRIPTION: { + code: 400, + message: "Please enter a valid rule description.", + messageId: "SVC6100" + } +#---------SVC6101----------------------------- + MISSING_ACTION: { + code: 400, + message: "Rule must have at least one action.", + messageId: "SVC6101" + } + +#---------SVC6104----------------------------- +# %1 - action target + MISSING_CONCAT_VALUE: { + code: 400, + message: "Please fill all from fields of concat action to %1", + messageId: "SVC6104" + } +#---------SVC6105----------------------------- +# %1 - condition group type + INVALID_GROUP_CONDITION: { + code: 400, + message: "Undefined condition group type: %1", + messageId: "SVC6105" + } + +#---------SVC6106----------------------------- + MISSING_CONDITION_ITEM: { + code: 400, + message: "Please enter all condition items", + messageId: "SVC6106" + } +#---------SVC6107----------------------------- +# %1 - left/right + MISSING_OPERAND: { + code: 400, + message: "Please enter the %1 operand field", + messageId: "SVC6107" + } +#---------SVC6108----------------------------- +# %1 - operator + INVALID_OPERATOR: { + code: 400, + message: "Undefined operator: %1", + messageId: "SVC6108" + } +#---------SVC6109----------------------------- +# %1 - action target + MISSING_ENTRY: { + code: 400, + message: "Please fill all key-value pairs of map action to %1", + messageId: "SVC6109" + } +#---------SVC6110----------------------------- +# %1 - action target + MISSING_DEFAULT_VALUE: { + code: 400, + message: "Please fill the default value of map action to %1", + messageId: "SVC6110" + } +#---------SVC6111----------------------------- + DUPLICATE_KEY: { + code: 400, + message: "Error: Duplication in map keys exists, please modify rule configuration", + messageId: "SVC6111" + } +#---------SVC6112----------------------------- +# %1 - dependent actions + ACTION_DEPENDENCY: { + code: 400, + message: "A circular dependency was detected between actions. The following fields should be resolved: %1", + messageId: "SVC6112" + } +#---------SVC6113----------------------------- +# %1 - dependent rule ids +# %2 - dependent action targets + RULE_DEPENDENCY: { + code: 400, + message: "A circular dependency was detected between rules: %1 within fields: %2", + messageId: "SVC6113" + } +#---------SVC6114----------------------------- +# %1 - dcae component name + NODE_NOT_FOUND: { + code: 400, + message: "DCAE component %1 not found in composition", + messageId: "SVC6114" + } +#---------SVC6115----------------------------- + DELETE_RULE_FAILED: { + code: 409, + message: "Delete rule failed. Internal persistence error", + messageId: "SVC6115" + } +#---------SVC6116----------------------------- +# %1 - reason + TRANSLATE_FAILED: { + code: 400, + message: "Translation failed. Reason: %1", + messageId: "SVC6116" + } +#---------SVC6117----------------------------- +# %1 - field name +# %2 - action type +# %3 - action target + MISSING_ACTION_FIELD: { + code: 400, + message: "Please fill the %1 field of %2 action to %3", + messageId: "SVC6117" + } +#---------SVC6118----------------------------- + DELETE_BLUEPRINT_FAILED: { + code: 400, + message: "The request was partially successful. Removing the attached Blueprint from the service has failed. 
You must manually delete the artifact.", + messageId: "SVC6118" + } +#---------SVC6119----------------------------- + FILTER_NOT_FOUND: { + code: 404, + message: "Filter to delete not found", + messageId: "SVC6119" + } +#---------SVC6120----------------------------- + RULE_OPERATION_FAILED_MISSING_PARAMS: { + code: 404, + message: "One or more parameters is missing in the sent JSON", + messageId: "SVC6120" + }
\ No newline at end of file diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-logback-spring.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-logback-spring.erb new file mode 100644 index 0000000..7c0bfb6 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/dcae-logback-spring.erb @@ -0,0 +1,342 @@ +<!-- Copyright (c) 2016 AT&T Intellectual Property. All rights reserved. --> +<configuration scan="true" scanPeriod="3 seconds"> + <include resource="org/springframework/boot/logging/logback/base.xml"/> + <!--<jmxConfigurator /> --> + + <!-- specify the component name --> + <property name="componentName" value="DCAE"></property> + + <!-- log file names --> + <property name="generalLogName" value="application" /> + <property name="securityLogName" value="security" /> + <property name="performanceLogName" value="performance" /> + <property name="serverLogName" value="server" /> + <property name="policyLogName" value="policy" /> + <property name="errorLogName" value="error" /> + <property name="metricsLogName" value="metrics" /> + <property name="auditLogName" value="audit" /> + <property name="debugLogName" value="debug" /> + + <property name="defaultPattern" value="%date{ISO8601,UTC}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Timer}| %msg%n" /> + + <property name="auditLoggerPattern" value="%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" /> + <property name="metricsLoggerPattern" value="%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{TargetVirtualEntity}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" /> + <property name="errorLoggerPattern" value="%date{ISO8601,UTC}|%X{RequestId}|%thread|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%.-5level|%X{ErrorCode}|%X{ErrorDescription}| %msg%n" /> + <property name="debugLoggerPattern" value="%date{ISO8601,UTC}|%X{RequestId}| %msg%n" /> + + <property name="logDirectory" value="${log.home}/${componentName}" /> + + + <!-- Example evaluator filter applied against console appender --> + <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + + <!-- ============================================================================ --> + <!-- EELF Appenders --> + <!-- ============================================================================ --> + + <!-- The EELFAppender is used to record events to the general application + log --> + + + <appender name="EELF" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${generalLogName}.log</file> + 
<rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELF" /> + </appender> + + <!-- EELF Security Appender. This appender is used to record security events + to the security log file. Security events are separate from other loggers + in EELF so that security log records can be captured and managed in a secure + way separate from the other logs. This appender is set to never discard any + events. --> + <appender name="EELFSecurity" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${securityLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <discardingThreshold>0</discardingThreshold> + <appender-ref ref="EELFSecurity" /> + </appender> + + <!-- EELF Performance Appender. This appender is used to record performance + records. --> + <appender name="EELFPerformance" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${performanceLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFPerformance" /> + </appender> + + <!-- EELF Server Appender. This appender is used to record Server related + logging events. The Server logger and appender are specializations of the + EELF application root logger and appender. This can be used to segregate Server + events from other components, or it can be eliminated to record these events + as part of the application root log. 
--> + <appender name="EELFServer" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${serverLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFServer" /> + </appender> + + + <!-- EELF Policy Appender. This appender is used to record Policy engine + related logging events. The Policy logger and appender are specializations + of the EELF application root logger and appender. This can be used to segregate + Policy engine events from other components, or it can be eliminated to record + these events as part of the application root log. --> + <appender name="EELFPolicy" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${policyLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${defaultPattern}</pattern> + </encoder> + </appender> + <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFPolicy" /> + </appender> + + + <!-- EELF Audit Appender. This appender is used to record audit engine + related logging events. The audit logger and appender are specializations + of the EELF application root logger and appender. This can be used to segregate + Policy engine events from other components, or it can be eliminated to record + these events as part of the application root log. 
--> + + <!-- Audit log --> + <appender name="EELFAudit" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${auditLogName}.log</file> + <!-- Audit messages filter - accept audit messages --> + <filter class="ch.qos.logback.core.filter.EvaluatorFilter"> + <evaluator class="ch.qos.logback.classic.boolex.OnMarkerEvaluator"> + <marker>AUDIT_MARKER</marker> + <marker>AUDIT</marker> + </evaluator> + <onMismatch>DENY</onMismatch> + <onMatch>ACCEPT</onMatch> + </filter> + + <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip</fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>10</maxIndex> + </rollingPolicy> + + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>20MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${auditLoggerPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFAudit" /> + </appender> + +<appender name="EELFMetrics" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${metricsLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - + %msg%n"</pattern> --> + <pattern>${metricsLoggerPattern}</pattern> + </encoder> + </appender> + + + <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFMetrics"/> + </appender> + + <appender name="EELFError" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${errorLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${errorLoggerPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFError"/> + </appender> + + <appender name="EELFDebug" + class="ch.qos.logback.core.rolling.RollingFileAppender"> + <file>${logDirectory}/${debugLogName}.log</file> + <rollingPolicy + class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy"> + <fileNamePattern>${logDirectory}/${debugLogName}.%i.log.zip + </fileNamePattern> + <minIndex>1</minIndex> + <maxIndex>9</maxIndex> + </rollingPolicy> + <triggeringPolicy + class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> + <maxFileSize>5MB</maxFileSize> + </triggeringPolicy> + <encoder> + <pattern>${debugLoggerPattern}</pattern> + </encoder> + </appender> + + <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender"> + <queueSize>256</queueSize> + <appender-ref ref="EELFDebug" /> + <includeCallerData>true</includeCallerData> + </appender> + + + <!-- 
============================================================================ --> + <!-- EELF loggers --> + <!-- ============================================================================ --> + <logger name="com.att.eelf" level="info" additivity="false"> + <appender-ref ref="asyncEELF" /> + </logger> + <logger name="com.att.eelf.security" level="info" additivity="false"> + <appender-ref ref="asyncEELFSecurity" /> + </logger> + <logger name="com.att.eelf.perf" level="info" additivity="false"> + <appender-ref ref="asyncEELFPerformance" /> + </logger> + <logger name="com.att.eelf.server" level="info" additivity="false"> + <appender-ref ref="asyncEELFServer" /> + </logger> + <logger name="com.att.eelf.policy" level="info" additivity="false"> + <appender-ref ref="asyncEELFPolicy" /> + </logger> + + <logger name="org.openecomp.sdc.common.ecomplog.EcompLoggerAudit" level="info" additivity="false"> + <appender-ref ref="asyncEELFAudit" /> + </logger> + + <logger name="com.att.eelf.metrics" level="info" additivity="false"> + <appender-ref ref="asyncEELFMetrics" /> + </logger> + + + <logger name="com.att.eelf.error" level="error" additivity="false"> + <appender-ref ref="asyncEELFError" /> + </logger> + + <logger name="com.att.eelf.debug" level="debug" additivity="false"> + <appender-ref ref="asyncEELFDebug" /> + </logger> + + + + + <root level="INFO"> + <appender-ref ref="asyncEELF" /> + </root> + +</configuration> diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/http-ini.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/http-ini.erb new file mode 100644 index 0000000..8f26690 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/http-ini.erb @@ -0,0 +1,29 @@ +# --------------------------------------- +# Module: http +<%= @http_option %> + +### HTTP Connector Configuration + +## Connector host/address to bind to +# jetty.http.host=0.0.0.0 + +## Connector port to listen on +jetty.http.port=<%= @http_port %> + +## Connector idle timeout in milliseconds +jetty.http.idleTimeout=30000 + +## Connector socket linger time in seconds (-1 to disable) +# jetty.http.soLingerTime=-1 + +## Number of acceptors (-1 picks default based on number of cores) +# jetty.http.acceptors=-1 + +## Number of selectors (-1 picks default based on number of cores) +# jetty.http.selectors=-1 + +## ServerSocketChannel backlog (0 picks platform default) +# jetty.http.acceptorQueueSize=0 + +## Thread priority delta to give to acceptor threads +# jetty.http.acceptorPriorityDelta=0 diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/https-ini.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/https-ini.erb new file mode 100644 index 0000000..9999a41 --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/https-ini.erb @@ -0,0 +1,29 @@ +# --------------------------------------- +# Module: https +--module=https + +### HTTPS Connector Configuration + +## Connector host/address to bind to +# jetty.https.host=0.0.0.0 + +## Connector port to listen on +jetty.https.port=<%= @https_port %> + +## Connector idle timeout in milliseconds +jetty.https.idleTimeout=30000 + +## Connector socket linger time in seconds (-1 to disable) +# jetty.https.soLingerTime=-1 + +## Number of acceptors (-1 picks default based on number of cores) +# jetty.https.acceptors=-1 + +## Number of selectors (-1 picks default based on number of cores) +# jetty.https.selectors=-1 + +## ServerSocketChannel 
backlog (0 picks platform default) +# jetty.https.acceptorQueueSize=0 + +## Thread priority delta to give to acceptor threads +# jetty.https.acceptorPriorityDelta=0 diff --git a/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/ssl-ini.erb b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/ssl-ini.erb new file mode 100644 index 0000000..278fdea --- /dev/null +++ b/docker/docker_be/chef-repo/cookbooks/Deploy-DCAE/templates/default/ssl-ini.erb @@ -0,0 +1,90 @@ +# --------------------------------------- +# Module: ssl +--module=ssl + +### TLS(SSL) Connector Configuration + +## Connector host/address to bind to +# jetty.ssl.host=0.0.0.0 + +## Connector port to listen on +jetty.ssl.port=<%= @https_port %> + +## Connector idle timeout in milliseconds +# jetty.ssl.idleTimeout=30000 + +## Connector socket linger time in seconds (-1 to disable) +# jetty.ssl.soLingerTime=-1 + +## Number of acceptors (-1 picks default based on number of cores) +# jetty.ssl.acceptors=-1 + +## Number of selectors (-1 picks default based on number of cores) +# jetty.ssl.selectors=-1 + +## ServerSocketChannel backlog (0 picks platform default) +# jetty.ssl.acceptorQueueSize=0 + +## Thread priority delta to give to acceptor threads +# jetty.ssl.acceptorPriorityDelta=0 + +## Whether request host names are checked to match any SNI names +# jetty.ssl.sniHostCheck=true + +## max age in seconds for a Strict-Transport-Security response header (default -1) +# jetty.ssl.stsMaxAgeSeconds=31536000 + +## include subdomain property in any Strict-Transport-Security header (default false) +# jetty.ssl.stsIncludeSubdomains=true + +### SslContextFactory Configuration +## Note that OBF passwords are not secure, just protected from casual observation +## See http://www.eclipse.org/jetty/documentation/current/configuring-security-secure-passwords.html + +## Keystore file path (relative to $jetty.base) +jetty.sslContext.keyStorePath=etc/org.onap.sdc.p12 + +## Truststore file path (relative to $jetty.base) +jetty.sslContext.trustStorePath=etc/org.onap.sdc.trust.jks + +## Keystore password +# jetty.sslContext.keyStorePassword=OBF:1vny1zlo1x8e1vnw1vn61x8g1zlu1vn4 +jetty.sslContext.keyStorePassword=<%= @jetty_keystore_pwd %> + +## Keystore type and provider +# jetty.sslContext.keyStoreType=JKS +# jetty.sslContext.keyStoreProvider= + +## KeyManager password +# jetty.sslContext.keyManagerPassword=OBF:1u2u1wml1z7s1z7a1wnl1u2g +jetty.sslContext.keyManagerPassword=<%= @jetty_keymanager_pwd %> + +## Truststore password +# jetty.sslContext.trustStorePassword=OBF:1vny1zlo1x8e1vnw1vn61x8g1zlu1vn4 +jetty.sslContext.trustStorePassword=<%= @jetty_truststore_pwd %> + +## Truststore type and provider +# jetty.sslContext.trustStoreType=JKS +# jetty.sslContext.trustStoreProvider= + +## whether client certificate authentication is required +# jetty.sslContext.needClientAuth=false + +## Whether client certificate authentication is desired +# jetty.sslContext.wantClientAuth=false + +## Whether cipher order is significant (since java 8 only) +# jetty.sslContext.useCipherSuitesOrder=true + +## To configure Includes / Excludes for Cipher Suites or Protocols see tweak-ssl.xml example at +## https://www.eclipse.org/jetty/documentation/current/configuring-ssl.html#configuring-sslcontextfactory-cipherSuites + +## Set the size of the SslSession cache +# jetty.sslContext.sslSessionCacheSize=-1 + +## Set the timeout (in seconds) of the SslSession cache timeout +# jetty.sslContext.sslSessionTimeout=-1 + +## Allow SSL renegotiation +# 
jetty.sslContext.renegotiationAllowed=true +# jetty.sslContext.renegotiationLimit=5
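
For reference, the consumers.py.erb template earlier in this change probes SDC backend readiness by shelling out to curl against the rendered `<%= @protocol %>://<%= @catalog_ip %>:<%= @catalog_port %>/sdc2/rest/v1/user/jh0003` endpoint and retrying on failure. The following is a minimal Python 3 sketch of that same HTTPS probe using only the standard library; the host, port, and CA bundle path are placeholders standing in for the values the ERB template would render, and are not defined anywhere in this cookbook.

#!/usr/bin/python3
# Hypothetical sketch only: mirrors the retry loop of check_backend() in
# consumers.py.erb, but uses urllib instead of shelling out to curl.
# PROTOCOL, CATALOG and CA_BUNDLE are placeholders for template-rendered values.
import ssl
import time
import urllib.request

PROTOCOL = "https"                                  # stands in for <%= @protocol %>
CATALOG = "sdc-be.example.org:8443"                 # <%= @catalog_ip %>:<%= @catalog_port %>
CA_BUNDLE = "/var/lib/jetty/etc/org.onap.sdc.key"   # same CA file curl passes via --cacert

URL = "%s://%s/sdc2/rest/v1/user/jh0003" % (PROTOCOL, CATALOG)


def check_backend():
    """Return the HTTP status of the SDC user endpoint, or 0 on any error."""
    ctx = ssl.create_default_context(cafile=CA_BUNDLE)
    try:
        # The curl version issues a HEAD request (-I); a GET is close enough here.
        with urllib.request.urlopen(URL, context=ctx, timeout=10) as resp:
            return resp.getcode()
    except Exception:
        return 0


if __name__ == "__main__":
    for attempt in range(1, 11):
        if check_backend() == 200:
            print("SDC backend is up after %d attempt(s)" % attempt)
            break
        time.sleep(10)
    else:
        print("SDC backend did not respond")

Unlike the curl invocation in the template, which combines -k with --cacert, this sketch verifies the server certificate against the CA bundle; whether strict verification is appropriate depends on how the org.onap.sdc keystore and truststore are provisioned in a given deployment.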