-rw-r--r--  extra/docker/elk/docker-compose.yml              |  20
-rw-r--r--  extra/docker/elk/logstash-conf/logstash.conf     | 148
-rw-r--r--  pom.xml                                          |   2
-rw-r--r--  src/main/docker/Dockerfile.logstash              |   7
-rw-r--r--  src/main/docker/logstash/Dockerfile.logstash     |  28
-rw-r--r--  src/main/docker/logstash/certs/aafca.pem         |  26
-rw-r--r--  src/main/docker/logstash/pipeline/logstash.conf  | 257
7 files changed, 323 insertions(+), 165 deletions(-)
diff --git a/extra/docker/elk/docker-compose.yml b/extra/docker/elk/docker-compose.yml
index 3c5652a96..7ac58e808 100644
--- a/extra/docker/elk/docker-compose.yml
+++ b/extra/docker/elk/docker-compose.yml
@@ -15,11 +15,13 @@ services:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
logstash:
- image: docker.elastic.co/logstash/logstash-oss:6.1.3
+ image: onap/clamp-dashboard-logstash:latest
+ build:
+ context: ../../../src/main/docker/logstash
+ dockerfile: Dockerfile.logstash
volumes:
- - ./logstash-conf:/config-dir
+ - ../../../src/main/docker/logstash/pipeline:/usr/share/logstash/pipeline
- ./logstash-input:/log-input
- command: logstash -f /config-dir/logstash.conf
depends_on:
- elasticsearch
networks:
@@ -28,12 +30,12 @@ services:
- dmaap_base_url=http://dmaapmocker:3904/
- dmaap_user=user
- dmaap_password=password
- - dmaap_consumer_group=CG42
- - dmaap_consumer_id=C42
- - event_topic=EVENT_TOPIC
- - notification_topic=NOTIFICATION_TOPIC
- - request_topic=REQUEST_TOPIC
- - elasticsearch_hosts=elasticsearch
+ - dmaap_consumer_group=clampdashboard
+ - dmaap_consumer_id=clampdashboard
+ - event_topic=DCAE-CL-EVENT
+ - notification_topic=POLICY-CL-MGT
+ - request_topic=APPC-CL
+ - elasticsearch_base_url=elasticsearch
kibana:
image: onap/clamp-dashboard-kibana:latest
diff --git a/extra/docker/elk/logstash-conf/logstash.conf b/extra/docker/elk/logstash-conf/logstash.conf
deleted file mode 100644
index 2b71686fa..000000000
--- a/extra/docker/elk/logstash-conf/logstash.conf
+++ /dev/null
@@ -1,148 +0,0 @@
-input {
- http_poller {
- urls => {
- event_queue => {
- method => get
- url => "${dmaap_base_url}/events/${event_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
- headers => {
- Accept => "application/json"
- }
- add_field => { "topic" => "${event_topic}" }
- type => "dmaap_event"
- }
- notification_queue => {
- method => get
- url => "${dmaap_base_url}/events/${notification_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
- headers => {
- Accept => "application/json"
- }
- add_field => { "topic" => "${notification_topic}" }
- type => "dmaap_notification"
- }
- request_queue => {
- method => get
- url => "${dmaap_base_url}/events/${request_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
- headers => {
- Accept => "application/json"
- }
- add_field => { "topic" => "${request_topic}" }
- type => "dmaap_request"
- }
- }
- socket_timeout => 30
- request_timeout => 30
- codec => "plain"
- schedule => { "every" => "1m" }
- }
-}
-
-input {
- file {
- path => [
- "/log-input/dmaap_evt.log"
- ]
- type => "dmaap_log"
- codec => "json"
- }
-}
-
-filter {
-
- # parse json, split the list into multiple events, and parse each event
- if [type] != "dmaap_log" {
- # avoid noise if no entry in the list
- if [message] == "[]" {
- drop { }
- }
-
- json {
- source => "[message]"
- target => "message"
- }
-# ruby {
-# code => "event.get('message').each{|m| m.set('type',event.get('type')}"
-# }
- split {
- field => "message"
- add_field => {
- "type" => "%{type}"
- "topic" => "%{topic}"
- }
- }
-
- json {
- source => "message"
- }
-
- mutate { remove_field => [ "message" ] }
- }
-
- # express timestamps in milliseconds instead of microseconds
- if [closedLoopAlarmStart] {
- ruby {
- code => "
- if event.get('closedLoopAlarmStart').to_s.to_i(10) > 9999999999999
- event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10) / 1000)
- else
- event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10))
- end
- "
- }
- date {
- match => [ "closedLoopAlarmStart", UNIX_MS ]
- target => "closedLoopAlarmStart"
- }
- }
-
- if [closedLoopAlarmEnd] {
- ruby {
- code => "
- if event.get('closedLoopAlarmEnd').to_s.to_i(10) > 9999999999999
- event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10) / 1000)
- else
- event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10))
- end
- "
- }
- date {
- match => [ "closedLoopAlarmEnd", UNIX_MS ]
- target => "closedLoopAlarmEnd"
- }
-
- }
- #"yyyy-MM-dd HH:mm:ss"
- if [notificationTime] {
- mutate {
- gsub => [
- "notificationTime", " ", "T"
- ]
- }
- date {
- match => [ "notificationTime", ISO8601 ]
- target => "notificationTime"
- }
- }
-}
-output {
- stdout {
- codec => rubydebug
- }
-
- if [http_request_failure] {
- elasticsearch {
- codec => "json"
- hosts => ["${elasticsearch_hosts}"]
- index => "errors-%{+YYYY.MM.DD}"
- doc_as_upsert => true
- }
- } else {
- elasticsearch {
- codec => "json"
- hosts => ["${elasticsearch_hosts}"]
- index => "events-%{+YYYY.MM.DD}" # creates daily indexes
- doc_as_upsert => true
-
- }
- }
-
-}
diff --git a/pom.xml b/pom.xml
index 9149dbf89..7e748a6cf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -958,7 +958,7 @@
<tag>${project.docker.latesttagtimestamp.version}</tag>
<tag>${project.docker.latesttag.version}</tag>
</tags>
- <dockerFile>Dockerfile.logstash</dockerFile>
+ <dockerFile>logstash/Dockerfile.logstash</dockerFile>
</build>
</image>
<image>
diff --git a/src/main/docker/Dockerfile.logstash b/src/main/docker/Dockerfile.logstash
deleted file mode 100644
index 86b52c988..000000000
--- a/src/main/docker/Dockerfile.logstash
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM docker.elastic.co/logstash/logstash-oss:6.1.3
-
-MAINTAINER "The Onap Team"
-LABEL Description="Logstash image with some plugins needed for the clamp dashboard"
-
-RUN /usr/share/logstash/bin/logstash-plugin install logstash-filter-elasticsearch
-RUN /usr/share/logstash/bin/logstash-plugin install logstash-filter-prune
diff --git a/src/main/docker/logstash/Dockerfile.logstash b/src/main/docker/logstash/Dockerfile.logstash
new file mode 100644
index 000000000..be33b311d
--- /dev/null
+++ b/src/main/docker/logstash/Dockerfile.logstash
@@ -0,0 +1,28 @@
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM docker.elastic.co/logstash/logstash-oss:6.1.3
+
+MAINTAINER "The ONAP Team"
+LABEL Description="Logstash image with some plugins needed for the clamp dashboard"
+
+# Default AAF certificates
+COPY certs /certs.d/
+
+# replace the default pipeline with the clamp dashboard one
+COPY pipeline/logstash.conf /usr/share/logstash/pipeline/logstash.conf
+
+# add plugins needed by the aggregation part of the pipeline
+RUN /usr/share/logstash/bin/logstash-plugin install logstash-filter-elasticsearch
+RUN /usr/share/logstash/bin/logstash-plugin install logstash-filter-prune
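+
+# A hypothetical local build, producing the tag that
+# extra/docker/elk/docker-compose.yml expects (run from src/main/docker/logstash):
+#   docker build -f Dockerfile.logstash -t onap/clamp-dashboard-logstash:latest .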
diff --git a/src/main/docker/logstash/certs/aafca.pem b/src/main/docker/logstash/certs/aafca.pem
new file mode 100644
index 000000000..cf12ec4c6
--- /dev/null
+++ b/src/main/docker/logstash/certs/aafca.pem
@@ -0,0 +1,26 @@
+-----BEGIN CERTIFICATE-----
+MIIEVDCCAjygAwIBAgIBAjANBgkqhkiG9w0BAQsFADAsMQ4wDAYDVQQLDAVPU0FB
+RjENMAsGA1UECgwET05BUDELMAkGA1UEBhMCVVMwHhcNMTgwNjA1MDg1MTQxWhcN
+MjMwNjA1MDg1MTQxWjBHMQswCQYDVQQGEwJVUzENMAsGA1UECgwET05BUDEOMAwG
+A1UECwwFT1NBQUYxGTAXBgNVBAMMEGludGVybWVkaWF0ZUNBXzEwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDOXCdZIoWM0EnEEw3qPiVMhAgNolWCTaLt
+eI2TjlTQdGDIcXdBZukHdNeOKYzOXRsLt6bLRtd5yARpn53EbzS/dgAyHuaz1HjE
+5IPWSFRg9SulfHUmcS+GBt1+KiMJTlOsw6wSA73H/PjjXBbWs/uRJTnaNmV3so7W
+DhNW6fHOrbom4p+3FucbB/QAM9b/3l/1LKnRgdXx9tekDnaKN5u3HVBmyOlRhaRp
+tscLUCT3jijoGAPRcYZybgrpa0z3iCWquibTO/eLwuO/Dn7yHWau9ZZAHGPBSn9f
+TiLKRYV55mNjr3zvs8diTPECFPW8w8sRIH3za1aKHgUC1gd87Yr3AgMBAAGjZjBk
+MB0GA1UdDgQWBBQa1FdycErTZ6nr4dxiMbKH0P7vqjAfBgNVHSMEGDAWgBRTVTPy
+S+vQUbHBeJrBKDF77+rtSTASBgNVHRMBAf8ECDAGAQH/AgEAMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAlA/RTPy5i09fJ4ytSAmAdytMwEwRaU9F
+dshG7LU9q95ODsuM79yJvV9+ISIJZRsBqf5PDv93bUCKKHIYGvR6kNd+n3yx/fce
+txDkC/tMj1T9D8TuDKAclGEO9K5+1roOQQFxr4SE6XKb/wjn8OMrCoJ75S0F3htF
+LKL85T77JeGeNgSk8JEsZvQvj32m0gv9rxi5jM/Zi5E2vxrBR9T1v3kVvlt6+PSF
+BoHXROk5HQmdHxnH+VYQtDHSwj9Xe9aoJMyL0WjYKd//8NUO+VACDOtK4Nia6gy9
+m/n9kMASMw6f9iF4n6t4902RWrRKTYM1CVu5wyVklVbEdE9i6Db4CpL9E8HpBUAP
+t44JiNzuFkDmSE/z5XuQIimDt6nzOaSF8pX2KHY2ICDLwpMNUvxzqXD9ECbdspiy
+JC2RGq8uARGGl6kQQBKDNO8SrO7rSBPANd1+LgqrKbCrHYfvFgkZPgT5MlQi+E1G
+LNT+i6fzZha9ed/L6yjl5Em71flJGFwRZl2pfErZRxp8pLPcznYyIpSjcwnqNCRC
+orhlp8nheiODC3oO3AFHDiFgUqvm8hgpnT2cPk2lpU2VY1TcZ8sW5qUDCxINIPcW
+u1SAsa87IJK3vEzPZfTCs/S6XThoqRfXj0c0Rahj7YFRi/PqIPY0ejwdtmZ9m9pZ
+8Lb0GYmlo44=
+-----END CERTIFICATE-----
diff --git a/src/main/docker/logstash/pipeline/logstash.conf b/src/main/docker/logstash/pipeline/logstash.conf
new file mode 100644
index 000000000..e6cee9c19
--- /dev/null
+++ b/src/main/docker/logstash/pipeline/logstash.conf
@@ -0,0 +1,257 @@
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+input {
+ http_poller {
+ urls => {
+ event_queue => {
+ method => get
+ url => "${dmaap_base_url}/events/${event_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+ headers => {
+ Accept => "application/json"
+ }
+ add_field => { "topic" => "${event_topic}" }
+ type => "dmaap_event"
+ }
+ notification_queue => {
+ method => get
+ url => "${dmaap_base_url}/events/${notification_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+ headers => {
+ Accept => "application/json"
+ }
+ add_field => { "topic" => "${notification_topic}" }
+ type => "dmaap_notification"
+ }
+ request_queue => {
+ method => get
+ url => "${dmaap_base_url}/events/${request_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+ headers => {
+ Accept => "application/json"
+ }
+ add_field => { "topic" => "${request_topic}" }
+ type => "dmaap_request"
+ }
+ }
+ socket_timeout => 30
+ request_timeout => 30
+ codec => "plain"
+ schedule => { "every" => "1m" }
+ cacert => "/certs.d/aafca.pem"
+ }
+}
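+
+# With the environment from extra/docker/elk/docker-compose.yml, the
+# event_queue URL above expands to, for example:
+#   http://dmaapmocker:3904/events/DCAE-CL-EVENT/clampdashboard/clampdashboard?timeout=15000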
+
+input {
+ file {
+ path => [
+ "/log-input/*"
+ ]
+ type => "dmaap_log"
+ codec => "json"
+ }
+}
+
+filter {
+  # drop empty DMaaP poll results ("[]") to avoid noise
+ if [message] == "[]" {
+ drop { }
+ }
+
+ if [http_request_failure] or [@metadata][code] != "200" {
+ mutate {
+ add_tag => [ "error" ]
+ }
+ }
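+
+  # http_poller adds http_request_failure to the event itself when a poll
+  # fails; with its default metadata_target, the HTTP status code of a
+  # successful poll is available in [@metadata][code].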
+
+ if "dmaap_source" in [tags] {
+ #
+    # DMaaP provides a JSON list whose items are strings, each containing an
+    # event published to DMaaP as escaped JSON.
+    #
+    # We first parse the outer JSON from the plain-text body (the pipeline
+    # cannot operate on a list of events directly), then split the list into
+    # individual string events, each of which is parsed as JSON in turn.
+ #
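+    # For illustration, a hypothetical poll body such as
+    #   ["{\"requestID\":\"1\"}", "{\"requestID\":\"2\"}"]
+    # becomes two separate events, each with requestID parsed as a
+    # top-level field.
+    #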
+ json {
+ source => "[message]"
+ target => "message"
+ }
+    ruby {
+      # copy @metadata onto each parsed item so it survives the split below
+      # (Event#get takes a single argument, so default the list explicitly)
+      code => "
+        (event.get('message') || []).each do |m|
+          m['@metadata'] = event.get('@metadata') if m.is_a?(Hash)
+        end
+      "
+    }
+
+ split {
+ field => "message"
+ }
+ json {
+ source => "message"
+ }
+ mutate {
+ remove_field => [ "message" ]
+ }
+ }
+
+ #
+ # Some timestamps are expressed as milliseconds, some are in microseconds
+ #
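+  # For example (hypothetical values): a 16-digit 1518516000000000 is taken
+  # as microseconds and divided by 1000 to give 1518516000000 ms, while a
+  # 13-digit value is already in milliseconds and passes through unchanged
+  # (9999999999999 is the largest 13-digit integer).
+  #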
+ if [closedLoopAlarmStart] {
+ ruby {
+ code => "
+ if event.get('closedLoopAlarmStart').to_s.to_i(10) > 9999999999999
+ event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10) / 1000)
+ else
+ event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10))
+ end
+ "
+ }
+ date {
+      match => [ "closedLoopAlarmStart", "UNIX_MS" ]
+ target => "closedLoopAlarmStart"
+ }
+ }
+
+ if [closedLoopAlarmEnd] {
+ ruby {
+ code => "
+ if event.get('closedLoopAlarmEnd').to_s.to_i(10) > 9999999999999
+ event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10) / 1000)
+ else
+ event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10))
+ end
+ "
+ }
+ date {
+      match => [ "closedLoopAlarmEnd", "UNIX_MS" ]
+ target => "closedLoopAlarmEnd"
+ }
+
+ }
+
+
+ #
+  # Notification times are expressed in the form "yyyy-MM-dd HH:mm:ss", which
+  # is close to ISO8601 but lacks the 'T' separator: "yyyy-MM-ddTHH:mm:ss"
+ #
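+  # For example, a hypothetical "2018-02-14 12:34:56" is rewritten by the
+  # gsub below to "2018-02-14T12:34:56" before the date filter parses it.
+  #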
+ if [notificationTime] {
+ mutate {
+ gsub => [ "notificationTime", " ", "T" ]
+ }
+ date {
+      match => [ "notificationTime", "ISO8601" ]
+ target => "notificationTime"
+ }
+ }
+
+
+ #
+ # Renaming some fields for readability
+ #
+ if [AAI][generic-vnf.vnf-name] {
+ mutate {
+ add_field => { "vnfName" => "%{[AAI][generic-vnf.vnf-name]}" }
+ }
+ }
+ if [AAI][generic-vnf.vnf-type] {
+ mutate {
+ add_field => { "vnfType" => "%{[AAI][generic-vnf.vnf-type]}" }
+ }
+ }
+ if [AAI][vserver.vserver-name] {
+ mutate {
+ add_field => { "vmName" => "%{[AAI][vserver.vserver-name]}" }
+ }
+ }
+ if [AAI][complex.city] {
+ mutate {
+ add_field => { "locationCity" => "%{[AAI][complex.city]}" }
+ }
+ }
+ if [AAI][complex.state] {
+ mutate {
+ add_field => { "locationState" => "%{[AAI][complex.state]}" }
+ }
+ }
+
+
+ #
+ # Adding some flags to ease aggregation
+ #
+ if [closedLoopEventStatus] =~ /(?i)ABATED/ {
+ mutate {
+ add_field => { "flagAbated" => "1" }
+ }
+ }
+ if [notification] =~ /^.*?(?:\b|_)FINAL(?:\b|_).*?(?:\b|_)FAILURE(?:\b|_).*?$/ {
+ mutate {
+ add_field => { "flagFinalFailure" => "1" }
+ }
+ }
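+  # The word-boundary pattern above is meant to catch hypothetical variants
+  # such as "FINAL: FAILURE" or "FINAL_FAILURE" while ignoring
+  # e.g. "FINAL: SUCCESS".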
+
+
+ if "error" not in [tags] {
+ #
+ # Creating data for a secondary index
+ #
+ clone {
+ clones => [ "event-cl-aggs" ]
+ add_tag => [ "event-cl-aggs" ]
+ }
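+    # clone emits one copy of the event per entry in 'clones'; add_tag applies
+    # to the copies only, so the original continues untagged toward the main
+    # events index while the tagged copy feeds the aggregation index.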
+
+ if "event-cl-aggs" in [tags] {
+ #
+      # we only need a few fields for aggregation; remove all fields from the
+      # clone except the @-prefixed ones and those matched by whitelist_names below
+ #
+ prune {
+ whitelist_names => ["^@.*$","^topic$","^type$","^tags$","^flagFinalFailure$","^flagAbated$","^locationState$","^locationCity$","^vmName$","^vnfName$","^vnfType$","^requestID$","^closedLoopAlarmStart$","^closedLoopControlName$","^closedLoopAlarmEnd$","^target$","^target_type$","^triggerSourceName$","^policyScope$","^policyName$","^policyVersion$"]
+ }
+
+ }
+ }
+}
+
+output {
+ stdout {
+ codec => rubydebug { metadata => true }
+ }
+
+ if "error" in [tags] {
+ elasticsearch {
+ codec => "json"
+ hosts => ["${elasticsearch_base_url}"]
+      index => "errors-%{+YYYY.MM.dd}" # Joda format: dd = day of month (DD would be day of year)
+ doc_as_upsert => true
+ }
+
+ } else if "event-cl-aggs" in [tags] {
+ elasticsearch {
+ codec => "json"
+ hosts => ["${elasticsearch_base_url}"]
+ document_id => "%{requestID}"
+      index => "events-cl-%{+YYYY.MM.dd}" # creates daily indexes for control loop
+ doc_as_upsert => true
+ action => "update"
+ }
+
+ } else {
+ elasticsearch {
+ codec => "json"
+ hosts => ["${elasticsearch_base_url}"]
+      index => "events-%{+YYYY.MM.dd}" # creates daily indexes
+ doc_as_upsert => true
+ }
+ }
+}
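+
+# With the daily index pattern above, documents land in indexes named, for
+# example, errors-2018.02.14, events-cl-2018.02.14 or events-2018.02.14.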