Diffstat (limited to 'kubernetes/clamp/components/clamp-dash-logstash/resources/config')
-rw-r--r-- | kubernetes/clamp/components/clamp-dash-logstash/resources/config/logstash.yml  | 24
-rw-r--r-- | kubernetes/clamp/components/clamp-dash-logstash/resources/config/pipeline.conf | 275
2 files changed, 299 insertions, 0 deletions
diff --git a/kubernetes/clamp/components/clamp-dash-logstash/resources/config/logstash.yml b/kubernetes/clamp/components/clamp-dash-logstash/resources/config/logstash.yml
new file mode 100644
index 0000000000..cecd5b18c8
--- /dev/null
+++ b/kubernetes/clamp/components/clamp-dash-logstash/resources/config/logstash.yml
@@ -0,0 +1,24 @@
+# Copyright © 2020 AT&T, Amdocs, Bell Canada Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+http.host: "0.0.0.0"
+## Path where pipeline configurations reside
+path.config: /usr/share/logstash/pipeline
+
+## Type of queue: memory based or file based
+#queue.type: persisted
+## Size of queue
+#queue.max_bytes: 1024mb
+## Setting this to true makes Logstash periodically check for changes in pipeline configurations
+config.reload.automatic: true
+
diff --git a/kubernetes/clamp/components/clamp-dash-logstash/resources/config/pipeline.conf b/kubernetes/clamp/components/clamp-dash-logstash/resources/config/pipeline.conf
new file mode 100644
index 0000000000..87c8f06e42
--- /dev/null
+++ b/kubernetes/clamp/components/clamp-dash-logstash/resources/config/pipeline.conf
@@ -0,0 +1,275 @@
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+input {
+  http_poller {
+    urls => {
+      event_queue => {
+        method => get
+        url => "${dmaap_base_url}/events/${event_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+        headers => {
+          Accept => "application/json"
+        }
+        topic => "${event_topic}"
+        tags => [ "dmaap_source" ]
+      }
+      notification_queue => {
+        method => get
+        url => "${dmaap_base_url}/events/${notification_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+        headers => {
+          Accept => "application/json"
+        }
+        topic => "${notification_topic}"
+        tags => [ "dmaap_source" ]
+      }
+      request_queue => {
+        method => get
+        url => "${dmaap_base_url}/events/${request_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+        headers => {
+          Accept => "application/json"
+        }
+        topic => "${request_topic}"
+        tags => [ "dmaap_source" ]
+      }
+    }
+    socket_timeout => 30
+    request_timeout => 30
+    schedule => { "every" => "1m" }
+    codec => "plain"
+{{- if .Values.global.aafEnabled }}
+    cacert => "{{ .Values.certInitializer.credsPath }}/{{ .Values.certInitializer.clamp_ca_certs_pem }}"
+{{- else }}
+    cacert => "/certs.d/aafca.pem"
+{{- end }}
+  }
+}
+
+
+filter {
+  # avoid noise if there is no entry in the list
+  if [message] == "[]" {
+    drop { }
+  }
+
+  if [http_request_failure] or [@metadata][code] != 200 {
+    mutate {
+      add_tag => [ "error" ]
+    }
+  }
+
+  if "dmaap_source" in [@metadata][request][tags] {
+    #
+    # DMaaP provides a JSON list whose items are strings, each containing the event
+    # as it was published to DMaaP, i.e. an escaped JSON document.
+    #
+    # We first parse the outer JSON list here in the filter (the input uses the plain
+    # codec because a codec cannot split a list of events), then split that list into
+    # one string event per item, and finally parse each item into JSON fields.
+    #
+    json {
+      source => "[message]"
+      target => "message"
+    }
+
+    split {
+      field => "message"
+    }
+    json {
+      source => "message"
+    }
+    mutate {
+      remove_field => [ "message" ]
+    }
+  }
+
+  #
+  # Some timestamps are expressed in milliseconds, some in microseconds
+  #
+  if [closedLoopAlarmStart] {
+    ruby {
+      code => "
+        if event.get('closedLoopAlarmStart').to_s.to_i(10) > 9999999999999
+          event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10) / 1000)
+        else
+          event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10))
+        end
+      "
+    }
+    date {
+      match => [ "closedLoopAlarmStart", "UNIX_MS" ]
+      target => "closedLoopAlarmStart"
+    }
+  }
+
+  if [closedLoopAlarmEnd] {
+    ruby {
+      code => "
+        if event.get('closedLoopAlarmEnd').to_s.to_i(10) > 9999999999999
+          event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10) / 1000)
+        else
+          event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10))
+        end
+      "
+    }
+    date {
+      match => [ "closedLoopAlarmEnd", "UNIX_MS" ]
+      target => "closedLoopAlarmEnd"
+    }
+  }
+
+  #
+  # Notification times are expressed in the form "yyyy-MM-dd HH:mm:ss", which is
+  # close to ISO8601 but lacks the "T" separator: "yyyy-MM-ddTHH:mm:ss"
+  #
+  if [notificationTime] {
+    mutate {
+      gsub => [
+        "notificationTime", " ", "T"
+      ]
+    }
+    date {
+      match => [ "notificationTime", "ISO8601" ]
+      target => "notificationTime"
+    }
+  }
+
+  #
+  # Renaming some fields for readability
+  #
+  if [AAI][generic-vnf.vnf-name] {
+    mutate {
+      add_field => { "vnfName" => "%{[AAI][generic-vnf.vnf-name]}" }
+    }
+  }
+  if [AAI][generic-vnf.vnf-type] {
+    mutate {
+      add_field => { "vnfType" => "%{[AAI][generic-vnf.vnf-type]}" }
+    }
+  }
+  if [AAI][vserver.vserver-name] {
+    mutate {
+      add_field => { "vmName" => "%{[AAI][vserver.vserver-name]}" }
+    }
+  }
"%{[AAI][vserver.vserver-name]}" } + } + } + if [AAI][complex.city] { + mutate { + add_field => { "locationCity" => "%{[AAI][complex.city]}" } + } + } + if [AAI][complex.state] { + mutate { + add_field => { "locationState" => "%{[AAI][complex.state]}" } + } + } + + + # + # Adding some flags to ease aggregation + # + if [closedLoopEventStatus] =~ /(?i)ABATED/ { + mutate { + add_field => { "flagAbated" => "1" } + } + } + if [notification] =~ /^.*?(?:\b|_)FINAL(?:\b|_).*?(?:\b|_)FAILURE(?:\b|_).*?$/ { + mutate { + add_field => { "flagFinalFailure" => "1" } + } + } + + + if "error" not in [@metadata][request][tags]{ + # + # Creating data for a secondary index + # + clone { + clones => [ "event-cl-aggs" ] + add_tag => [ "event-cl-aggs" ] + } + + if "event-cl-aggs" in [@metadata][request][tags]{ + # + # we only need a few fields for aggregations; remove all fields from clone except : + # vmName,vnfName,vnfType,requestID,closedLoopAlarmStart, closedLoopControlName,closedLoopAlarmEnd,abated,nbrDmaapevents,finalFailure + # + prune { + whitelist_names => ["^@.*$","^topic$","^type$","^tags$","^flagFinalFailure$","^flagAbated$","^locationState$","^locationCity$","^vmName$","^vnfName$","^vnfType$","^requestID$","^closedLoopAlarmStart$","^closedLoopControlName$","^closedLoopAlarmEnd$","^target$","^target_type$","^triggerSourceName$","^policyScope$","^policyName$","^policyVersion$"] + } + + } + } +} + + +output { + stdout { + codec => rubydebug + } + + if "error" in [tags] { + elasticsearch { + ilm_enabled => false + codec => "json" +{{- if .Values.global.aafEnabled }} + cacert => "{{ .Values.certInitializer.credsPath }}/{{ .Values.certInitializer.clamp_ca_certs_pem }}" +{{- else }} + cacert => "/clamp-cert/ca-certs.pem" +{{- end }} + ssl_certificate_verification => false + hosts => ["${elasticsearch_base_url}"] + user => ["${logstash_user}"] + password => ["${logstash_pwd}"] + index => "errors-%{+YYYY.MM.DD}" + doc_as_upsert => true + } + + } else if "event-cl-aggs" in [tags] { + elasticsearch { + ilm_enabled => false + codec => "json" + hosts => ["${elasticsearch_base_url}"] +{{- if .Values.global.aafEnabled }} + cacert => "{{ .Values.certInitializer.credsPath }}/{{ .Values.certInitializer.clamp_ca_certs_pem }}" +{{- else }} + cacert => "/clamp-cert/ca-certs.pem" +{{- end }} + ssl_certificate_verification => false + user => ["${logstash_user}"] + password => ["${logstash_pwd}"] + document_id => "%{requestID}" + index => "events-cl-%{+YYYY.MM.DD}" # creates daily indexes for control loop + doc_as_upsert => true + action => "update" + } + + } else { + elasticsearch { + ilm_enabled => false + codec => "json" + hosts => ["${elasticsearch_base_url}"] +{{- if .Values.global.aafEnabled }} + cacert => "{{ .Values.certInitializer.credsPath }}/{{ .Values.certInitializer.clamp_ca_certs_pem }}" +{{- else }} + cacert => "/clamp-cert/ca-certs.pem" +{{- end }} + ssl_certificate_verification => false + user => ["${logstash_user}"] + password => ["${logstash_pwd}"] + index => "events-%{+YYYY.MM.DD}" # creates daily indexes + doc_as_upsert => true + } + } +} |