author     ac2550 <ac2550@intl.att.com>    2018-09-20 14:57:22 +0200
committer  Anaël Closson <ac2550@intl.att.com>    2018-10-09 15:59:49 +0000
commit     a0496b0f40c37b9a5c4747a58f34dbd47a27f1ed (patch)
tree       2854c4b18e2bef63437ac58c84786e8543a3a2db /kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
parent     b1cc7ae3d635d0d54bb57928dbf85398fa414196 (diff)
Migrate ELK to 6.1.3 OSS

+ add logstash configuration for a 2nd aggregated index
+ use new restore scripts for restoring dashboards

Change-Id: I9176fa3b551591fdaf968e5b2496d510581aca22
Issue-ID: CLAMP-222
Signed-off-by: ac2550 <ac2550@intl.att.com>
Diffstat (limited to 'kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf')
-rw-r--r--  kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf | 220
1 file changed, 164 insertions(+), 56 deletions(-)
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
index f88e40da14..317b428c77 100644
--- a/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
@@ -1,4 +1,4 @@
-# Copyright © 2018 AT&T, Amdocs, Bell Canada Intellectual Property. All rights reserved.
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
input {
- http_poller {
+ http_poller {
urls => {
event_queue => {
method => get
@@ -20,8 +20,8 @@ input {
headers => {
Accept => "application/json"
}
- add_field => { "topic" => "${event_topic}" }
- type => "dmaap_event"
+ topic => "${event_topic}"
+ tags => [ "dmaap_source" ]
}
notification_queue => {
method => get
@@ -29,8 +29,8 @@ input {
headers => {
Accept => "application/json"
}
- add_field => { "topic" => "${notification_topic}" }
- type => "dmaap_notification"
+ topic => "${notification_topic}"
+ tags => [ "dmaap_source" ]
}
request_queue => {
method => get
@@ -38,66 +38,91 @@ input {
headers => {
Accept => "application/json"
}
- add_field => { "topic" => "${request_topic}" }
- type => "dmaap_request"
+ topic => "${request_topic}"
+ tags => [ "dmaap_source" ]
}
}
socket_timeout => 30
request_timeout => 30
- interval => 60
+ schedule => { "every" => "1m" }
codec => "plain"
- }
+ cacert => "/certs.d/aafca.pem"
+ }
}
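Newer releases of the logstash-input-http_poller plugin drop the old interval setting in favor of a rufus-scheduler schedule hash, which also accepts cron/in/at forms. A minimal sketch of the cron equivalent of the one-minute cadence above, with a placeholder endpoint standing in for the templated DMaaP topic URLs:

    input {
      http_poller {
        urls => {
          # hypothetical endpoint, stands in for the "${event_topic}" style URLs
          event_queue => "http://dmaap.example.org/events/EVENT_TOPIC"
        }
        # fires once per minute, same cadence as { "every" => "1m" }
        schedule => { "cron" => "* * * * * UTC" }
        codec => "plain"
      }
    }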
+
filter {
- if [type] != "dmaap_log" {
- # avoid noise if no entry in the list
- if [message] == "[]" {
- drop { }
- }
+ # avoid noise if no entry in the list
+ if [message] == "[]" {
+ drop { }
+ }
+
+ if [http_request_failure] or [@metadata][code] != "200" {
+ mutate {
+ add_tag => [ "error" ]
+ }
+ }
- # parse json, split the list into multiple events, and parse each event
+ if "dmaap_source" in [tags] {
+ #
+ # DMaaP returns a JSON list whose items are strings, each one holding an
+ # event as escaped JSON.
+ #
+ # The body is therefore parsed in three steps: parse the outer JSON (the
+ # plain codec alone cannot handle a list of events), split the resulting
+ # list into one event per item, then parse each item's string as JSON.
+ #
json {
- source => "[message]"
- target => "message"
+ source => "[message]"
+ target => "message"
}
+ ruby {
+   # carry the poller's @metadata (e.g. the HTTP response code) over to
+   # each list item so the events created by the split below keep it
+   code => "
+     md = event.get('@metadata')
+     list = event.get('message')
+     list.each { |item| item['@metadata'] = md if item.is_a?(Hash) } if list.is_a?(Array)
+   "
+ }
+
split {
- field => "message"
- add_field => {
- "type" => "%{type}"
- "topic" => "%{topic}"
- }
+ field => "message"
}
json {
- source => "message"
+ source => "message"
+ }
+ mutate {
+ remove_field => [ "message" ]
}
- mutate { remove_field => [ "message" ] }
}
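To make the three-step parsing concrete, here is how a sample polled body (an invented payload) flows through the filters above:

    # body returned by the poller, a JSON list of escaped-JSON strings:
    #   ["{\"requestID\":\"r-1\"}","{\"requestID\":\"r-2\"}"]
    #
    # json   -> [message] becomes a two-element array of strings
    # split  -> two events, each with [message] set to one of the strings
    # json   -> each event gains the decoded fields, e.g. [requestID] = "r-1"
    # mutate -> drops the now-redundant [message] field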
-
- # express timestamps in milliseconds instead of microseconds
- ruby {
+
+ #
+ # Some timestamps are expressed in milliseconds, others in microseconds;
+ # normalize them all to milliseconds
+ #
+ if [closedLoopAlarmStart] {
+ ruby {
code => "
- if event.get('closedLoopAlarmStart').to_s.to_i(10) > 9999999999999
- event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10) / 1000)
- else
- event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10))
- end
- "
- }
- date {
- match => [ "closedLoopAlarmStart", UNIX_MS ]
- target => "closedLoopAlarmStart"
+ if event.get('closedLoopAlarmStart').to_s.to_i(10) > 9999999999999
+ event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10) / 1000)
+ else
+ event.set('closedLoopAlarmStart', event.get('closedLoopAlarmStart').to_s.to_i(10))
+ end
+ "
+ }
+ date {
+ match => [ "closedLoopAlarmStart", UNIX_MS ]
+ target => "closedLoopAlarmStart"
+ }
}
if [closedLoopAlarmEnd] {
ruby {
code => "
- if event.get('closedLoopAlarmEnd').to_s.to_i(10) > 9999999999999
- event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10) / 1000)
- else
- event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10))
- end
- "
+ if event.get('closedLoopAlarmEnd').to_s.to_i(10) > 9999999999999
+ event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10) / 1000)
+ else
+ event.set('closedLoopAlarmEnd', event.get('closedLoopAlarmEnd').to_s.to_i(10))
+ end
+ "
}
date {
match => [ "closedLoopAlarmEnd", UNIX_MS ]
@@ -105,39 +130,122 @@ filter {
}
}
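The threshold 9999999999999 is what separates the two units: epoch times in milliseconds stay within 13 digits until the year 2286, so any larger value is taken to be microseconds and divided by 1000. A worked example with invented values:

    # 1537452000123456 (16 digits, microseconds) -> 1537452000123 after / 1000
    # 1537452000123    (13 digits, milliseconds) -> passed through unchanged
    # the date filter then parses the result with the UNIX_MS pattern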
- #"yyyy-MM-dd HH:mm:ss"
+
+
+ #
+ # Notification times are expressed as "yyyy-MM-dd HH:mm:ss", which is
+ # close to ISO8601 but lacks the "T" separator: "yyyy-MM-ddTHH:mm:ss"
+ #
if [notificationTime] {
- mutate {
- gsub => [
- "notificationTime", " ", "T"
- ]
- }
- date {
+ mutate {
+ gsub => [
+ "notificationTime", " ", "T"
+ ]
+ }
+ date {
match => [ "notificationTime", ISO8601 ]
target => "notificationTime"
- }
+ }
+ }
+
+
+ #
+ # Renaming some fields for readability
+ #
+ if [AAI][generic-vnf.vnf-name] {
+ mutate {
+ add_field => { "vnfName" => "%{[AAI][generic-vnf.vnf-name]}" }
+ }
+ }
+ if [AAI][generic-vnf.vnf-type] {
+ mutate {
+ add_field => { "vnfType" => "%{[AAI][generic-vnf.vnf-type]}" }
+ }
+ }
+ if [AAI][vserver.vserver-name] {
+ mutate {
+ add_field => { "vmName" => "%{[AAI][vserver.vserver-name]}" }
+ }
+ }
+ if [AAI][complex.city] {
+ mutate {
+ add_field => { "locationCity" => "%{[AAI][complex.city]}" }
+ }
+ }
+ if [AAI][complex.state] {
+ mutate {
+ add_field => { "locationState" => "%{[AAI][complex.state]}" }
+ }
+ }
+
+
+ #
+ # Adding some flags to ease aggregation
+ #
+ if [closedLoopEventStatus] =~ /(?i)ABATED/ {
+ mutate {
+ add_field => { "flagAbated" => "1" }
+ }
+ }
+ if [notification] =~ /^.*?(?:\b|_)FINAL(?:\b|_).*?(?:\b|_)FAILURE(?:\b|_).*?$/ {
+ mutate {
+ add_field => { "flagFinalFailure" => "1" }
+ }
+ }
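The FINAL/FAILURE pattern requires a word boundary or underscore around each keyword, with FINAL appearing before FAILURE; a few invented notification strings show what it accepts and rejects:

    # matches: "FINAL: FAILURE", "FINAL FAILURE", "STATUS: FINAL FAILURE"
    # rejects: "FINALFAILURE" (keywords not delimited), "FAILURE: FINAL" (wrong order)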
+
+
+ if "error" not in [tags] {
+ #
+ # Creating data for a secondary index
+ #
+ clone {
+ clones => [ "event-cl-aggs" ]
+ add_tag => [ "event-cl-aggs" ]
+ }
+
+ if "event-cl-aggs" in [tags] {
+ #
+ # Only a few fields are needed for aggregation; remove every field from
+ # the clone except:
+ # vmName, vnfName, vnfType, requestID, closedLoopAlarmStart, closedLoopControlName, closedLoopAlarmEnd, abated, nbrDmaapevents, finalFailure
+ #
+ prune {
+ whitelist_names => ["^@.*$","^topic$","^type$","^tags$","^flagFinalFailure$","^flagAbated$","^locationState$","^locationCity$","^vmName$","^vnfName$","^vnfType$","^requestID$","^closedLoopAlarmStart$","^closedLoopControlName$","^closedLoopAlarmEnd$","^target$","^target_type$","^triggerSourceName$","^policyScope$","^policyName$","^policyVersion$"]
+ }
+
+ }
}
}
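A note on the clone/prune pair above: the clone filter emits one copy per name listed in clones, sets each copy's type to that name, and applies add_tag to the copies only, so the original event continues unchanged while the trimmed copy is routed to the aggregation index by the output below. A sketch of the two resulting events, with invented routing shorthand:

    # original event: fields untouched, no extra tag      -> "events-*" index
    # cloned event:   type = "event-cl-aggs", tag added,
    #                 pruned down to the whitelist        -> "events-cl-*" index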
+
+
output {
stdout {
codec => rubydebug
}
- if [http_request_failure] {
+ if "error" in [tags] {
elasticsearch {
codec => "json"
hosts => ["${elasticsearch_base_url}"]
index => "errors-%{+YYYY.MM.dd}" # dd = Joda day of month (DD would be day of year)
doc_as_upsert => true
}
- } else {
+
+ } else if "event-cl-aggs" in [tags] {
elasticsearch {
codec => "json"
hosts => ["${elasticsearch_base_url}"]
- index => "logstash-%{+YYYY.MM.DD}" # creates daily indexes
+ document_id => "%{requestID}"
+ index => "events-cl-%{+YYYY.MM.dd}" # creates daily indexes for control loop
doc_as_upsert => true
+ action => "update"
+ }
+ } else {
+ elasticsearch {
+ codec => "json"
+ hosts => ["${elasticsearch_base_url}"]
+ index => "events-%{+YYYY.MM.dd}" # creates daily indexes
+ doc_as_upsert => true
}
}
-
}
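Because the aggregation output keys documents on requestID and combines action => "update" with doc_as_upsert => true, all events of one control loop fold into a single document: the first event creates it and later ones update the whitelisted fields in place. Roughly the request issued per event, in Elasticsearch 6.x form (index name, document id, and fields are invented):

    # POST /events-cl-2018.09.20/doc/r-1/_update
    # { "doc": { "closedLoopControlName": "...", "flagAbated": "1" },
    #   "doc_as_upsert": true }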