author     ac2550 <ac2550@intl.att.com>    2018-04-18 14:23:17 +0200
committer  ac2550 <ac2550@intl.att.com>    2018-05-15 17:43:47 +0200
commit     8ac97179592231b6c82be7429f6fe632a3b92b00 (patch)
tree       104c1bb11eca5759b2ff5f0cf2cf62639619d186 /kubernetes/clamp/charts/clamp-dash-logstash/resources
parent     4f2c852f14d585235a6631261157ecdf5de62351 (diff)
Add clamp dashboard charts
Change-Id: I851750c2c394fad4c5187e2a18cfa0460a16c729
Issue-ID: CLAMP-154
Signed-off-by: ac2550 <ac2550@intl.att.com>
Diffstat (limited to 'kubernetes/clamp/charts/clamp-dash-logstash/resources')
-rw-r--r--  kubernetes/clamp/charts/clamp-dash-logstash/resources/config/logstash.yml   |  16
-rw-r--r--  kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf  | 108
2 files changed, 124 insertions, 0 deletions
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/logstash.yml b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/logstash.yml
new file mode 100644
index 0000000000..3ddf63f9cc
--- /dev/null
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/logstash.yml
@@ -0,0 +1,16 @@
+http.host: "0.0.0.0"
+## Path where pipeline configurations reside
+path.config: /usr/share/logstash/pipeline
+
+## Type of queue : memory based or file based
+#queue.type: persisted
+## Size of queue
+#queue.max_bytes: 1024mb
+## Setting true makes logstash check periodically for changes in pipeline configurations
+config.reload.automatic: true
+
+## xpack configurations
+#xpack.monitoring.elasticsearch.url: ["http://10.247.186.12:9200", "http://10.247.186.13:9200"]
+#xpack.monitoring.elasticsearch.username: elastic
+#xpack.monitoring.elasticsearch.password: changeme
+xpack.monitoring.enabled: false
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
new file mode 100644
index 0000000000..aa087e3445
--- /dev/null
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
@@ -0,0 +1,108 @@
+input {
+    http_poller {
+        urls => {
+            event_queue => {
+                method => get
+                url => "${dmaap_base_url}/events/${event_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+                headers => {
+                    Accept => "application/json"
+                }
+                add_field => { "topic" => "${event_topic}" }
+            }
+            notification_queue => {
+                method => get
+                url => "${dmaap_base_url}/events/${notification_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+                headers => {
+                    Accept => "application/json"
+                }
+                add_field => { "topic" => "${notification_topic}" }
+            }
+            request_queue => {
+                method => get
+                url => "${dmaap_base_url}/events/${request_topic}/${dmaap_consumer_group}/${dmaap_consumer_id}?timeout=15000"
+                headers => {
+                    Accept => "application/json"
+                }
+                add_field => { "topic" => "${request_topic}" }
+            }
+        }
+        socket_timeout => 30
+        request_timeout => 30
+        interval => 60
+        codec => "plain"
+    }
+}
+
+filter {
+    # avoid noise if no entry in the list
+    if [message] == "[]" {
+        drop { }
+    }
+
+    # parse json, split the list into multiple events, and parse each event
+    json {
+        source => "[message]"
+        target => "message"
+    }
+    split {
+        field => "message"
+    }
+    json {
+        source => "message"
+    }
+    mutate { remove_field => [ "message" ] }
+    # express timestamps in milliseconds instead of microseconds
+    ruby {
+        code => "event.set('closedLoopAlarmStart', Integer(event.get('closedLoopAlarmStart')))"
+    }
+    date {
+        match => [ "closedLoopAlarmStart", UNIX_MS ]
+        target => "closedLoopAlarmStart"
+    }
+
+    if [closedLoopAlarmEnd] {
+        ruby {
+            code => "event.set('closedLoopAlarmEnd', Integer(event.get('closedLoopAlarmEnd')))"
+        }
+        date {
+            match => [ "closedLoopAlarmEnd", UNIX_MS ]
+            target => "closedLoopAlarmEnd"
+        }
+
+    }
+    #"yyyy-MM-dd HH:mm:ss"
+    if [notificationTime] {
+        mutate {
+            gsub => [
+                "notificationTime", " ", "T"
+            ]
+        }
+        date {
+            match => [ "notificationTime", ISO8601 ]
+            target => "notificationTime"
+        }
+    }
+}
+output {
+    stdout {
+        codec => rubydebug
+    }
+
+    if [http_request_failure] {
+        elasticsearch {
+            codec => "json"
+            hosts => ["${elasticsearch_base_url}"]
+            index => "errors-%{+YYYY.MM.DD}"
+            doc_as_upsert => true
+        }
+    } else {
+        elasticsearch {
+            codec => "json"
+            hosts => ["${elasticsearch_base_url}"]
+            index => "logstash-%{+YYYY.MM.DD}"   # creates daily indexes
+            doc_as_upsert => true
+
+        }
+    }
+
+}
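
Note on the filter stage: each DMaaP poll returns a JSON array whose elements are themselves JSON-encoded event strings, so the pipeline parses the body, splits the array into one Logstash event per element, re-parses each element, converts the epoch-millisecond alarm timestamps, and normalises notificationTime to ISO8601 before indexing. The stand-alone Python sketch below mimics that transformation for one hypothetical poll response; the field values, topic name, and helper function are illustrative assumptions, not part of the chart.

    # Illustrative sketch only: approximates the drop/json/split/json/date steps
    # of pipeline.conf in plain Python. Sample values are hypothetical.
    import json
    from datetime import datetime, timezone

    # Hypothetical body returned by GET .../events/<topic>/...:
    # a JSON array whose elements are JSON-encoded event strings.
    poll_body = json.dumps([
        json.dumps({
            "closedLoopAlarmStart": 1524054197000,           # epoch milliseconds
            "closedLoopControlName": "ControlLoop-vDNS-0",   # hypothetical name
            "notificationTime": "2018-04-18 12:23:17",       # "yyyy-MM-dd HH:mm:ss"
        })
    ])

    def filter_stage(body, topic):
        """Mimic the filter section of pipeline.conf for one poll response."""
        if body == "[]":                      # drop {} - empty poll, nothing to index
            return []
        events = []
        for element in json.loads(body):      # json + split: one event per array element
            event = json.loads(element)       # json { source => "message" }
            event["topic"] = topic            # add_field set by the http_poller input
            # date { match => [..., UNIX_MS] }: epoch millis -> timestamp
            start = int(event["closedLoopAlarmStart"])
            event["closedLoopAlarmStart"] = datetime.fromtimestamp(start / 1000, tz=timezone.utc)
            # mutate gsub + date ISO8601: "yyyy-MM-dd HH:mm:ss" -> ISO timestamp
            if "notificationTime" in event:
                event["notificationTime"] = datetime.fromisoformat(
                    event["notificationTime"].replace(" ", "T"))
            events.append(event)
        return events

    print(filter_stage(poll_body, "DCAE-CL-EVENT"))   # topic name is hypothetical

After this stage, each event is written to the daily logstash-* index (or errors-* when the HTTP poll itself failed), which is what the CLAMP dashboard queries from Elasticsearch.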