aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoralkac <alkac@amdocs.com>2017-08-08 22:59:10 +0530
committeralkac <alkac@amdocs.com>2017-08-09 14:32:30 +0530
commit6efd571748cb8c4d5e83032e54026a452add827e (patch)
tree3fdb7ddb0f28ab2d8b7fce85a8318315113ca704
parent39e98f94f334d18968061c7eed4f0bdde30898eb (diff)
[LOG-34]Config of Logstash Indexing for ONAP Logs
Change-Id: I115893b01fee180896607285023090a65c0b4946 Signed-off-by: alkac <alkac@amdocs.com>
-rw-r--r--elasticstack/logstash/conf/README.md42
-rw-r--r--elasticstack/logstash/conf/logstash.yml16
-rw-r--r--elasticstack/logstash/conf/onap-pipeline.conf203
-rw-r--r--elasticstack/logstash/conf/onap-pipeline.properties57
4 files changed, 318 insertions, 0 deletions
diff --git a/elasticstack/logstash/conf/README.md b/elasticstack/logstash/conf/README.md
new file mode 100644
index 0000000..fe423bb
--- /dev/null
+++ b/elasticstack/logstash/conf/README.md
@@ -0,0 +1,42 @@
+#Logstash canonical configuration
+----------------------------------
+This logstash configuration is to sanitize logback/log4j logs from ONAP components. It also includes the deployment settings for logstash environment.
+
+#Files provided for logstash deployment
+---------------------------------------
+Logstash deployment uses two types of configurations, plus a properties file:
+1. logstash.yml [Logstash Settings]
+2. onap-pipeline.conf [Pipeline Configurations]
+
+3. onap-pipeline.properties
+
+#Instructions for deployment
+----------------------------
+1. Create canonical path /etc/onap/logstash/conf.d/ on the host on which the logstash has to be installed.
+2. Save the logstash.yml at location created in step 1.
+3. Create canonical path /etc/onap/logstash/conf.d/pipeline/ on the host.
+4. The onap-pipeline.conf is parameterized and has tokens instead of values for certain deployment specific parameters (like port, elastic host etc.). These tokens have a syntax as '$[a-zA-Z_]+'.
+5. These tokens are listed in another file onap-pipeline.properties. These properties are also provided with commented description about them in the file itself. These tokens have to be replaced with the appropriate values as per the deployment environment before deployment.
+6. Now, save the final onap-pipeline.conf at location created in step 3.
+7. Following is the list of specifications for logstash container creation-
+
+ Image - 'docker.elastic.co/logstash/logstash:5.4.3' available in the Elastic Docker Registry.
+
+ Port mapping - The onap-pipeline.conf specifies the port on which logstash listens for events from filebeats. It is defined as a parameter 'port' in the beats section of input configuration. The container should publish the same port with the host port which is configured in the file.
+
+ Example - If the logstash listens on port 5044 specified in onap-pipeline.conf as -
+ input {
+ beats {
+ port => 5044
+ }
+ }
+ Then the container port 5044 should be published to host port 5044.
+
+
+ Volume mount - The logstash container must have two host directories mapped as volume in the container
+ 1. Host path - /etc/onap/logstash/conf.d/logstash.yml mapped to
+ Container path - /usr/share/logstash/config/logstash.yml
+
+ 2. Host path - /etc/onap/logstash/conf.d/pipeline/ mapped to
+ Container path - /usr/share/logstash/pipeline/
+8. onap-pipeline.properties does not need to be deployed; it is only used to supply the token values before deployment. \ No newline at end of file
diff --git a/elasticstack/logstash/conf/logstash.yml b/elasticstack/logstash/conf/logstash.yml
new file mode 100644
index 0000000..f658006
--- /dev/null
+++ b/elasticstack/logstash/conf/logstash.yml
@@ -0,0 +1,16 @@
+http.host: "0.0.0.0"
+## Path where pipeline configurations reside
+path.config: /usr/share/logstash/pipeline
+
+## Type of queue : memory based or file based
+#queue.type: persisted
+## Size of queue
+#queue.max_bytes: 1024mb
+## Setting true makes logstash check periodically for change in pipeline configurations
+config.reload.automatic: true
+
+## xpack configurations
+#xpack.monitoring.elasticsearch.url: ["http://10.247.186.12:9200", "http://10.247.186.13:9200"]
+#xpack.monitoring.elasticsearch.username: elastic
+#xpack.monitoring.elasticsearch.password: changeme
+xpack.monitoring.enabled: false
diff --git a/elasticstack/logstash/conf/onap-pipeline.conf b/elasticstack/logstash/conf/onap-pipeline.conf
new file mode 100644
index 0000000..a18b216
--- /dev/null
+++ b/elasticstack/logstash/conf/onap-pipeline.conf
@@ -0,0 +1,203 @@
+input {
+ beats {
+
+ ## Add a id to plugin configuration. Can be anything unique.
+ id => 'beats_plugin'
+
+ ######## Connection configurations ########
+
+ ## The port to listen on.
+ port => $filebeat_port
+
+ ## Close Idle clients after the specified time in seconds. Default is 60 seconds
+ #client_inactivity_timeout => 60
+
+ ######## Security configurations ########
+
+ ## Enable encryption. Default false.
+ #ssl => $filebeat_ssl
+
+ ## ssl certificate path.
+ #ssl_certificate => $filebeat_ssl_certificate
+
+ ## SSL key to use.
+ #ssl_key => $filebeat_ssl_key
+
+ ##SSL key passphrase to use.
+ #ssl_key_passphrase => $filebeat_ssl_key_passphrase
+
+ ## Value can be any of: none, peer, force_peer.
+ #ssl_verify_mode => $filebeat_ssl_verify_mode
+
+ ## Time in milliseconds for an incomplete ssl handshake to timeout. Default is 10000 ms.
+ #ssl_handshake_timeout => 10000
+ include_codec_tag => false
+ }
+}
+
+
+filter {
+ # Filter for log4j xml events
+ if "</log4j:event>" in [message] {
+ #Filter to parse xml event and retrieve data
+ xml {
+ source => "message"
+ store_xml => false
+ remove_namespaces => true
+ target => "xml_content"
+ xpath => [ "/event/message/text()", "logmsg" ,
+ "/event/@logger", "Logger",
+ "/event/@timestamp", "Timestamp",
+ "/event/@level", "loglevel",
+ "/event/@thread", "Thread",
+ "/event/throwable/text()", "Exceptionthrowable",
+ "/event/NDC/text()", "NDCs",
+ "/event/properties/data/@name","mdcname",
+ "/event/properties/data/@value","mdcvalue"]
+
+ }
+
+ #Ruby filter to iterate and separate MDCs into documents
+ ruby {
+ code => '
+ $i = 0
+ $num = 0
+ if event.get("[mdcname]")
+ $num = event.get("[mdcname]").length
+ end
+ if $num != 0
+ until $i > $num do
+ if event.get("[mdcname]").at($i) and event.get("[mdcvalue]").at($i)
+ event.set(event.get("[mdcname]").at($i), event.get("[mdcvalue]").at($i))
+ end
+ $i=$i+1
+ end
+ end
+ '
+ }
+
+ #Validations
+ if [Exceptionthrowable]
+ {
+ mutate {
+ replace => {
+ "exceptionmessage" => "%{[Exceptionthrowable]}"
+ }
+ }
+ }
+
+ if [NDCs]
+ {
+ mutate {
+ replace => {
+ "NDC" => "%{[NDCs]}"
+ }
+ }
+ }
+
+ mutate {
+ replace => {
+ "Logger" =>"%{[Logger]}"
+ "logmsg" =>"%{[logmsg]}"
+ "Timestamp" =>"%{[Timestamp]}"
+ "loglevel" =>"%{[loglevel]}"
+ "message" => "%{logmsg}"
+ "Thread" => "%{[Thread]}"
+ }
+ remove_field => ["mdcname", "mdcvalue", "logmsg","Exceptionthrowable","NDCs"]
+ }
+
+ if [Timestamp]
+ {
+ date {
+ match => ["Timestamp", "UNIX_MS"]
+ target => "Timestamp"
+ }
+ }
+ }
+ # Filter for logback events
+ else {
+ mutate {
+ gsub => [
+ 'message', '= ', '=null',
+ 'message', '=\t', '=null ', #This null is followed by a tab
+ 'message', '\t$', '\t'
+ ]
+ }
+ kv {
+ field_split => "\t"
+ trim_key => "\s"
+ trim_value => "\s"
+ }
+ grok {
+ break_on_match => false
+ match => {
+ "message" => "%{TIMESTAMP_ISO8601:Timestamp}\t%{GREEDYDATA:Thread}\t%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}\t%{JAVACLASS:Logger}\t(?:[^\t]+\t)*%{GREEDYDATA:message}"
+ }
+ overwrite => ["message"]
+ }
+ }
+}
+
+
+output {
+ elasticsearch {
+ id => 'onap_es'
+
+ ######### Security configurations #########
+
+ user => $es_user
+ password => $es_password
+
+ ## The .cer or .pem file to validate the server’s certificate
+ #cacert => $es_cacert
+
+ ## The keystore used to present a certificate to the server. It can be either .jks or .p12
+ #keystore => $es_keystore
+ #keystore_password => $es_keystore_password
+
+ ## Enable SSL/TLS secured communication to Elasticsearch cluster.
+ ## Default is not set which in that case depends on the protocol specified in hosts list
+ #ssl => $es_ssl
+
+ ## Option to validate the server’s certificate. Default is true
+ #ssl_certificate_verification => $es_ssl_certificate_verification
+
+ ## The JKS truststore to validate the server’s certificate.
+ #truststore => $es_truststore
+ #truststore_password => $es_truststore_password
+
+
+ ######### Elasticsearch cluster and host configurations #########
+
+#can specify one or a list of hosts. If sniffing is set, one is enough and others will be auto-discovered
+##Also protocol can be specified like ["http://10.247.186.12:9200"]
+ hosts => ["http://elasticsearch.onap:9200"]
+
+
+ ## This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. Default is false.
+ sniffing => true
+
+ ## How long to wait, in seconds, between sniffing attempts. Default is 5 seconds.
+ #sniffing_delay => 5
+
+ ## Set the address of a forward HTTP proxy.
+ #proxy => $es_proxy
+
+ ##Use this if you must run Elasticsearch behind a proxy that remaps the root path for the Elasticsearch HTTP API lives
+ #path => $es_path
+
+ ######### Elasticsearch request configurations #########
+
+ ## This setting defines the maximum sized bulk request Logstash will make.
+ #flush_size => ?
+
+ ######### Document configurations #########
+
+ index => "onaplogs-%{+YYYY.MM.dd}"
+ document_type => "logs"
+
+ ## This can be used to associate child documents with a parent using the parent ID.
+ #parent => "abcd"
+ }
+} \ No newline at end of file
diff --git a/elasticstack/logstash/conf/onap-pipeline.properties b/elasticstack/logstash/conf/onap-pipeline.properties
new file mode 100644
index 0000000..c411122
--- /dev/null
+++ b/elasticstack/logstash/conf/onap-pipeline.properties
@@ -0,0 +1,57 @@
+######### Filebeat input plugin configurations #########
+
+## The port to listen on for filebeat events.
+filebeat_port = 5044
+
+## Enable encryption. Default false.
+#filebeat_ssl = true
+
+## ssl certificate path.
+#filebeat_ssl_certificate = "/etc/ssl/private/server.crt"
+
+## SSL key to use.
+#filebeat_ssl_key = "/etc/ssl/private/server.key"
+
+##SSL key passphrase to use.
+#filebeat_ssl_key_passphrase = "abcd"
+
+## Value can be any of: none, peer, force_peer.
+#filebeat_ssl_verify_mode = force_peer
+
+######### Elasticsearch output plugin configurations #########
+
+### ES Security configurations ###
+
+es_user = "elastic"
+es_password = "changeme"
+
+## Enable SSL/TLS secured communication to Elasticsearch cluster.
+## Default is not set which in that case depends on the protocol specified in hosts list
+#es_ssl = true
+
+## The .cer or .pem file to validate the server’s certificate
+#es_cacert = "/etc/pki/client/cert.pem"
+
+## The keystore used to present a certificate to the server. It can be either .jks or .p12
+#es_keystore = "/etc/pki/client/key.p12"
+#es_keystore_password = "abcd"
+
+## Option to validate the server’s certificate. Default is true
+#es_ssl_certificate_verification = true
+
+## The JKS truststore to validate the server’s certificate.
+#es_truststore = "/etc/pki/client/cacerts.jks"
+#es_truststore_password = "abcd"
+
+
+### Elasticsearch cluster and host configurations ###
+
+#can specify one or a list of hosts. If sniffing is set, one is enough and others will be auto-discovered
+##Also protocol can be specified like ["http://10.247.186.12:9200"]
+es_hosts = ["10.247.186.12:9200"]
+
+## Set the address of a forward HTTP proxy.
+#es_proxy = "https://genproxy.amdocs.com:8080"
+
+##Use this if you must run Elasticsearch behind a proxy that remaps the root path for the Elasticsearch HTTP API lives
+#es_path = ?? \ No newline at end of file