Diffstat (limited to 'kubernetes/log/resources')
-rw-r--r--  kubernetes/log/resources/elasticsearch/conf/elasticsearch.yml   140
-rw-r--r--  kubernetes/log/resources/kibana/conf/kibana.yml                 116
-rw-r--r--  kubernetes/log/resources/logstash/conf/logstash.yml              16
-rw-r--r--  kubernetes/log/resources/logstash/pipeline/onap-pipeline.conf   204
4 files changed, 476 insertions(+), 0 deletions(-)
diff --git a/kubernetes/log/resources/elasticsearch/conf/elasticsearch.yml b/kubernetes/log/resources/elasticsearch/conf/elasticsearch.yml
new file mode 100644
index 0000000000..de92a584e9
--- /dev/null
+++ b/kubernetes/log/resources/elasticsearch/conf/elasticsearch.yml
@@ -0,0 +1,140 @@
+# ======================== Elasticsearch Configuration =========================
+#
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+# Before you set out to tweak and tune the configuration, make sure you
+# understand what you are trying to accomplish and the consequences.
+#
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production cluster.
+#
+# Please consult the documentation for further information on configuration options:
+# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
+#
+# ---------------------------------- Cluster -----------------------------------
+#
+# Name of the Elasticsearch cluster.
+# A node can only join a cluster when it shares its cluster.name with all the other nodes in the cluster.
+# The default name is elasticsearch, but you should change it to an appropriate name which describes the
+# purpose of the cluster.
+#
+cluster.name: "onap-log"
+#
+# The port that other nodes in the cluster should use when communicating with this node.
+# Required when Elasticsearch nodes run on different hosts in the cluster.
+# More: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html
+transport.publish_port: $transport.publish_port
+#
+# The host address to publish for nodes in the cluster to connect to.
+# Required when Elasticsearch nodes run on different hosts in the cluster.
+# More: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html
+transport.publish_host: $transport.publish_host
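+#
+# Example values (illustrative only; the $ placeholders above are expected to be
+# substituted at deploy time):
+#transport.publish_port: 30491
+#transport.publish_host: 10.0.0.4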
+#
+# ------------------------------------ Node ------------------------------------
+#
+# It is better to provide different meaningful names for different Elasticsearch nodes.
+# By default, Elasticsearch will use the first 7 characters of the randomly generated UUID as the node ID.
+# Note that the node ID is persisted and does not change when a node restarts.
+#
+#node.name: $node.name
+#
+# Add custom attributes to the node:
+#
+#node.attr.rack: r1
+#
+# ----------------------------------- Paths ------------------------------------
+#
+# The location of the data files of each index / shard allocated on the node. Can hold multiple locations separated by commas.
+# In production, do not keep the default of "/elasticsearch/data": on upgrading Elasticsearch, the directory structure
+# may change, which can lead to data loss.
+path.data: /usr/share/elasticsearch/data
+#
+# Location of Elasticsearch's log files. In production, do not keep the default of "/elasticsearch/logs",
+# as the directory structure may change when upgrading Elasticsearch.
+path.logs: /usr/share/elasticsearch/logs
+#
+# ----------------------------------- Memory -----------------------------------
+#
+# It is vitally important to the health of your node that no part of the JVM is ever swapped out to disk.
+# Lock the memory on startup.
+#
+bootstrap.memory_lock: true
+#
+# Make sure that the heap size is set to about half the memory available
+# on the system and that the owner of the process is allowed to use this
+# limit.
+#
+# Elasticsearch performs poorly when the system is swapping the memory.
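+#
+# For example (illustrative; assumes roughly 4g of memory is available to this container),
+# the heap can be set through the ES_JAVA_OPTS environment variable or jvm.options:
+# ES_JAVA_OPTS="-Xms2g -Xmx2g"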
+#
+# ---------------------------------- Network -----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+# In order to communicate and to form a cluster with nodes on other servers, your node will need to bind to a
+# non-loopback address.
+network.host: 0.0.0.0
+#
+# Set a custom port for HTTP if required. The default range is 9200-9300.
+#
+#http.port: $http.port
+#
+# For more information, consult the network module documentation.
+#
+# --------------------------------- Discovery ----------------------------------
+#
+# Pass an initial list of hosts to perform discovery when a new node is started:
+# To form a cluster with nodes on other servers, you have to provide a seed list of other nodes in the cluster
+# that are likely to be live and contactable.
+# By default, Elasticsearch will bind to the available loopback addresses and will scan ports 9300 to 9305 to try
+# to connect to other nodes running on the same server.
+#
+discovery.zen.ping.unicast.hosts: ["elasticsearch.onap-log"]
+#$discovery.zen.ping.unicast.hosts
+#
+# This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes
+# available. Only then will an election take place.
+# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):
+discovery.zen.minimum_master_nodes: 1
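+#
+# For example, a cluster with three master-eligible nodes would set (3 / 2 + 1) = 2
+# (host names below are illustrative only):
+#discovery.zen.ping.unicast.hosts: ["es-node-0.onap-log", "es-node-1.onap-log", "es-node-2.onap-log"]
+#discovery.zen.minimum_master_nodes: 2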
+#
+# For more information, consult the zen discovery module documentation.
+#
+# ---------------------------------- Gateway -----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are started:
+#
+#gateway.recover_after_nodes: 3
+#
+# For more information, consult the gateway module documentation.
+#
+# ---------------------------------- Various -----------------------------------
+#
+# Require explicit names when deleting indices:
+#
+#action.destructive_requires_name: true
+# Set a custom port for HTTP if required. The default range is 9200-9300.
+# This port is used for the REST APIs.
+http.port: 9200
+# Port to bind for communication between nodes. Accepts a single value or a range.
+# If a range is specified, the node will bind to the first available port in the range.
+# Defaults to 9300-9400.
+# More info:
+transport.tcp.port: 9300
+
+# Set to false to disable X-Pack graph features.
+xpack.graph.enabled: false
+
+# Set to false to disable X-Pack machine learning features.
+xpack.ml.enabled: false
+
+# Set to false to disable X-Pack monitoring features.
+xpack.monitoring.enabled: false
+
+# Set to false to disable X-Pack reporting features.
+xpack.reporting.enabled: false
+
+# Set to false to disable X-Pack security features.
+xpack.security.enabled: false
+
+# Set to false to disable Watcher.
+xpack.watcher.enabled: false
+
+index.number_of_replicas: 0
+
diff --git a/kubernetes/log/resources/kibana/conf/kibana.yml b/kubernetes/log/resources/kibana/conf/kibana.yml
new file mode 100644
index 0000000000..0f72daa497
--- /dev/null
+++ b/kubernetes/log/resources/kibana/conf/kibana.yml
@@ -0,0 +1,116 @@
+# Set to false to disable X-Pack graph features.
+xpack.graph.enabled: false
+# Set to false to disable X-Pack machine learning features.
+xpack.ml.enabled: false
+# Set to false to disable X-Pack monitoring features.
+xpack.monitoring.enabled: false
+# Set to false to disable X-Pack reporting features.
+xpack.reporting.enabled: false
+# Set to false to disable X-Pack security features.
+xpack.security.enabled: false
+# Set to false to disable Watcher.
+xpack.watcher.enabled: false
+# Kibana is served by a back end server. This setting specifies the port to use.
+server.port: 5601
+
+# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
+# The default is 'localhost', which usually means remote machines will not be able to connect.
+# To allow connections from remote users, set this parameter to a non-loopback address.
+server.host: "0"
+
+# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
+# the URLs generated by Kibana; your proxy is expected to remove the basePath value before forwarding requests
+# to Kibana. This setting cannot end in a slash.
+#server.basePath: ""
+
+# The maximum payload size in bytes for incoming server requests.
+#server.maxPayloadBytes: 1048576
+
+# The Kibana server's name. This is used for display purposes.
+server.name: "Kibana"
+
+# The URL of the Elasticsearch instance to use for all your queries.
+elasticsearch.url: "http://elasticsearch.onap-log:9200"
+#elasticsearch-service.onap-log:9200"
+#elasticsearch.url: "http://10.247.47.3:9200"
+# When this setting's value is true Kibana uses the hostname specified in the server.host
+# setting. When the value of this setting is false, Kibana uses the hostname of the host
+# that connects to this Kibana instance.
+#elasticsearch.preserveHost: true
+
+# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
+# dashboards. Kibana creates a new index if the index doesn't already exist.
+#kibana.index: ".kibana"
+
+# The default application to load.
+#kibana.defaultAppId: "discover"
+
+# If your Elasticsearch is protected with basic authentication, these settings provide
+# the username and password that the Kibana server uses to perform maintenance on the Kibana
+# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
+# is proxied through the Kibana server.
+elasticsearch.username: "elastic"
+elasticsearch.password: "changeme"
+# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
+# These settings enable SSL for outgoing requests from the Kibana server to the browser.
+#server.ssl.enabled: $server_ssl_enabled
+#server.ssl.certificate: $server_ssl_certificate
+#server.ssl.key: $server_ssl_key
+
+# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
+# These files validate that your Elasticsearch backend uses the same key files.
+#elasticsearch.ssl.certificate: $elasticsearch_ssl_certificate
+#elasticsearch.ssl.key: $elasticsearch_ssl_key
+
+# Optional setting that enables you to specify a path to the PEM file for the certificate
+# authority for your Elasticsearch instance.
+#elasticsearch.ssl.certificateAuthorities: $elasticsearch_ssl_certificateAuthorities
+
+# To disregard the validity of SSL certificates, change this setting's value to 'none'.
+#elasticsearch.ssl.verificationMode: $elasticsearch_ssl_verificationMode
+
+# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
+# the elasticsearch.requestTimeout setting.
+#elasticsearch.pingTimeout: 1500
+
+# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
+# must be a positive integer.
+#elasticsearch.requestTimeout: 30000
+
+# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
+# headers, set this value to [] (an empty list).
+#elasticsearch.requestHeadersWhitelist: [ authorization ]
+
+# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
+# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
+#elasticsearch.customHeaders: {}
+
+# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
+#elasticsearch.shardTimeout: 0
+
+# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
+#elasticsearch.startupTimeout: 5000
+
+# Specifies the path where Kibana creates the process ID file.
+#pid.file: /var/run/kibana.pid
+
+# Enables you to specify a file where Kibana stores log output.
+#logging.dest: stdout
+
+# Set the value of this setting to true to suppress all logging output.
+#logging.silent: false
+
+# Set the value of this setting to true to suppress all logging output other than error messages.
+#logging.quiet: false
+
+# Set the value of this setting to true to log all events, including system usage information
+# and all requests.
+#logging.verbose: false
+
+# Set the interval in milliseconds to sample system and process performance
+# metrics. Minimum is 100ms. Defaults to 5000.
+#ops.interval: 5000
+
+# The default locale. This locale can be used in certain circumstances to substitute any missing
+# translations.
+#i18n.defaultLocale: "en"
diff --git a/kubernetes/log/resources/logstash/conf/logstash.yml b/kubernetes/log/resources/logstash/conf/logstash.yml
new file mode 100644
index 0000000000..f658006503
--- /dev/null
+++ b/kubernetes/log/resources/logstash/conf/logstash.yml
@@ -0,0 +1,16 @@
+http.host: "0.0.0.0"
+## Path where pipeline configurations reside
+path.config: /usr/share/logstash/pipeline
+
+## Type of queue: memory-based or file-based
+#queue.type: persisted
+## Maximum size of the queue
+#queue.max_bytes: 1024mb
+## Setting this to true makes Logstash periodically check for changes in the pipeline configurations
+config.reload.automatic: true
+
+## xpack configurations
+#xpack.monitoring.elasticsearch.url: ["http://10.247.186.12:9200", "http://10.247.186.13:9200"]
+#xpack.monitoring.elasticsearch.username: elastic
+#xpack.monitoring.elasticsearch.password: changeme
+xpack.monitoring.enabled: false
diff --git a/kubernetes/log/resources/logstash/pipeline/onap-pipeline.conf b/kubernetes/log/resources/logstash/pipeline/onap-pipeline.conf
new file mode 100644
index 0000000000..1a46375047
--- /dev/null
+++ b/kubernetes/log/resources/logstash/pipeline/onap-pipeline.conf
@@ -0,0 +1,204 @@
+input {
+ beats {
+
+ ## Add an id to the plugin configuration. Can be anything unique.
+ id => 'beats_plugin'
+
+ ######## Connection configurations ########
+
+ ## The port to listen on.
+ port => 5044
+
+ ## Close idle clients after the specified time in seconds. Default is 60 seconds.
+ #client_inactivity_timeout => 60
+
+ ######## Security configurations ########
+
+ ## Enable encryption. Default false.
+ #ssl => $filebeat_ssl
+
+ ## ssl certificate path.
+ #ssl_certificate => $filebeat_ssl_certificate
+
+ ## SSL key to use.
+ #ssl_key => $filebeat_ssl_key
+
+ ## SSL key passphrase to use.
+ #ssl_key_passphrase => $filebeat_ssl_key_passphrase
+
+ ## Value can be any of: none, peer, force_peer.
+ #ssl_verify_mode => $filebeat_ssl_verify_mode
+
+ ## Time in milliseconds for an incomplete ssl handshake to timeout. Default is 10000 ms.
+ #ssl_handshake_timeout => 10000
+ include_codec_tag => false
+ }
+}
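+
+# For reference (illustrative, not part of this pipeline): a minimal Filebeat output
+# pointing at the beats input above could look like the following in filebeat.yml,
+# assuming the Logstash service is reachable as "logstash.onap-log":
+#   output.logstash:
+#     hosts: ["logstash.onap-log:5044"]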
+
+
+filter {
+ # Filter for log4j xml events
+ if "</log4j:event>" in [message] {
+ #Filter to parse xml event and retrieve data
+ xml {
+ source => "message"
+ store_xml => false
+ remove_namespaces => true
+ target => "xml_content"
+ xpath => [ "/event/message/text()", "logmsg" ,
+ "/event/@logger", "Logger",
+ "/event/@timestamp", "Timestamp",
+ "/event/@level", "loglevel",
+ "/event/@thread", "Thread",
+ "/event/throwable/text()", "Exceptionthrowable",
+ "/event/NDC/text()", "NDCs",
+ "/event/properties/data/@name","mdcname",
+ "/event/properties/data/@value","mdcvalue"]
+
+ }
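+ # For reference, an illustrative log4j XMLLayout event of the kind the xpath expressions
+ # above would parse (field values below are made up):
+ # <log4j:event logger="org.onap.example.Foo" timestamp="1515151515000" level="INFO" thread="main">
+ #   <log4j:message>Processed request</log4j:message>
+ #   <log4j:properties><log4j:data name="RequestId" value="1234"/></log4j:properties>
+ # </log4j:event>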
+
+ #Ruby filter to iterate over the MDC name/value arrays and set each pair as a field on the event
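+ # For example (illustrative), mdcname ["RequestId","ServiceName"] with mdcvalue ["1234","demo"]
+ # results in the event fields RequestId => "1234" and ServiceName => "demo".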
+ ruby {
+ code => '
+ $i = 0
+ $num = 0
+ if event.get("[mdcname]")
+ $num = event.get("[mdcname]").length
+ end
+ if $num != 0
+ until $i > $num do
+ if event.get("[mdcname]").at($i) and event.get("[mdcvalue]").at($i)
+ event.set(event.get("[mdcname]").at($i), event.get("[mdcvalue]").at($i))
+ end
+ $i=$i+1
+ end
+ end
+ '
+ }
+
+ #Validations
+ if [Exceptionthrowable]
+ {
+ mutate {
+ replace => {
+ "exceptionmessage" => "%{[Exceptionthrowable]}"
+ }
+ }
+ }
+
+ if [NDCs]
+ {
+ mutate {
+ replace => {
+ "NDC" => "%{[NDCs]}"
+ }
+ }
+ }
+
+ mutate {
+ replace => {
+ "Logger" =>"%{[Logger]}"
+ "logmsg" =>"%{[logmsg]}"
+ "Timestamp" =>"%{[Timestamp]}"
+ "loglevel" =>"%{[loglevel]}"
+ "message" => "%{logmsg}"
+ "Thread" => "%{[Thread]}"
+ }
+ remove_field => ["mdcname", "mdcvalue", "logmsg","Exceptionthrowable","NDCs"]
+ }
+
+ if [Timestamp]
+ {
+ date {
+ match => ["Timestamp", "UNIX_MS"]
+ target => "Timestamp"
+ }
+ }
+ }
+ # Filter for logback events
+ else {
+ mutate {
+ gsub => [
+ 'message', '= ', '=null',
+ 'message', '=\t', '=null ', #This null is followed by a tab
+ 'message', '\t$', '\t'
+ ]
+ }
+ kv {
+ field_split => "\t"
+ trim_key => "\s"
+ trim_value => "\s"
+ }
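+ # For example (illustrative, with <TAB> standing for a literal tab character),
+ # tab-separated pairs such as RequestId=1234<TAB>ServiceName=demo in the message
+ # become the event fields RequestId and ServiceName.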
+ grok {
+ break_on_match => false
+ match => {
+ "message" => "%{TIMESTAMP_ISO8601:Timestamp}\t%{GREEDYDATA:Thread}\t%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}\t%{JAVACLASS:Logger}\t(?:[^\t]+\t)*%{GREEDYDATA:message}"
+ }
+ overwrite => ["message"]
+ }
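+ # For example (illustrative, with <TAB> standing for a literal tab character),
+ # the grok pattern above matches a logback line of the form:
+ # 2018-01-15T12:00:00.123Z<TAB>main<TAB>INFO<TAB>org.onap.example.Foo<TAB>RequestId=1234<TAB>Processed request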
+ }
+}
+
+
+output {
+ elasticsearch {
+ id => 'onap_es'
+
+ ######### Security configurations #########
+
+ user => "elastic"
+ password => "changeme"
+
+ ## The .cer or .pem file to validate the server’s certificate
+ #cacert => $es_cacert
+
+ ## The keystore used to present a certificate to the server. It can be either .jks or .p12
+ #keystore => $es_keystore
+ #keystore_password => $es_keystore_password
+
+ ## Enable SSL/TLS secured communication to Elasticsearch cluster.
+ ## Default is not set, in which case it depends on the protocol specified in the hosts list.
+ #ssl => $es_ssl
+
+ ## Option to validate the server’s certificate. Default is true
+ #ssl_certificate_verification => $es_ssl_certificate_verification
+
+ ## The JKS truststore to validate the server’s certificate.
+ #truststore => $es_truststore
+ #truststore_password => $es_truststore_password
+
+
+ ######### Elasticsearchcluster and host configurations #########
+
+ ## You can specify a single host or a list of hosts. If sniffing is enabled, one is enough; the others will be auto-discovered.
+ ## The protocol can also be specified, e.g. ["http://10.247.186.12:9200"]
+ hosts => ["http://elasticsearch.onap-log:9200"]
+
+
+ ## This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. Default is false.
+ sniffing => true
+
+ ## How long to wait, in seconds, between sniffing attempts. Default is 5 seconds.
+ #sniffing_delay => 5
+
+ ## Set the address of a forward HTTP proxy.
+ #proxy => $es_proxy
+
+ ## Use this if you must run Elasticsearch behind a proxy that remaps the root path under which the Elasticsearch HTTP API lives.
+ #path => $es_path
+
+ ######### Elasticsearch request configurations #########
+
+ ## This setting defines the maximum bulk request size Logstash will make.
+ #flush_size => ?
+
+ ######### Document configurations #########
+
+ index => "onaplogs-%{+YYYY.MM.dd}"
+ document_type => "logs"
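+ ## For example, an event whose timestamp falls on 15 Jan 2018 is written to the index "onaplogs-2018.01.15".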
+
+ ## This can be used to associate child documents with a parent using the parent ID.
+ #parent => "abcd"
+ }
+}
+