10 files changed, 472 insertions, 67 deletions
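A note on the documentation change in the first file below: the clone command now pulls the OOM submodules in a single step. For a checkout that was already made without --recurse-submodules, the same result can be reached with standard git commands (a minimal sketch, not part of this patch; the parallelism value mirrors the -j2 used in the updated docs and can be adjusted):

    cd oom
    # fetch and check out every submodule referenced by the frankfurt branch
    git submodule update --init --recursive --jobs 2
    # each submodule should now report a checked-out commit instead of a leading '-'
    git submodule status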
diff --git a/docs/oom_user_guide.rst b/docs/oom_user_guide.rst index 7340ddf7fd..7cc46a71c4 100644 --- a/docs/oom_user_guide.rst +++ b/docs/oom_user_guide.rst @@ -115,7 +115,7 @@ stable which should be removed to avoid confusion:: To prepare your system for an installation of ONAP, you'll need to:: - > git clone -b frankfurt http://gerrit.onap.org/r/oom + > git clone -b frankfurt --recurse-submodules -j2 http://gerrit.onap.org/r/oom > cd oom/kubernetes diff --git a/kubernetes/cds/charts/cds-blueprints-processor/resources/config/application.properties b/kubernetes/cds/charts/cds-blueprints-processor/resources/config/application.properties index 05ac61cf57..453f906101 100755 --- a/kubernetes/cds/charts/cds-blueprints-processor/resources/config/application.properties +++ b/kubernetes/cds/charts/cds-blueprints-processor/resources/config/application.properties @@ -127,6 +127,8 @@ blueprintsprocessor.netconfExecutor.enabled=true blueprintsprocessor.restConfExecutor.enabled=true blueprintsprocessor.cliExecutor.enabled=true blueprintsprocessor.remoteScriptCommand.enabled=true +## Enable py-executor +blueprintsprocessor.streamingRemoteExecution.enabled=true # Used in Health Check blueprintsprocessor.messageproducer.self-service-api.type=kafka-basic-auth diff --git a/kubernetes/common/mariadb-galera/resources/create-deployment.yml b/kubernetes/common/mariadb-galera/resources/create-deployment.yml new file mode 100644 index 0000000000..61bfc78945 --- /dev/null +++ b/kubernetes/common/mariadb-galera/resources/create-deployment.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: {{- include "common.resourceMetadata" (dict "suffix" "upgrade-deployment" "dot" .) | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "common.fullname" . }} + template: + metadata: + labels: + app: {{ include "common.fullname" . }} + spec: + containers: + - name: {{ include "common.name" . }} + image: "{{ include "common.repository" . }}/{{ .Values.image }}" + ports: + - containerPort: {{ .Values.service.internalPort }} + name: {{ .Values.service.portName }} + - containerPort: {{ .Values.service.sstPort }} + name: {{ .Values.service.sstPortName }} + - containerPort: {{ .Values.service.replicationPort }} + name: {{ .Values.service.replicationName }} + - containerPort: {{ .Values.service.istPort }} + name: {{ .Values.service.istPortName }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: MYSQL_USER + valueFrom: + secretKeyRef: + key: login + name: {{ include "common.fullname" . }}-temp-upgrade-usercred + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: {{ include "common.fullname" . }}-temp-upgrade-usercred + - name: MYSQL_DATABASE + value: {{ default "" .Values.config.mysqlDatabase | quote }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: {{ include "common.fullname" . }}-temp-upgrade-root + subdomain: {{ .Values.service.name }} + hostname: {{ .Values.nameOverride }}-upgrade-deployment
\ No newline at end of file diff --git a/kubernetes/common/mariadb-galera/resources/post-upgrade-script.sh b/kubernetes/common/mariadb-galera/resources/post-upgrade-script.sh new file mode 100644 index 0000000000..132ac27ea2 --- /dev/null +++ b/kubernetes/common/mariadb-galera/resources/post-upgrade-script.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +TEMP_POD=$(kubectl get pod -n $NAMESPACE_ENV --selector \ + app='{{ include "common.fullname" . }}' -o \ + jsonpath='{.items[?(@.metadata.ownerReferences[].kind=="ReplicaSet")].metadata.name}') + +tmp_MYSQL_PASSWORD=$(echo -n $(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv \ + MYSQL_PASSWORD) | base64) + +tmp_ROOT_PASSWORD=$(echo -n $(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv \ + MYSQL_ROOT_PASSWORD) | base64) + +FLAG_EX_ROOT_SEC='{{ include "common.secret.getSecretNameFast" (dict "global" . "uid" (include "common.mariadb.secret.rootPassUID" .)) }}' + +FLAG_EX_SEC='{{ include "common.secret.getSecretNameFast" (dict "global" . "uid" (include "common.mariadb.secret.userCredentialsUID" .)) }}' + +kubectl patch secret $FLAG_EX_ROOT_SEC -p \ + '{"data":{"password":"'"$tmp_ROOT_PASSWORD"'"}}' + +kubectl patch secret $FLAG_EX_SEC -p \ + '{"data":{"password":"'"$tmp_MYSQL_PASSWORD"'"}}' + +kubectl delete pod -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 --now +kubectl delete deployment -n $NAMESPACE_ENV {{ include "common.fullname" . }}-upgrade-deployment +kubectl delete secret -n $NAMESPACE_ENV {{ include "common.fullname" . }}-temp-upgrade-root +kubectl delete secret -n $NAMESPACE_ENV {{ include "common.fullname" . }}-temp-upgrade-usercred
\ No newline at end of file diff --git a/kubernetes/common/mariadb-galera/resources/upgrade-scripts.sh b/kubernetes/common/mariadb-galera/resources/upgrade-scripts.sh new file mode 100644 index 0000000000..ff44606e23 --- /dev/null +++ b/kubernetes/common/mariadb-galera/resources/upgrade-scripts.sh @@ -0,0 +1,101 @@ +#!/bin/bash +MYSQL_USER=$(kubectl exec -n $NAMESPACE_ENV \ + {{ include "common.fullname" . }}-0 -- printenv MYSQL_USER) + +MYSQL_PASSWORD=$(kubectl exec -n $NAMESPACE_ENV \ + {{ include "common.fullname" . }}-0 -- printenv MYSQL_PASSWORD) + +MYSQL_ROOT_PASSWORD=$(kubectl exec -n $NAMESPACE_ENV \ + {{ include "common.fullname" . }}-0 -- printenv MYSQL_ROOT_PASSWORD) + +kubectl create secret generic \ + '{{ include "common.fullname" . }}'-temp-upgrade-root \ + --from-literal=password=$MYSQL_ROOT_PASSWORD + +kubectl create secret generic \ + '{{ include "common.fullname" . }}'-temp-upgrade-usercred \ + --from-literal=login=$MYSQL_USER --from-literal=password=$MYSQL_PASSWORD + +kubectl create -f /upgrade/create-deployment.yml + +TEMP_POD=$(kubectl get pod -n $NAMESPACE_ENV --selector \ + app='{{ include "common.fullname" . }}' -o \ + jsonpath='{.items[?(@.metadata.ownerReferences[].kind=="ReplicaSet")].metadata.name}') + +CLUSTER_NO=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- \ + mysql --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \ + -p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';" | \ + awk '{print $2}') + +CLUSTER_STATE=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- \ + mysql --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \ + -p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';" \ + | awk '{print $2}') + +STS_REPLICA=$(kubectl get statefulsets -n $NAMESPACE_ENV \ + {{ include "common.fullname" . }} -o jsonpath='{.status.replicas}') + +DEPLOYMENT_REPLICA=$(kubectl get deployment -n $NAMESPACE_ENV \ + {{ include "common.fullname" . }}-upgrade-deployment -o \ + jsonpath='{.status.replicas}') + +while [[ ! $CLUSTER_NO == $((STS_REPLICA+DEPLOYMENT_REPLICA)) ]] \ + || [[ ! $CLUSTER_STATE == "Synced" ]] +do + echo "$CLUSTER_NO and $CLUSTER_STATE" + CLUSTER_NO=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- mysql \ + --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \ + -p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';" \ + | awk '{print $2}') + CLUSTER_STATE=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- mysql \ + --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \ + -p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';" \ + | awk '{print $2}') + sleep 2 + if [[ $CLUSTER_NO == $((STS_REPLICA+DEPLOYMENT_REPLICA)) ]] \ + && [[ $CLUSTER_STATE == "Synced" ]] + then + echo "The cluster has $CLUSTER_NO members and $CLUSTER_STATE state." + break + fi +done + +MYSQL_STATUS=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- mysqladmin \ + -uroot -p$MYSQL_ROOT_PASSWORD ping) + +while [[ ! $MYSQL_STATUS == "mysqld is alive" ]] +do + echo "Mariadb deployment is not ready yet." + sleep 2 + MYSQL_STATUS=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- mysqladmin \ + -uroot -p$MYSQL_ROOT_PASSWORD ping) + if [[ $MYSQL_STATUS == "mysqld is alive" ]] + then + echo "Mariadb deployment is ready." + break + fi +done + +kubectl scale statefulsets {{ include "common.fullname" . }} --replicas=0 +MY_REPLICA_NUMBER=$(kubectl get statefulsets -n $NAMESPACE_ENV \ + {{ include "common.fullname" . 
}} -o jsonpath='{.status.replicas}') +echo "The the cluster has $MY_REPLICA_NUMBER replicas." + +while [[ ! $MY_REPLICA_NUMBER == "0" ]] +do + echo "The cluster is not scaled to 0 yet. Please wait ..." + MY_REPLICA_NUMBER=$(kubectl get statefulsets -n $NAMESPACE_ENV \ + {{ include "common.fullname" . }} -o jsonpath='{.status.replicas}') + echo "The current status of the cluster is $MY_REPLICA_NUMBER" + sleep 2 + if [[ $MY_REPLICA_NUMBER == "0" ]] + then + break + fi +done + +for (( index=0; index<$STS_REPLICA; index+=1 )) +do + kubectl delete pvc \ + "{{ include "common.fullname" . }}-data-{{ include "common.fullname" . }}-$index" +done diff --git a/kubernetes/common/mariadb-galera/templates/configmap.yaml b/kubernetes/common/mariadb-galera/templates/configmap.yaml index a7064d7ce4..685901fa95 100644 --- a/kubernetes/common/mariadb-galera/templates/configmap.yaml +++ b/kubernetes/common/mariadb-galera/templates/configmap.yaml @@ -1,6 +1,6 @@ {{/* # Copyright © 2018 Amdocs, Bell Canada -# Copyright © 2020 Samsung Electronics +# Copyright © 2020 Samsung Electronics, and TATA Communications # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. */}} - {{- if .Values.externalConfig }} apiVersion: v1 kind: ConfigMap @@ -43,3 +42,37 @@ metadata: heritage: {{ .Release.Service }} data: {{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-upgrade-deployment + annotations: + "helm.sh/hook": "pre-upgrade" + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": hook-succeeded + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{ tpl (.Files.Glob "resources/*").AsConfig . | indent 2 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-post-upgrade-deployment + annotations: + "helm.sh/hook": "post-upgrade" + "helm.sh/hook-weight": "0" + "helm.sh/hook-delete-policy": hook-succeeded + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{ tpl (.Files.Glob "resources/post-upgrade-script.sh").AsConfig . | indent 2 }} diff --git a/kubernetes/common/mariadb-galera/templates/job.yaml b/kubernetes/common/mariadb-galera/templates/job.yaml new file mode 100644 index 0000000000..cc71bb855c --- /dev/null +++ b/kubernetes/common/mariadb-galera/templates/job.yaml @@ -0,0 +1,107 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "common.fullname" . 
}}-pre-upgrade + annotations: + "helm.sh/hook": "pre-upgrade" + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + template: + spec: + securityContext: + fsGroup: 1001 + runAsUser: 1001 + containers: + - name: mariadb-job-pre-upgrade + image: {{ .Values.global.kubectlImage}} + imagePullPolicy: IfNotPresent + env: + - name: NAMESPACE_ENV + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + command: ["/bin/bash", "-c", "--"] + args: ["/upgrade/upgrade-scripts.sh"] + volumeMounts: + - name: config-mariadb-upgrade + mountPath: /upgrade + volumes: + - name: config-mariadb-upgrade + configMap: + name: {{ include "common.fullname" . }}-upgrade-deployment + defaultMode: 0777 + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "common.fullname" . }}-post-upgrade + annotations: + "helm.sh/hook": "post-upgrade" + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + template: + spec: + securityContext: + fsGroup: 1001 + runAsUser: 0 + initContainers: + - image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}" + name: mariadb-galera-upgrade-readiness + env: + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + command: + - /root/ready.py + args: + - --container-name + - mariadb-galera + containers: + - name: mariadb-job-post-upgrade + image: {{ .Values.global.kubectlImage}} + imagePullPolicy: IfNotPresent + env: + - name: NAMESPACE_ENV + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + command: ["/bin/bash", "-c", "--"] + args: ["/upgrade/post-upgrade-script.sh"] + volumeMounts: + - name: config-mariadb-upgrade + mountPath: /upgrade + volumes: + - name: config-mariadb-upgrade + configMap: + name: {{ include "common.fullname" . }}-post-upgrade-deployment + defaultMode: 0777 + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "common.fullname" . }}-post-delete + annotations: + "helm.sh/hook": "post-delete" + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + template: + spec: + containers: + - name: mariadb-job-post-delete + image: {{ .Values.global.kubectlImage}} + imagePullPolicy: IfNotPresent + command: ["/bin/bash", "-c", "--"] + args: + - for ((index=0;index<{{ $.Values.replicaCount }};index+=1)); + do kubectl delete pvc "{{ include "common.fullname" . }}-data-{{ include "common.fullname" . }}-$index"; + done; kubectl delete deployment {{ include "common.fullname" . }}-upgrade-deployment; + restartPolicy: OnFailure diff --git a/kubernetes/common/mariadb-galera/values.yaml b/kubernetes/common/mariadb-galera/values.yaml index af08ea3d58..4ccb0e5c6e 100644 --- a/kubernetes/common/mariadb-galera/values.yaml +++ b/kubernetes/common/mariadb-galera/values.yaml @@ -42,7 +42,10 @@ global: readinessRepository: oomk8s readinessImage: readiness-check:2.0.2 - + busyboxImage: busybox:1.30 + busyboxRepository: docker.io + # kubeclt image + kubectlImage: "bitnami/kubectl:1.15" ################################################################# # Application configuration defaults. 
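The mariadb-galera changes above wire the upgrade flow into Helm hooks: the pre-upgrade job starts a temporary single-replica Deployment that joins the Galera cluster, waits for it to report the expected wsrep_cluster_size and a Synced state, then scales the StatefulSet to zero and deletes its PVCs; the post-upgrade job waits for the upgraded pods, patches the chart's root and user-credential secrets from the temporary pod, and removes the temporary resources. A rough sketch of how the hook flow could be observed during an upgrade (the release name, local repo name and namespace are illustrative placeholders, not taken from this patch):

    # trigger the hooks by upgrading the release; 'local/mariadb-galera' and
    # 'dev-mariadb-galera' are placeholder repo/release names
    helm upgrade dev-mariadb-galera local/mariadb-galera --namespace onap
    # the pre-/post-upgrade jobs should appear here; on success they are removed
    # again by the hook-succeeded delete policy
    kubectl get jobs -n onap | grep mariadb-galera
    # inspect the upgrade script output while a hook job still exists
    # (<fullname> stands for the rendered common.fullname of the chart)
    kubectl logs -n onap job/<fullname>-pre-upgrade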
diff --git a/kubernetes/onap/values.yaml b/kubernetes/onap/values.yaml index dcbfd6d0cf..e60925f21f 100755 --- a/kubernetes/onap/values.yaml +++ b/kubernetes/onap/values.yaml @@ -58,6 +58,16 @@ global: # logging agent - temporary repo until images migrated to nexus3 loggingRepository: docker.elastic.co + # dockerHub main repository + dockerHubRepository: docker.io + + # busybox repo and image + busyboxRepository: docker.io + busyboxImage: busybox:1.30 + + # kubeclt image + kubectlImage: "bitnami/kubectl:1.15" + # image pull policy pullPolicy: Always diff --git a/kubernetes/sdnc/resources/config/log/org.ops4j.pax.logging.cfg b/kubernetes/sdnc/resources/config/log/org.ops4j.pax.logging.cfg index 117a663dd4..46c5db0ea9 100644 --- a/kubernetes/sdnc/resources/config/log/org.ops4j.pax.logging.cfg +++ b/kubernetes/sdnc/resources/config/log/org.ops4j.pax.logging.cfg @@ -17,8 +17,10 @@ # ################################################################################ -# Common pattern layout for appenders -log4j2.pattern = %d{ISO8601} | %-5p | %-16t | %-32c{1} | %X{bundle.id} - %X{bundle.name} - %X{bundle.version} | %X{currentGraph} - %X{nodeId} | %m%n +# Properties used as default values in MDC +log4j2.property.ServiceName = INTERNAL +log4j2.property.ErrorCode = 900 +log4j2.property.ErrorDesc = UnknownError # Common properties @@ -33,23 +35,18 @@ metricsLogName=metrics auditLogName=audit debugLogName=debug - -# Root logger -log4j2.rootLogger.level = INFO -# uncomment to use asynchronous loggers, which require mvn:com.lmax/disruptor/3.3.2 library -#log4j2.rootLogger.type = asyncRoot -#log4j2.rootLogger.includeLocation = false -log4j2.rootLogger.appenderRef.RollingFile.ref = RollingFile +log4j2.rootLogger.appenderRef.KarafFile.ref = KarafFile log4j2.rootLogger.appenderRef.PaxOsgi.ref = PaxOsgi log4j2.rootLogger.appenderRef.Console.ref = Console +log4j2.rootLogger.appenderRef.DebugFile.ref = DebugFile +log4j2.rootLogger.appenderRef.ErrorFile.ref = ErrorFile log4j2.rootLogger.appenderRef.Console.filter.threshold.type = ThresholdFilter log4j2.rootLogger.appenderRef.Console.filter.threshold.level = ${karaf.log.console:-OFF} -# Loggers configuration - -# Spifly logger -log4j2.logger.spifly.name = org.apache.aries.spifly -log4j2.logger.spifly.level = WARN +log4j2.bundle.info = %X{bundle.id} - %.50X{bundle.name} - %X{bundle.version} +# Veracode: Address Improper Output Neutralization for Logs CWE ID 117 flaw +# \\R matches any new line character, any new line character will replaced with space (stripped) +log4j2.pattern = %d{ISO8601} | %-5p | %-16t | %-32c{1} | ${log4j2.bundle.info} | %X{currentGraph} - %X{nodeId} | %replace{%m}{\\R}{ }%n # Security audit logger log4j2.logger.audit.name = org.apache.karaf.jaas.modules.audit @@ -65,74 +62,150 @@ log4j2.appender.console.name = Console log4j2.appender.console.layout.type = PatternLayout log4j2.appender.console.layout.pattern = ${log4j2.pattern} -# Rolling file appender -log4j2.appender.rolling.type = RollingRandomAccessFile -log4j2.appender.rolling.name = RollingFile -log4j.appender.rolling.level = INFO -log4j2.appender.rolling.fileName = ${logDirectory}/${karafLogName}.log -log4j2.appender.rolling.filePattern = ${logDirectory}/${karafLogName}.log.%i -# uncomment to not force a disk flush -#log4j2.appender.rolling.immediateFlush = false -log4j2.appender.rolling.append = true -log4j2.appender.rolling.layout.type = PatternLayout -log4j2.appender.rolling.layout.pattern = ${log4j2.pattern} -log4j2.appender.rolling.policies.type = Policies 
-log4j2.appender.rolling.policies.size.type = SizeBasedTriggeringPolicy -log4j2.appender.rolling.policies.size.size = ${maxFileSize} - -# Audit file appender - not currently used, so commented out -log4j2.appender.audit.type = RollingRandomAccessFile -log4j2.appender.audit.name = AuditRollingFile -log4j2.appender.audit.fileName = ${logDirectory}/${auditLogName}.log -log4j2.appender.audit.filePattern = ${logDirectory}/${auditLogName}.log.%i -log4j2.appender.audit.append = true -log4j2.appender.audit.layout.type = PatternLayout -log4j2.appender.audit.layout.pattern = ${log4j2.pattern} -log4j2.appender.audit.policies.type = Policies -log4j2.appender.audit.policies.size.type = SizeBasedTriggeringPolicy -log4j2.appender.audit.policies.size.size = ${maxFileSize} - # OSGi appender log4j2.appender.osgi.type = PaxOsgi log4j2.appender.osgi.name = PaxOsgi log4j2.appender.osgi.filter = * - -#ECOMP Debug appender +# KarafFile appender +log4j2.appender.karaf.type = RollingRandomAccessFile +log4j2.appender.karaf.name = KarafFile +log4j2.appender.karaf.fileName = ${logDirectory}/log/karaf.log +log4j2.appender.karaf.filePattern = ${logDirectory}/log/karaf.log.%i +# uncomment to not force a disk flush +#log4j2.appender.karaf.immediateFlush = false +log4j2.appender.karaf.append = true +log4j2.appender.karaf.layout.type = PatternLayout +log4j2.appender.karaf.layout.pattern = ${log4j2.pattern} +log4j2.appender.karaf.policies.type = Policies +log4j2.appender.karaf.policies.size.type = SizeBasedTriggeringPolicy +log4j2.appender.karaf.policies.size.size = ${maxFileSize} +log4j2.appender.karaf.strategy.type = DefaultRolloverStrategy +log4j2.appender.karaf.strategy.max = ${maxBackupIndex} +log4j2.appender.karaf.strategy.fileIndex = min +# Routing appender (log4j sift) +log4j2.appender.routing.type = Routing +log4j2.appender.routing.name = Routing +log4j2.appender.routing.routes.type = Routes +log4j2.appender.routing.routes.pattern = \$\$\\\{ctx:bundle.name\} +log4j2.appender.routing.routes.bundle.type = Route +log4j2.appender.routing.routes.bundle.appender.type = RollingRandomAccessFile +log4j2.appender.routing.routes.bundle.appender.name = Bundle-\$\\\{ctx:bundle.name\} +log4j2.appender.routing.routes.bundle.appender.fileName = ${logDirectory}/log/bundle-\$\\\{ctx:bundle.name\}.log +log4j2.appender.routing.routes.bundle.appender.filePattern= ${logDirectory}/log/bundle-\$\\\{ctx:bundle.name\}.log.%i +log4j2.appender.routing.routes.bundle.appender.append = true +log4j2.appender.routing.routes.bundle.appender.layout.type = PatternLayout +log4j2.appender.routing.routes.bundle.appender.layout.pattern = ${log4j2.pattern} +log4j2.appender.routing.routes.bundle.appender.policies.type = Policies +log4j2.appender.routing.routes.bundle.appender.policies.size.type = SizeBasedTriggeringPolicy +log4j2.appender.routing.routes.bundle.appender.policies.size.size = ${maxFileSize} + +#ecomp logging standards log4j2.appender.debug.type = RollingRandomAccessFile -log4j2.appender.debug.name = DebugRollingFile -log4j2.appender.debug.fileName = ${logDirectory}/${debugLogName}.log -log4j2.appender.debug.filePattern = ${logDirectory}/${debugLogName}.log.%i +log4j2.appender.debug.name = DebugFile +log4j2.appender.debug.fileName = <%= @logDir %>/debug.log +log4j2.appender.debug.filePattern = <%= @logDir %>/debug.log.%i +# uncomment to not force a disk flush +#log4j2.appender.debug.immediateFlush = false log4j2.appender.debug.append = true log4j2.appender.debug.layout.type = PatternLayout -log4j2.appender.debug.layout.pattern = 
${log4j2.pattern} +log4j2.appender.debug.layout.pattern = %d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestID}|%-16.16t|%-5.5p|%-32.32c{1}|${log4j2.bundle.info}|%replace{%m}{\\R}{ }%n log4j2.appender.debug.policies.type = Policies log4j2.appender.debug.policies.size.type = SizeBasedTriggeringPolicy log4j2.appender.debug.policies.size.size = ${maxFileSize} +log4j2.appender.debug.strategy.type = DefaultRolloverStrategy +log4j2.appender.debug.strategy.max = ${maxBackupIndex} +log4j2.appender.debug.strategy.fileIndex = min -#Error appender log4j2.appender.error.type = RollingRandomAccessFile -log4j2.appender.error.name = ErrorRollingFile -log4j2.appender.error.fileName = ${logDirectory}/${errorLogName}.log -log4j2.appender.error.filePattern = ${logDirectory}/${errorLogName}.log.%i +log4j2.appender.error.name = ErrorFile +log4j2.appender.error.fileName = <%= @logDir %>/error.log +log4j2.appender.error.filePattern = <%= @logDir %>/error.log.%i +# uncomment to not force a disk flush +#log4j2.appender.error.immediateFlush = false log4j2.appender.error.append = true log4j2.appender.error.layout.type = PatternLayout -log4j2.appender.error.layout.pattern = ${log4j2.pattern} +log4j2.appender.error.layout.pattern = %d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestID}|%-16.16t|$\$\\\{ctx:ServiceName\}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%p|$\$\\\{ctx:ErrorCode\}|$\$\\\{ctx:ErrorDesc\}|%replace{%m}{\\R}{ }%ex{full}{separator(\\n)}%n + log4j2.appender.error.policies.type = Policies log4j2.appender.error.policies.size.type = SizeBasedTriggeringPolicy log4j2.appender.error.policies.size.size = ${maxFileSize} +log4j2.appender.error.strategy.type = DefaultRolloverStrategy +log4j2.appender.error.strategy.max = ${maxBackupIndex} +log4j2.appender.error.strategy.fileIndex = min +log4j2.appender.error.filter.threshold.type = ThresholdFilter +log4j2.appender.error.filter.threshold.level = WARN +log4j2.appender.error.filter.threshold.match = ACCEPT + + + +log4j2.appender.metric.type = RollingRandomAccessFile +log4j2.appender.metric.name = MetricFile +log4j2.appender.metric.fileName = <%= @logDir %>/metric.log +log4j2.appender.metric.filePattern = <%= @logDir %>/metric.log.%i +# uncomment to not force a disk flush +#log4j2.appender.metric.immediateFlush = false +log4j2.appender.metric.append = true +log4j2.appender.metric.layout.type = PatternLayout +log4j2.appender.metric.layout.pattern=%X{InvokeTimestamp}|%X{LogTimestamp}|%X{RequestID}|%X{ServiceInstanceID}|%-16.16t|%X{ServerFQDN}|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDesc}|%X{InstanceID}|%p|%X{Severity}|<%= @hostIP %>|%X{ElapsedTime}|<%= @hostFQDN %>|%X{ClientIPAddress}|%C{1}|||%X{TargetElement}|%X{slf4j.marker}|%X|%X{currentGraph} - %X{nodeId}|${log4j2.bundle.info}|%m%n +log4j2.appender.metric.policies.type = Policies +log4j2.appender.metric.policies.size.type = SizeBasedTriggeringPolicy +log4j2.appender.metric.policies.size.size = ${maxFileSize} +log4j2.appender.metric.strategy.type = DefaultRolloverStrategy +log4j2.appender.metric.strategy.max = 100 +log4j2.appender.metric.strategy.fileIndex = min -#Metrics appender - not used so commented out -#log4j2.appender.metrics.type = RollingRandomAccessFile -#log4j2.appender.metrics.name = MetricsRollingFile -#log4j2.appender.metrics.fileName = ${logDirectory}/${metricsLogName}.log -#log4j2.appender.metrics.filePattern = ${logDirectory}/${metricsLogName}.log.%i -#log4j2.appender.metrics.append = true -#log4j2.appender.metrics.layout.type 
= PatternLayout -#log4j2.appender.metrics.layout.pattern = ${log4j2.pattern} -#log4j2.appender.metrics.policies.type = Policies -#log4j2.appender.metrics.policies.size.type = SizeBasedTriggeringPolicy -#log4j2.appender.metrics.policies.size.size = ${maxFileSize} +log4j2.appender.audit.type = RollingRandomAccessFile +log4j2.appender.audit.name = AuditFile +log4j2.appender.audit.fileName = <%= @logDir %>/audit.log +log4j2.appender.audit.filePattern = <%= @logDir %>/audit.log.%i +# uncomment to not force a disk flush +#log4j2.appender.audit.immediateFlush = false +log4j2.appender.audit.append = true +log4j2.appender.audit.layout.type = PatternLayout +log4j2.appender.audit.layout.pattern=%X{EntryTimestamp}|%X{LogTimestamp}|%X{RequestID}|%X{ServiceInstanceID}|%-16.16t|%X{ServerFQDN}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDesc}|%X{InstanceID}|INFO|%X{Severity}|<%= @hostIP %>|%X{ElapsedTime}|<%= @hostFQDN %>|%X{ClientIPAddress}|%C{1}|%X{AUDIT-Unused}|%X{AUDIT-ProcessKey}|%X{slf4j.marker}|%X|%X{currentGraph} - %X{nodeId}|${log4j2.bundle.info}|%m%n +log4j2.appender.audit.policies.type = Policies +log4j2.appender.audit.policies.size.type = SizeBasedTriggeringPolicy +log4j2.appender.audit.policies.size.size = ${maxFileSize} +log4j2.appender.audit.strategy.type = DefaultRolloverStrategy +log4j2.appender.audit.strategy.max = ${maxBackupIndex} +log4j2.appender.audit.strategy.fileIndex = min + +# Loggers + +log4j2.logger.metric.name = org.onap.ccsdk.sli.core.filters.metric +log4j2.logger.metric.level = INFO +log4j2.logger.metric.additivity = false +log4j2.logger.metric.appenderRef.MetricFile.ref = MetricFile + +log4j2.logger.metric2.name = org.onap.logging.filter.base.AbstractBaseMetricLogFilter +log4j2.logger.metric2.level = INFO +log4j2.logger.metric2.additivity = false +log4j2.logger.metric2.appenderRef.MetricFile.ref = MetricFile + +log4j2.logger.audit.name = org.onap.logging.filter.base.AbstractAuditLogFilter +log4j2.logger.audit.level = INFO +log4j2.logger.audit.additivity = false +log4j2.logger.audit.appenderRef.AuditFile.ref = AuditFile + +log4j2.logger.metric.name = org.onap.ccsdk.sli.core.filters.metric +log4j2.logger.metric.level = INFO +log4j2.logger.metric.additivity = false +log4j2.logger.metric.appenderRef.MetricFile.ref = MetricFile + +log4j2.logger.metric2.name = org.onap.logging.filter.base.AbstractBaseMetricLogFilter +log4j2.logger.metric2.level = INFO +log4j2.logger.metric2.additivity = false +log4j2.logger.metric2.appenderRef.MetricFile.ref = MetricFile + +log4j2.logger.audit.name = org.onap.logging.filter.base.AbstractAuditLogFilter +log4j2.logger.audit.level = INFO +log4j2.logger.audit.additivity = false +log4j2.logger.audit.appenderRef.AuditFile.ref = AuditFile +log4j2.logger.rr.name = org.onap.logging.filter.base.PayloadLoggingServletFilter +log4j2.logger.rr.level = INFO +log4j2.logger.rr.additivity = false +log4j2.logger.rr.appenderRef.RequestResponseFile.ref = RequestResponseFile
\ No newline at end of file
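The final hunks replace the old per-appender layouts with a shared ${log4j2.pattern} that routes every %m through %replace{%m}{\R}{ }, so any line break in a logged message is collapsed to a space and a caller cannot forge extra log records (the CWE-117 item called out in the comment). A rough shell analogue of that substitution, plus a quick check that the hardened pattern is present in the file shipped by the chart (path relative to the oom repo root; illustrative only):

    # same idea as %replace{%m}{\R}{ }: collapse embedded newlines to spaces
    printf 'user=admin\nINFO forged entry\n' | tr '\n' ' '; echo
    # confirm the hardened pattern landed in the SDNC logging config
    grep -n 'replace{%m}' kubernetes/sdnc/resources/config/log/org.ops4j.pax.logging.cfg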