Diffstat (limited to 'kubernetes')
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/Chart.yaml (renamed from kubernetes/aaf/charts/sms/Chart.yaml)  | 2
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/Chart.yaml  | 18
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml  | 27
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pv.yaml  | 37
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pvc.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/templates/pvc.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml  | 70
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml  | 83
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/Chart.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/Chart.yaml)  | 2
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/configmap.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/templates/configmap.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pv.yaml  | 37
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pvc.yaml  | 48
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/service.yaml (renamed from kubernetes/aaf/charts/sms/templates/service.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/statefulset.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/templates/statefulset.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/values.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/values.yaml)  | 6
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/templates/configmap.yaml (renamed from kubernetes/aaf/charts/sms/templates/configmap.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml (renamed from kubernetes/aaf/charts/sms/templates/deployment.yaml)  | 7
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/templates/pv.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/templates/pv.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/templates/pvc.yaml  | 48
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/templates/service.yaml (renamed from kubernetes/aaf/charts/sms/charts/vault/templates/service.yaml)  | 0
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/values.yaml (renamed from kubernetes/aaf/charts/sms/values.yaml)  | 9
-rw-r--r--  kubernetes/aai/charts/aai-babel/resources/config/auth/auth_policy.json  | 6
-rw-r--r--  kubernetes/aai/charts/aai-babel/resources/config/auth/tomcat_keystore  | bin 17136 -> 2214 bytes
-rw-r--r--  kubernetes/aai/charts/aai-babel/resources/config/babel-auth.properties  | 2
-rw-r--r--  kubernetes/aai/charts/aai-babel/resources/config/logback.xml  | 179
-rw-r--r--  kubernetes/aai/charts/aai-babel/templates/deployment.yaml  | 5
-rw-r--r--  kubernetes/aai/charts/aai-babel/values.yaml  | 4
-rw-r--r--  kubernetes/aai/charts/aai-cassandra/templates/statefulset.yaml  | 2
-rw-r--r--  kubernetes/aai/charts/aai-cassandra/values.yaml  | 2
-rw-r--r--  kubernetes/aai/charts/aai-champ/resources/config/dynamic/conf/champ-beans.xml  | 5
-rw-r--r--  kubernetes/aai/charts/aai-modelloader/resources/config/log/logback.xml  | 311
-rw-r--r--  kubernetes/aai/charts/aai-modelloader/resources/config/model-loader.properties  | 2
-rw-r--r--  kubernetes/aai/charts/aai-modelloader/templates/deployment.yaml  | 7
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/aai.properties  | 87
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/auth/csp-cookie-filter.properties  | 12
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/auth/inventory-ui-keystore  | bin 7201 -> 0 bytes
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/auth/tomcat_keystore  | bin 0 -> 2214 bytes
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-filter-aggregation.xml  | 7
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-prepareSchema.xml  | 11
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-subscriptionService.xml  | 7
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unified-search.xml  | 7
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unifiedFilterRequest.xml  | 7
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/descriptors/aaiEntityNodeDescriptors.json  | 218
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/elasticsearch.properties  | 72
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_filters.json  | 79
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_views.json  | 21
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/filters/subscription_object_inspector_mapping.json  | 16
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/log/logback.xml  | 370
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/portal/BOOT-INF/classes/portal.properties (renamed from kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal.properties)  | 14
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal-authentication.properties  | 28
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/portal/roles.config  | 6
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/roles.config  | 10
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestMappings.json  | 10
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestSettings.json  | 21
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/dynamicMappings.json  | 14
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/entityCountHistoryMappings.json  | 16
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_mappings.json  | 32
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_settings.json  | 36
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/search-service.properties  | 32
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-application.properties  | 11
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-http-config.properties  | 1
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-ssl-config.properties  | 3
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-aggregateVnfSearchProvider.xml  | 33
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-apigw.xml  | 31
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-gizmo.xml  | 29
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml  | 373
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspect.xml  | 55
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspectSearchProvider.xml  | 34
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core.xml  | 241
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/suggestive-search.properties  | 27
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/resources/config/synchronizer.properties  | 33
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/templates/configmap.yaml  | 51
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml  | 97
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/templates/service.yaml  | 2
-rw-r--r--  kubernetes/aai/charts/aai-sparky-be/values.yaml  | 21
-rw-r--r--  kubernetes/aai/values.yaml  | 26
-rwxr-xr-x  kubernetes/appc/resources/config/appc/opt/onap/appc/bin/installAppcDb.sh  | 4
-rwxr-xr-x  kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh  | 37
-rw-r--r--  kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaiclient.properties  | 169
-rw-r--r--  kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties  | 18
-rwxr-xr-x  kubernetes/appc/resources/config/appc/opt/onap/ccsdk/bin/installSdncDb.sh  | 2
-rw-r--r--  kubernetes/appc/templates/pv.yaml  | 130
-rw-r--r--  kubernetes/appc/templates/statefulset.yaml  | 22
-rw-r--r--  kubernetes/appc/values.yaml  | 29
-rw-r--r--  kubernetes/clamp/templates/configmap.yaml  | 2
-rw-r--r--  kubernetes/clamp/values.yaml  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json  | 6
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/log-kibana.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/log-logstash.json  | 20
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/msb-health.json  | 16
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/mso-health.json  | 28
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json  | 15
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json  | 8
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh  | 6
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/so-api-script.sh (renamed from kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh)  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/so-camunda-script.sh (renamed from kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh)  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/so-jra-script.sh (renamed from kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh)  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/so-mariadb-script.sh (renamed from kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh)  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh  | 4
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh  | 2
-rwxr-xr-x  kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdc-health.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json  | 4
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json  | 2
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/so-health.json  | 28
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/so-mariabdb.json  | 15
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/vfc-health.json  | 64
-rw-r--r--  kubernetes/consul/resources/config/consul-agent-config/vid-health.json  | 2
-rw-r--r--  kubernetes/msb/resources/config/log/filebeat/filebeat.yml  | 2
-rw-r--r--  kubernetes/msb/values.yaml  | 6
-rw-r--r--  kubernetes/multicloud/charts/multicloud-ocata/templates/NOTES.txt  | 34
-rw-r--r--  kubernetes/multicloud/charts/multicloud-ocata/values.yaml  | 3
-rw-r--r--  kubernetes/multicloud/charts/multicloud-vio/templates/NOTES.txt  | 34
-rw-r--r--  kubernetes/multicloud/charts/multicloud-vio/values.yaml  | 3
-rw-r--r--  kubernetes/multicloud/charts/multicloud-windriver/templates/NOTES.txt  | 34
-rw-r--r--  kubernetes/multicloud/charts/multicloud-windriver/values.yaml  | 3
-rw-r--r--  kubernetes/multicloud/resources/config/provider-plugin.json  | 29
-rw-r--r--  kubernetes/multicloud/templates/NOTES.txt  | 34
-rw-r--r--  kubernetes/multicloud/templates/configmap.yaml  | 8
-rw-r--r--  kubernetes/multicloud/templates/deployment.yaml  | 6
-rw-r--r--  kubernetes/onap/values.yaml  | 8
-rw-r--r--  kubernetes/policy/resources/config/pe/console.conf  | 6
-rwxr-xr-x  kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/fusion.properties  | 2
-rwxr-xr-x  kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/openid-connect.properties  | 6
-rwxr-xr-x  kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/portal.properties  | 4
-rwxr-xr-x  kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties  | 4
-rw-r--r--  kubernetes/portal/charts/portal-app/templates/service.yaml  | 6
-rw-r--r--  kubernetes/portal/charts/portal-app/values.yaml  | 5
-rw-r--r--  kubernetes/portal/charts/portal-cassandra/templates/deployment.yaml  | 2
-rw-r--r--  kubernetes/portal/charts/portal-cassandra/values.yaml  | 1
-rw-r--r--  kubernetes/portal/charts/portal-mariadb/resources/config/mariadb/oom_updates.sql  | 22
-rw-r--r--  kubernetes/portal/charts/portal-mariadb/values.yaml  | 33
-rwxr-xr-x  kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/portal.properties  | 4
-rwxr-xr-x  kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties  | 2
-rw-r--r--  kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties  | 2
-rwxr-xr-x  kubernetes/portal/launch-onap-portal.sh  | 70
-rw-r--r--  kubernetes/portal/values.yaml  | 5
-rwxr-xr-x  kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py  | 1
-rwxr-xr-x  kubernetes/robot/resources/config/eteshare/config/vm_properties.py  | 8
-rw-r--r--  kubernetes/sdc/charts/sdc-be/templates/deployment.yaml  | 8
-rw-r--r--  kubernetes/sdc/charts/sdc-be/templates/job.yaml  | 1
-rw-r--r--  kubernetes/sdc/charts/sdc-cs/templates/job.yaml  | 1
-rw-r--r--  kubernetes/sdc/charts/sdc-es/templates/job.yaml  | 1
-rw-r--r--  kubernetes/sdc/charts/sdc-fe/templates/deployment.yaml  | 4
-rw-r--r--  kubernetes/sdc/charts/sdc-kb/templates/deployment.yaml  | 12
-rw-r--r--  kubernetes/sdc/charts/sdc-onboarding-be/templates/deployment.yaml  | 16
-rw-r--r--  kubernetes/sdc/charts/sdc-onboarding-be/templates/job.yaml  | 15
-rw-r--r--  kubernetes/sdnc/templates/pv.yaml  | 84
-rw-r--r--  kubernetes/sdnc/templates/statefulset.yaml  | 23
-rw-r--r--  kubernetes/sdnc/values.yaml  | 22
-rw-r--r--  kubernetes/vfc/resources/config/logging/filebeat/filebeat.yml  | 3
-rw-r--r--  kubernetes/vid/templates/deployment.yaml  | 10
-rw-r--r--  kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml (renamed from kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/job.yaml)  | 24
-rw-r--r--  kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml  | 2
-rw-r--r--  kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml  | 5
-rw-r--r--  kubernetes/vnfsdk/requirements.yaml  | 3
-rw-r--r--  kubernetes/vnfsdk/templates/deployment.yaml  | 2
-rw-r--r--  kubernetes/vnfsdk/templates/service.yaml  | 2
-rw-r--r--  kubernetes/vnfsdk/values.yaml  | 6
181 files changed, 3309 insertions(+), 1163 deletions(-)
diff --git a/kubernetes/aaf/charts/sms/Chart.yaml b/kubernetes/aaf/charts/aaf-sms/Chart.yaml
index 962ef38650..71418759fb 100644
--- a/kubernetes/aaf/charts/sms/Chart.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/Chart.yaml
@@ -14,5 +14,5 @@
apiVersion: v1
description: ONAP Secret Management Service
-name: sms
+name: aaf-sms
version: 2.0.0
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/Chart.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/Chart.yaml
new file mode 100644
index 0000000000..4e279e7ced
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/Chart.yaml
@@ -0,0 +1,18 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: ONAP Secret Management Service Quorum Client
+name: aaf-sms-quorumclient
+version: 2.0.0
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
new file mode 100644
index 0000000000..cacc368df1
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
@@ -0,0 +1,27 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ config.json: |
+ {{ .Values.config | toJson }}
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pv.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pv.yaml
new file mode 100644
index 0000000000..da0949816e
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pv.yaml
@@ -0,0 +1,37 @@
+{{/*
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
+{{- end -}}
diff --git a/kubernetes/aaf/charts/sms/charts/vault/templates/pvc.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pvc.yaml
index e6aacd1b96..e6aacd1b96 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/templates/pvc.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/pvc.yaml
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
new file mode 100644
index 0000000000..483d6c5f17
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
@@ -0,0 +1,70 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ serviceName:
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+ name: {{ include "common.name" . }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ command: ["/quorumclient/bin/quorumclient"]
+ workingDir: /quorumclient/
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: localtime
+ readOnly: true
+ - mountPath: /quorumclient/config.json
+ name: {{ include "common.name" .}}
+ subPath: config.json
+ - mountPath: /quorumclient/auth
+ name: {{ include "common.fullname" . }}-auth
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+ {{- end -}}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+ {{- end }}
+ volumes:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name : {{ include "common.name" . }}
+ configMap:
+ name: {{ include "common.fullname" . }}
+ - name: {{ include "common.fullname" . }}-auth
+ persistentVolumeClaim:
+ claimName: {{ include "common.fullname" . }}
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
new file mode 100644
index 0000000000..d09d492137
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
@@ -0,0 +1,83 @@
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 302
+ repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:1.1.1
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+ persistence: {}
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+# application image
+repository: nexus3.onap.org:10001
+image: onap/aaf/smsquorumclient
+pullPolicy: Always
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+# application configuration
+# Example:
+config:
+ url: "http://aaf-sms:10443"
+ cafile: "selfsignedca.pem"
+ clientcert: "server.cert"
+ clientkey: "server.key"
+ timeout: "60s"
+ disable_tls: true
+
+# default number of instances
+replicaCount: 3
+
+nodeSelector: {}
+
+affinity: {}
+
+persistence:
+ enabled: true
+ volumeReclaimPolicy: Retain
+ accessMode: ReadWriteOnce
+ size: 10Mi
+ mountPath: /dockerdata-nfs
+ mountSubPath: sms/quorum/data
+
+ingress:
+ enabled: false
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ #
+ # Example:
+ # Configure resource requests and limits
+ # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ # Minimum memory for development is 2 CPU cores and 4GB memory
+ # Minimum memory for production is 4 CPU cores and 8GB memory
+#resources:
+# limits:
+# cpu: 2
+# memory: 4Gi
+# requests:
+# cpu: 2
+# memory: 4Gi
diff --git a/kubernetes/aaf/charts/sms/charts/vault/Chart.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/Chart.yaml
index bf1af999d4..3f0b93ea0e 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/Chart.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/Chart.yaml
@@ -14,6 +14,6 @@
apiVersion: v1
description: Chart to launch Vault as SMS backend
-name: vault
+name: aaf-sms-vault
appVersion: 0.9.5
version: 2.0.0
diff --git a/kubernetes/aaf/charts/sms/charts/vault/templates/configmap.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/configmap.yaml
index daf8cadc46..daf8cadc46 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/templates/configmap.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/configmap.yaml
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pv.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pv.yaml
new file mode 100644
index 0000000000..da0949816e
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pv.yaml
@@ -0,0 +1,37 @@
+{{/*
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
+{{- end -}}
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pvc.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pvc.yaml
new file mode 100644
index 0000000000..e6aacd1b96
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/pvc.yaml
@@ -0,0 +1,48 @@
+{{/*
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ include "common.fullname" . }}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/kubernetes/aaf/charts/sms/templates/service.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/service.yaml
index 04e9a5a92f..04e9a5a92f 100644
--- a/kubernetes/aaf/charts/sms/templates/service.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/service.yaml
diff --git a/kubernetes/aaf/charts/sms/charts/vault/templates/statefulset.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/statefulset.yaml
index 26f03044ac..26f03044ac 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/templates/statefulset.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/templates/statefulset.yaml
diff --git a/kubernetes/aaf/charts/sms/charts/vault/values.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/values.yaml
index 2bce18106f..07b8c33226 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/values.yaml
@@ -24,7 +24,7 @@ global:
repository: docker.io
image:
consul: consul:1.0.6
- vault: vault:0.9.6
+ vault: vault:0.10.0
pullPolicy: Always
# flag to enable debugging - application support required
@@ -75,10 +75,8 @@ readiness:
persistence:
enabled: true
-
volumeReclaimPolicy: Retain
-
- accessMode: ReadWriteMany
+ accessMode: ReadWriteOnce
size: 2Gi
mountPath: /dockerdata-nfs
mountSubPath: sms/consul/data
diff --git a/kubernetes/aaf/charts/sms/templates/configmap.yaml b/kubernetes/aaf/charts/aaf-sms/templates/configmap.yaml
index 72ce6fbadb..72ce6fbadb 100644
--- a/kubernetes/aaf/charts/sms/templates/configmap.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/templates/configmap.yaml
diff --git a/kubernetes/aaf/charts/sms/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
index e5381a0f0c..4235ad01af 100644
--- a/kubernetes/aaf/charts/sms/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
@@ -31,7 +31,7 @@ spec:
release: {{ .Release.Name }}
spec:
containers:
- - image: "{{ .Values.repository }}/{{ .Values.image }}"
+ - image: "{{ include "common.repository" . }}/{{ .Values.image }}"
name: {{ include "common.name" . }}
imagePullPolicy: {{ .Values.pullPolicy }}
command: ["/sms/bin/sms"]
@@ -57,6 +57,8 @@ spec:
- mountPath: /sms/smsconfig.json
name: {{ include "common.name" .}}
subPath: smsconfig.json
+ - mountPath: /sms/auth
+ name: {{ include "common.fullname" . }}-auth
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- if .Values.nodeSelector }}
@@ -74,5 +76,8 @@ spec:
- name : {{ include "common.name" . }}
configMap:
name: {{ include "common.fullname" . }}
+ - name: {{ include "common.fullname" . }}-auth
+ persistentVolumeClaim:
+ claimName: {{ include "common.fullname" . }}
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/aaf/charts/sms/charts/vault/templates/pv.yaml b/kubernetes/aaf/charts/aaf-sms/templates/pv.yaml
index 37ed28ee9d..37ed28ee9d 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/templates/pv.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/templates/pv.yaml
diff --git a/kubernetes/aaf/charts/aaf-sms/templates/pvc.yaml b/kubernetes/aaf/charts/aaf-sms/templates/pvc.yaml
new file mode 100644
index 0000000000..e6aacd1b96
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/templates/pvc.yaml
@@ -0,0 +1,48 @@
+{{/*
+# Copyright 2018 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ include "common.fullname" . }}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/kubernetes/aaf/charts/sms/charts/vault/templates/service.yaml b/kubernetes/aaf/charts/aaf-sms/templates/service.yaml
index 18334a301c..18334a301c 100644
--- a/kubernetes/aaf/charts/sms/charts/vault/templates/service.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/templates/service.yaml
diff --git a/kubernetes/aaf/charts/sms/values.yaml b/kubernetes/aaf/charts/aaf-sms/values.yaml
index 05d4e79e6b..cd3253941d 100644
--- a/kubernetes/aaf/charts/sms/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/values.yaml
@@ -22,6 +22,7 @@ global:
readinessImage: readiness-check:2.0.0
loggingRepository: docker.elastic.co
loggingImage: beats/filebeat:5.5.0
+ persistence: {}
#################################################################
# Application configuration defaults.
@@ -73,6 +74,14 @@ service:
internalPort: 10443
nodePort: 43
+persistence:
+ enabled: true
+ volumeReclaimPolicy: Retain
+ accessMode: ReadWriteOnce
+ size: 1Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: sms/auth
+
ingress:
enabled: false
diff --git a/kubernetes/aai/charts/aai-babel/resources/config/auth/auth_policy.json b/kubernetes/aai/charts/aai-babel/resources/config/auth/auth_policy.json
index 5340a6ac75..e7c6459f65 100644
--- a/kubernetes/aai/charts/aai-babel/resources/config/auth/auth_policy.json
+++ b/kubernetes/aai/charts/aai-babel/resources/config/auth/auth_policy.json
@@ -8,7 +8,7 @@
}
],
"users": [
- {"username": "CN=someone.onap.org, OU=someone, O=\"ONAP ORG\", L=NA, ST=NA, C=NA"}
+ {"username": "CN=ONAP, OU=ONAP, O=ONAP, L=Ottawa, ST=Ontario, C=CA"}
]
},
{
@@ -18,7 +18,7 @@
"methods": [{"name": "POST"}]
}],
"users": [
- {"username": "CN=someone.onap.org, OU=someone, O=\"ONAP ORG\", L=NA, ST=NA, C=NA"}
+ {"username": "CN=ONAP, OU=ONAP, O=ONAP, L=Ottawa, ST=Ontario, C=CA"}
]
},
{
@@ -30,7 +30,7 @@
}
],
"users": [
- {"username": "CN=someone.onap.org, OU=someone, O=\"ONAP ORG\", L=NA, ST=NA, C=NA"}
+ {"username": "CN=ONAP, OU=ONAP, O=ONAP, L=Ottawa, ST=Ontario, C=CA"}
]
},
{
diff --git a/kubernetes/aai/charts/aai-babel/resources/config/auth/tomcat_keystore b/kubernetes/aai/charts/aai-babel/resources/config/auth/tomcat_keystore
index cbec390428..9eec841aa2 100644
--- a/kubernetes/aai/charts/aai-babel/resources/config/auth/tomcat_keystore
+++ b/kubernetes/aai/charts/aai-babel/resources/config/auth/tomcat_keystore
Binary files differ
diff --git a/kubernetes/aai/charts/aai-babel/resources/config/babel-auth.properties b/kubernetes/aai/charts/aai-babel/resources/config/babel-auth.properties
index 8bf21b0b95..e7dfda081f 100644
--- a/kubernetes/aai/charts/aai-babel/resources/config/babel-auth.properties
+++ b/kubernetes/aai/charts/aai-babel/resources/config/babel-auth.properties
@@ -1,2 +1,2 @@
auth.policy.file=/auth/auth_policy.json
-auth.authentication.disable=false
+auth.authentication.disable=true
diff --git a/kubernetes/aai/charts/aai-babel/resources/config/logback.xml b/kubernetes/aai/charts/aai-babel/resources/config/logback.xml
new file mode 100644
index 0000000000..63b8faf09a
--- /dev/null
+++ b/kubernetes/aai/charts/aai-babel/resources/config/logback.xml
@@ -0,0 +1,179 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE xml>
+<configuration scan="true" scanPeriod="30 seconds" debug="true">
+ <include resource="org/springframework/boot/logging/logback/base.xml" />
+
+ <property name="componentName" value="AAI-BAS" />
+ <property name="logDirectory" value="${APP_HOME}/logs/${componentName}" />
+
+ <!-- default EELF log file names -->
+ <property name="generalLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+
+ <property name="errorLogPattern"
+ value="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%mdc{RequestId}|%thread|%mdc{ServiceName}|%mdc{PartnerName}|%mdc{TargetEntity}|%mdc{TargetServiceName}|%.-5level|%logger|%mdc{ClassName}|%msg%n" />
+
+ <property name="auditLogPattern"
+ value="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%mdc{BeginTimestamp}|%mdc{EndTimestamp}|%mdc{RequestId}|%mdc{ServiceInstanceId}|%thread|%mdc{ServerFQDN}|%mdc{ServiceName}|%mdc{PartnerName}|%mdc{StatusCode}|%mdc{ResponseCode}|%mdc{ResponseDescription}|%logger|%.-5level|||%mdc{ElapsedTime}|%mdc{RemoteHost}|%mdc{ClientAddress}|%mdc{ClassName}|||%msg%n" />
+
+ <property name="metricsLogPattern"
+ value="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%mdc{BeginTimestamp}|%mdc{EndTimestamp}|%mdc{RequestId}|%mdc{ServiceInstanceId}|%thread|%mdc{ServerFQDN}|%mdc{ServiceName}|%mdc{PartnerName}|%mdc{TargetEntity}|%mdc{TargetServiceName}|%mdc{StatusCode}|%mdc{ResponseCode}|%mdc{ResponseDescription}|%logger|%.-5level|||%mdc{ElapsedTime}|%mdc{RemoteHost}|%mdc{ClientAddress}|%mdc{ClassName}|||%msg%n" />
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine related logging events. The audit logger and appender
+ are specializations of the EELF application root logger and appender. This can be used to segregate Policy engine events
+ from other components, or it can be eliminated to record these events as part of the application root log. -->
+
+ <appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${auditLogPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+
+ <appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${metricsLogPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics" />
+ </appender>
+
+ <appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>
+ ${logDirectory}/${debugLogName}.log
+ </file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- allow only events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.core.filter.EvaluatorFilter">
+ <evaluator class="ch.qos.logback.classic.boolex.GEventEvaluator">
+ <expression>
+ e.level.toInt() &lt; INFO.toInt()
+ </expression>
+ </evaluator>
+ <OnMismatch>DENY</OnMismatch>
+ <OnMatch>NEUTRAL</OnMatch>
+ </filter>
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>false</includeCallerData>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- Default / root appenders -->
+ <!-- This determines the logging level for 3rd party code -->
+ <!-- ============================================================================ -->
+
+ <root level="INFO">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFDebug" />
+</root>
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+
+ <logger name="com.att.eelf" level="INFO" additivity="false">
+ <appender-ref ref="asyncEELF" />
+</logger>
+
+ <!-- The level of this logger determines the contents of the debug log -->
+ <logger name="com.att.eelf.debug" level="INFO" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+</logger>
+
+ <logger name="com.att.eelf.audit" level="INFO" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+</logger>
+
+ <logger name="com.att.eelf.metrics" level="INFO" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+</logger>
+
+ <!-- ============================================================================ -->
+ <!-- Non-EELF loggers -->
+ <!-- ============================================================================ -->
+
+ <!-- ATT packages including DMAAP message routing -->
+ <logger name="com.att" level="INFO" />
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" />
+ <logger name="org.springframework.beans" level="WARN" />
+ <logger name="org.springframework.web" level="WARN" />
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="org.apache" level="WARN" />
+ <logger name="org.apache.commons" level="WARN" />
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging. May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="WARN" />
+ <logger name="org.apache.cxf" level="WARN" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.service" level="WARN" />
+ <logger name="org.restlet" level="WARN" />
+ <logger name="org.apache.camel.component.restlet" level="WARN" />
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="WARN" />
+ <logger name="ch.qos.logback.core" level="WARN" />
+
+</configuration>
diff --git a/kubernetes/aai/charts/aai-babel/templates/deployment.yaml b/kubernetes/aai/charts/aai-babel/templates/deployment.yaml
index 6b1312bbdc..15cd163edb 100644
--- a/kubernetes/aai/charts/aai-babel/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-babel/templates/deployment.yaml
@@ -77,6 +77,9 @@ spec:
name: {{ include "common.fullname" . }}-secrets
- mountPath: /logs
name: {{ include "common.fullname" . }}-logs
+ - mountPath: /opt/app/babel/config/logback.xml
+ name: {{ include "common.fullname" . }}-config
+ subPath: logback.xml
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- if .Values.nodeSelector }}
@@ -99,6 +102,8 @@ spec:
path: artifact-generator.properties
- key: babel-auth.properties
path: babel-auth.properties
+ - key: logback.xml
+ path: logback.xml
- name: {{ include "common.fullname" . }}-secrets
secret:
secretName: {{ include "common.fullname" . }}-babel-secrets
diff --git a/kubernetes/aai/charts/aai-babel/values.yaml b/kubernetes/aai/charts/aai-babel/values.yaml
index 43f895267a..ef872a04a2 100644
--- a/kubernetes/aai/charts/aai-babel/values.yaml
+++ b/kubernetes/aai/charts/aai-babel/values.yaml
@@ -28,8 +28,8 @@ image: onap/babel:1.2-STAGING-latest
# application configuration
config:
- keyStorePassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
- keyManagerPassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
+ keyStorePassword: OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10
+ keyManagerPassword: OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10
# default number of instances
replicaCount: 1
diff --git a/kubernetes/aai/charts/aai-cassandra/templates/statefulset.yaml b/kubernetes/aai/charts/aai-cassandra/templates/statefulset.yaml
index aa3482d293..92f05dda7e 100644
--- a/kubernetes/aai/charts/aai-cassandra/templates/statefulset.yaml
+++ b/kubernetes/aai/charts/aai-cassandra/templates/statefulset.yaml
@@ -77,7 +77,7 @@ spec:
fieldPath: status.podIP
volumeMounts:
- name: cassandra-data
- mountPath: /var/lib/cassandra/data
+ mountPath: /var/lib/cassandra
resources:
{{ toYaml .Values.resources | indent 10 }}
{{- if .Values.nodeSelector }}
diff --git a/kubernetes/aai/charts/aai-cassandra/values.yaml b/kubernetes/aai/charts/aai-cassandra/values.yaml
index 884bc396a8..baa87c2a72 100644
--- a/kubernetes/aai/charts/aai-cassandra/values.yaml
+++ b/kubernetes/aai/charts/aai-cassandra/values.yaml
@@ -62,7 +62,7 @@ ingress:
enabled: false
persistence:
- enabled: false
+ enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
diff --git a/kubernetes/aai/charts/aai-champ/resources/config/dynamic/conf/champ-beans.xml b/kubernetes/aai/charts/aai-champ/resources/config/dynamic/conf/champ-beans.xml
index 6a66811e8c..a271402637 100644
--- a/kubernetes/aai/charts/aai-champ/resources/config/dynamic/conf/champ-beans.xml
+++ b/kubernetes/aai/charts/aai-champ/resources/config/dynamic/conf/champ-beans.xml
@@ -21,9 +21,8 @@ xsi:schemaLocation="
<entry key="champcore.event.stream.publisher-pool-size" value="10" value-type="java.lang.Integer"/>
<entry key="champcore.event.stream.publisher" value-ref="champEventPublisher"/>
- <entry key="graph.name" value="aaigraph.dev"/>
+ <entry key="graph.name" value="aaigraph"/>
<entry key="storage.backend" value="cassandra"/>
- <entry key="storage.cassandra.keyspace" value="aaigraph"/>
<entry key="storage.cassandra.read-consistency-level" value="LOCAL_QUORUM"/>
<entry key="storage.cassandra.write-consistency-level" value="LOCAL_QUORUM"/>
<entry key="storage.cassandra.replication-factor" value="3"/>
@@ -37,7 +36,7 @@ xsi:schemaLocation="
<!-- Janus Implementation -->
<bean id="graphBuilder" class="org.onap.aai.champjanus.graph.impl.JanusChampGraphImpl$Builder">
- <constructor-arg value="aaigraph.dev"/>
+ <constructor-arg value="aaigraph"/>
<constructor-arg ref="props" />
</bean>
diff --git a/kubernetes/aai/charts/aai-modelloader/resources/config/log/logback.xml b/kubernetes/aai/charts/aai-modelloader/resources/config/log/logback.xml
index d512d3b91c..fcfac840b1 100644
--- a/kubernetes/aai/charts/aai-modelloader/resources/config/log/logback.xml
+++ b/kubernetes/aai/charts/aai-modelloader/resources/config/log/logback.xml
@@ -1,161 +1,152 @@
-<configuration debug="false" scan="true" scanPeriod="3 seconds">
- <!--<jmxConfigurator /> -->
- <!-- directory path for all other type logs -->
- <property name="logDir" value="/var/log/onap" />
- <property name="componentName" value="aai"></property>
- <property name="subComponentName" value="aai-ml"></property>
- <property name="logDirectory" value="${logDir}/${componentName}/${subComponentName}" />
- <!-- default eelf log file names -->
- <property name="generalLogName" value="error" />
- <property name="metricsLogName" value="metrics" />
- <property name="auditLogName" value="audit" />
- <property name="debugLogName" value="debug" />
- <property name="queueSize" value="256" />
- <property name="maxFileSize" value="50MB" />
- <property name="maxHistory" value="30" />
- <property name="totalSizeCap" value="10GB" />
- <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
- <!-- Example evaluator filter applied against console appender -->
- <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF Appenders -->
- <!-- ============================================================================ -->
- <!-- The EELFAppender is used to record events to the general application
- log -->
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELF">
- <file>${logDirectory}/${generalLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELF">
- <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>INFO</level>
- </filter>
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELF" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFAudit">
- <file>${logDirectory}/${auditLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFAudit">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFAudit" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFMetrics">
- <file>${logDirectory}/${metricsLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFMetrics">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFMetrics" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFDebug">
- <file>${logDirectory}/${debugLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFDebug">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFDebug" />
- <includeCallerData>true</includeCallerData>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF loggers -->
- <!-- ============================================================================ -->
- <logger additivity="false" level="info" name="com.att.eelf">
- <appender-ref ref="asyncEELF" />
- <appender-ref ref="asyncEELFDebug" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.security">
- <appender-ref ref="asyncEELFSecurity" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.perf">
- <appender-ref ref="asyncEELFPerformance" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.server">
- <appender-ref ref="asyncEELFServer" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.policy">
- <appender-ref ref="asyncEELFPolicy" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.audit">
- <appender-ref ref="asyncEELFAudit" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.metrics">
- <appender-ref ref="asyncEELFMetrics" />
- </logger>
- <!-- Spring related loggers -->
- <logger level="WARN" name="org.springframework" />
- <logger level="WARN" name="org.springframework.beans" />
- <logger level="WARN" name="org.springframework.web" />
- <logger level="WARN" name="com.blog.spring.jms" />
- <logger level="INFO" name="com.att" />
- <!-- Model Loader loggers -->
- <logger level="INFO" name="org.openecomp.modelloader" />
- <!-- Other Loggers that may help troubleshoot -->
- <logger level="WARN" name="net.sf" />
- <logger level="WARN" name="org.apache.commons.httpclient" />
- <logger level="WARN" name="org.apache.commons" />
- <logger level="WARN" name="org.apache.coyote" />
- <logger level="WARN" name="org.apache.jasper" />
- <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
- May aid in troubleshooting) -->
- <logger level="WARN" name="org.apache.camel" />
- <logger level="WARN" name="org.apache.cxf" />
- <logger level="WARN" name="org.apache.camel.processor.interceptor" />
- <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
- <logger level="WARN" name="org.apache.cxf.service" />
- <logger level="WARN" name="org.restlet" />
- <logger level="WARN" name="org.apache.camel.component.restlet" />
- <!-- logback internals logging -->
- <logger level="WARN" name="ch.qos.logback.classic" />
- <logger level="WARN" name="ch.qos.logback.core" />
- <root>
- <appender-ref ref="asyncEELF" />
- <!-- <appender-ref ref="asyncEELFDebug" /> -->
- </root>
+<configuration scan="true" scanPeriod="3 seconds" debug="false">
+ <include resource="org/springframework/boot/logging/logback/base.xml" />
+ <property name="logDir" value="${AJSC_HOME}/logs" />
+ <property name="componentName" value="AAI-ML"></property>
+
+ <!-- default eelf log file names -->
+ <property name="generalLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+
+ <property name="errorLogPattern"
+ value="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%mdc{RequestId}|%thread|ModelLoader|%mdc{PartnerName}|%logger||%.-5level|%msg%n" />
+ <property name="auditMetricPattern" value="%m%n" />
+
+ <property name="logDirectory" value="${logDir}/${componentName}" />
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+ <appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${auditMetricPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+
+ <appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${auditMetricPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics" />
+ </appender>
+
+ <appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>true</includeCallerData>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" />
+ <logger name="org.springframework.beans" level="WARN" />
+ <logger name="org.springframework.web" level="WARN" />
+ <logger name="com.blog.spring.jms" level="WARN" />
+
+ <logger name="com.att" level="INFO" />
+
+ <!-- Model Loader loggers -->
+ <logger name="org.openecomp.modelloader" level="INFO" />
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="WARN" />
+ <logger name="org.apache.commons.httpclient" level="WARN" />
+ <logger name="org.apache.commons" level="WARN" />
+ <logger name="org.apache.coyote" level="WARN" />
+ <logger name="org.apache.jasper" level="WARN" />
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="WARN" />
+ <logger name="org.apache.cxf" level="WARN" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.service" level="WARN" />
+ <logger name="org.restlet" level="WARN" />
+ <logger name="org.apache.camel.component.restlet" level="WARN" />
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="WARN" />
+ <logger name="ch.qos.logback.core" level="WARN" />
+
+ <root>
+ <appender-ref ref="asyncEELF" />
+ <!-- <appender-ref ref="asyncEELFDebug" /> -->
+ </root>
+
</configuration>
diff --git a/kubernetes/aai/charts/aai-modelloader/resources/config/model-loader.properties b/kubernetes/aai/charts/aai-modelloader/resources/config/model-loader.properties
index e9b24b667c..38c25480c0 100644
--- a/kubernetes/aai/charts/aai-modelloader/resources/config/model-loader.properties
+++ b/kubernetes/aai/charts/aai-modelloader/resources/config/model-loader.properties
@@ -24,7 +24,7 @@ ml.aai.AUTH_USER=ModelLoader
ml.aai.AUTH_PASSWORD=OBF:1qvu1v2h1sov1sar1wfw1j7j1wg21saj1sov1v1x1qxw
# Model Loader Babel REST Client Configuration\r
-ml.babel.BASE_URL=https://aai-babel.{{.Release.Namespace}}:9516
+ml.babel.BASE_URL=https://babel.{{.Release.Namespace}}:9516
ml.babel.GENERATE_ARTIFACTS_URL=/services/babel-service/v1/app/generateArtifacts
ml.babel.KEYSTORE_FILE=babel-client-cert.p12
ml.babel.KEYSTORE_PASSWORD=OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10
diff --git a/kubernetes/aai/charts/aai-modelloader/templates/deployment.yaml b/kubernetes/aai/charts/aai-modelloader/templates/deployment.yaml
index 38ebe7689f..4bfa67de41 100644
--- a/kubernetes/aai/charts/aai-modelloader/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-modelloader/templates/deployment.yaml
@@ -50,9 +50,9 @@ spec:
name: {{ include "common.fullname" . }}-prop-config
- mountPath: /opt/app/model-loader/config/auth/
name: {{ include "common.fullname" . }}-auth-config
- - mountPath: /var/log/onap
+ - mountPath: /logs
name: {{ include "common.fullname" . }}-logs
- - mountPath: /opt/app/model-loader/bundleconfig/etc/logback.xml
+ - mountPath: /opt/app/model-loader/logback.xml
name: {{ include "common.fullname" . }}-log-conf
subPath: logback.xml
ports:
@@ -67,11 +67,10 @@ spec:
- mountPath: /usr/share/filebeat/filebeat.yml
subPath: filebeat.yml
name: filebeat-conf
- - mountPath: /var/log/onap
+ - mountPath: /logs
name: {{ include "common.fullname" . }}-logs
- mountPath: /usr/share/filebeat/data
name: aai-filebeat
-
volumes:
- name: localtime
hostPath:
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/aai.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/aai.properties
deleted file mode 100644
index 813a263d90..0000000000
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/aai.properties
+++ /dev/null
@@ -1,87 +0,0 @@
-################################################################################################################
-############################## ActiveInventoryDataCollector TLS/SSL configuration ##############################
-################################################################################################################
-
-############################## Networking ##############################
-#
-# The ip address/hostname and port to the desired AAI instance
-#
-aai.rest.host=aai.{{.Release.Namespace}}
-aai.rest.port=8443
-
-############################## REST ##############################
-# OXM version
-aai.rest.resourceBasePath=/aai/v9
-aai.rest.connectTimeoutInMs=30000
-aai.rest.readTimeoutInMs=60000
-aai.rest.numRequestRetries=5
-# HTTP_NOAUTH - straight HTTP no user/pass
-# SSL_BASIC - HTTP/S with user/pass
-# SSL_CERT - HTTP/S with client cert
-aai.rest.authenticationMode=SSL_BASIC
-
-############################## Cache ##############################
-# Experimental caching feature that is NOT production ready.
-# Enable at your own risk... it might not work.
-aai.rest.cache.enabled=false
-aai.rest.cache.numWorkers=10
-aai.rest.cache.cacheFailures=false
-aai.rest.cache.useCacheOnly=false
-aai.rest.cache.storageFolderOverride=
-aai.rest.cache.maxTimeToLiveInMs=-1
-
-# The shallowEntity filter will display the entity in a visualization
-# but will not collect it's relationships or complex attributes.
-aai.rest.shallowEntities=cloud-region,complex,vnf-image,att-aic,image
-
-############################## Certs, Auth, and SSL Settings ##############################
-aai.ssl.keystore.filename=aai-os-cert.p12
-aai.ssl.keystore.pass=OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
-aai.ssl.keystore.type=pkcs12
-# Enable debug on the SSL connections
-aai.ssl.enableDebug=false
-# Degree of strictness to SSL connection standards
-aai.ssl.validateServerHostName=false;
-aai.ssl.validateServerCertificateChain=false;
-# If basic auth is implemented, username and password as required
-aai.ssl.basicAuth.username=AaiUI
-aai.ssl.basicAuth.password=OBF:1gfr1p571unz1p4j1gg7
-
-############################## Statistics Report Formatting ##############################
-#
-# During synchronization, a formatted statisitics log is generated
-#
-# Response size in bytes histogram
-aai.taskProcessor.bytesHistogramLabel="[Response Size In Bytes]"
-aai.taskProcessor.bytesHistogramMaxYAxis=1000000
-aai.taskProcessor.bytesHistogramNumBins=20
-aai.taskProcessor.bytesHistogramNumDecimalPoints=2
-# "Work on Hand" statisitcs for external resource requests
-aai.taskProcessor.queueLengthHistogramLabel="[Queue Item Length]"
-aai.taskProcessor.queueLengthHistogramMaxYAxis=20000
-aai.taskProcessor.queueLengthHistogramNumBins=20
-aai.taskProcessor.queueLengthHistogramNumDecimalPoints=2
-# Time on queue (how long does a task stay on the work queue)
-aai.taskProcessor.taskAgeHistogramLabel="[Task Age In Ms]"
-aai.taskProcessor.taskAgeHistogramMaxYAxis=600000
-aai.taskProcessor.taskAgeHistogramNumBins=20
-aai.taskProcessor.taskAgeHistogramNumDecimalPoints=2
-# Per transaction response time for external resource requests
-aai.taskProcessor.responseTimeHistogramLabel="[Response Time In Ms]"
-aai.taskProcessor.responseTimeHistogramMaxYAxis=10000
-aai.taskProcessor.responseTimeHistogramNumBins=20
-aai.taskProcessor.responseTimeHistogramNumDecimalPoints=2
-# Transaction throughput velocity
-aai.taskProcessor.tpsHistogramLabel="[Transactions Per Second]"
-aai.taskProcessor.tpsHistogramMaxYAxis=100
-aai.taskProcessor.tpsHistogramNumBins=20
-aai.taskProcessor.tpsHistogramNumDecimalPoints=2
-
-############################## Deprecated, to be removed or updated ##############################
-aai.rest.numResolverWorkers=15
-aai.ssl.truststore.filename=asdc-client.jks
-aai.ssl.truststore.type=jks
-aai.taskProcessor.maxConcurrentWorkers=5
-aai.taskProcessor.transactionRateControllerEnabled=false
-aai.taskProcessor.numSamplesPerThreadForRunningAverage=100
-aai.taskProcessor.targetTPS=100
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/csp-cookie-filter.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/csp-cookie-filter.properties
new file mode 100644
index 0000000000..2315b9f559
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/csp-cookie-filter.properties
@@ -0,0 +1,12 @@
+global.login.url=aaiportal.onap.org
+
+# MOTS ID of the application
+application.id=12345
+
+# valid domains for open redirect
+redirect-domain=domain.com
+
+# Required by esGateKeeper. Valid values are:
+# DEVL - used during development
+# PROD - used in production
+gatekeeper.environment=TEST
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/inventory-ui-keystore b/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/inventory-ui-keystore
deleted file mode 100644
index efa01f8d79..0000000000
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/inventory-ui-keystore
+++ /dev/null
Binary files differ
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/tomcat_keystore b/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/tomcat_keystore
new file mode 100644
index 0000000000..9eec841aa2
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/auth/tomcat_keystore
Binary files differ
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-filter-aggregation.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-filter-aggregation.xml
new file mode 100644
index 0000000000..e4e02bae94
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-filter-aggregation.xml
@@ -0,0 +1,7 @@
+<rests xmlns="http://camel.apache.org/schema/spring">
+ <rest>
+ <post uri="/search/filterAggregation">
+ <to uri="bean:aggregateSummaryProcessor?method=getFilteredAggregation"/>
+ </post>
+ </rest>
+</rests>
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-prepareSchema.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-prepareSchema.xml
new file mode 100644
index 0000000000..20dba6f13c
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-prepareSchema.xml
@@ -0,0 +1,11 @@
+<rests xmlns="http://camel.apache.org/schema/spring">
+ <rest>
+ <post uri="/visualization/prepareVisualization">
+ <route>
+ <to uri="bean:schemaVisualizationProcessor?method=processVisualizationRequest" />
+ </route>
+ </post>
+ </rest>
+</rests>
+
+
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-subscriptionService.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-subscriptionService.xml
new file mode 100644
index 0000000000..f6a2953b3a
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-subscriptionService.xml
@@ -0,0 +1,7 @@
+<rests xmlns="http://camel.apache.org/schema/spring">
+ <rest>
+ <get uri="/subscription/getsubscription">
+ <to uri="bean:subscriptionServiceProcessor?method=getSubscription"/>
+ </get>
+ </rest>
+</rests>
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unified-search.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unified-search.xml
new file mode 100644
index 0000000000..61fd9ad155
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unified-search.xml
@@ -0,0 +1,7 @@
+<rests xmlns="http://camel.apache.org/schema/spring">
+ <rest>
+ <post uri="/search/querysearch">
+ <to uri="bean:unifiedSearchProcessor?method=search"/>
+ </post>
+ </rest>
+</rests>
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unifiedFilterRequest.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unifiedFilterRequest.xml
new file mode 100644
index 0000000000..1b975e9dd3
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/camel-rests/sparky-core-unifiedFilterRequest.xml
@@ -0,0 +1,7 @@
+<rests xmlns="http://camel.apache.org/schema/spring">
+ <rest>
+ <post uri="/search/unifiedFilterRequest">
+ <to uri="bean:filterProcessor?method=getFiltersWithValues"/>
+ </post>
+ </rest>
+</rests>
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/descriptors/aaiEntityNodeDescriptors.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/descriptors/aaiEntityNodeDescriptors.json
new file mode 100644
index 0000000000..e72bab0f28
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/descriptors/aaiEntityNodeDescriptors.json
@@ -0,0 +1,218 @@
+{
+ "generalNodeClass": {
+ "class": "aai-entity-node general-node",
+ "visualElements": [{
+ "type": "circle",
+ "class": "outer",
+ "svgAttributes": {
+ "r": "16"
+ }
+ },
+ {
+ "type": "circle",
+ "class": "inner",
+ "svgAttributes": {
+ "r": "10"
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-type-label",
+ "displayKey": "itemType",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "33"
+ }
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-value-label",
+ "displayKey": "itemNameValue",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "48"
+ }
+ }
+ }]
+ },
+ "searchedNodeClass": {
+ "class": "aai-entity-node search-node",
+ "visualElements": [{
+ "type": "circle",
+ "class": "outer",
+ "svgAttributes": {
+ "r": "16"
+ }
+ },
+ {
+ "type": "circle",
+ "class": "inner",
+ "svgAttributes": {
+ "r": "10"
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-type-label",
+ "displayKey": "itemType",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "33"
+ }
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-value-label",
+ "displayKey": "itemNameValue",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "48"
+ }
+ }
+ }]
+ },
+ "selectedSearchedNodeClass": {
+ "class": "aai-entity-node selected-search-node",
+ "visualElements": [{
+ "type": "circle",
+ "class": "outer",
+ "svgAttributes": {
+ "r": "31"
+ }
+ },
+ {
+ "type": "circle",
+ "class": "inner",
+ "svgAttributes": {
+ "r": "20"
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-type-label",
+ "displayKey": "itemType",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "48"
+ }
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-value-label",
+ "displayKey": "itemNameValue",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "63"
+ }
+ }
+ },
+ {
+ "type": "button",
+ "name": "icon_ellipses",
+ "class": "node-button",
+ "shapeAttributes": {
+ "offset": {
+ "x": "33",
+ "y": "-35"
+ }
+ },
+ "svgAttributes": {
+ "className": "node-button",
+ "r": "10"
+ }
+ },
+ {
+ "type": "button",
+ "name": "icon_triangle_warning",
+ "class": "node-button",
+ "shapeAttributes": {
+ "offset": {
+ "x": "46",
+ "y": "-12"
+ }
+ },
+ "svgAttributes": {
+ "className": "node-button",
+ "r": "10"
+ }
+ }]
+ },
+ "selectedNodeClass": {
+ "class": "aai-entity-node selected-node",
+ "visualElements": [{
+ "type": "circle",
+ "class": "outer",
+ "svgAttributes": {
+ "r": "31"
+ }
+ },
+ {
+ "type": "circle",
+ "class": "inner",
+ "svgAttributes": {
+ "r": "20"
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-type-label",
+ "displayKey": "itemType",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "48"
+ }
+ }
+ },
+ {
+ "type": "text",
+ "class": "id-value-label",
+ "displayKey": "itemNameValue",
+ "shapeAttributes": {
+ "offset": {
+ "x": "0",
+ "y": "63"
+ }
+ }
+ },
+ {
+ "type": "button",
+ "name": "icon_ellipses",
+ "class": "node-button",
+ "shapeAttributes": {
+ "offset": {
+ "x": "33",
+ "y": "-35"
+ }
+ },
+ "svgAttributes": {
+ "className": "node-button",
+ "r": "10"
+ }
+ },
+ {
+ "type": "button",
+ "name": "icon_triangle_warning",
+ "class": "node-button",
+ "shapeAttributes": {
+ "offset": {
+ "x": "46",
+ "y": "-12"
+ }
+ },
+ "svgAttributes": {
+ "className": "node-button",
+ "r": "10"
+ }
+ }]
+ }
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/elasticsearch.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/elasticsearch.properties
deleted file mode 100644
index 082744b94e..0000000000
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/elasticsearch.properties
+++ /dev/null
@@ -1,72 +0,0 @@
-#######################################################################################
-############################## ElasticSearch Config ###################################
-#######################################################################################
-
-############################## Networking ##############################
-#
-# The ip address/hostname and port to the desired AAI instance
-# For development it's recommended to use a local instance of ES
-#
-elasticsearch.ipAddress=aai-elasticsearch.{{.Release.Namespace}}
-elasticsearch.httpPort={{ .Values.config.elasticsearchHttpPort }}
-elasticsearch.javaApiPort=8443
-
-############################## Indexes ##############################
-#
-# Index names for various searches.
-#
-elasticsearch.indexName=entitysearchindex
-elasticsearch.topographicalIndexName=topographicalsearchindex
-elasticsearch.entityCountHistoryIndexName=entitycounthistoryindex
-elasticsearch.autosuggestIndexname=entityautosuggestindex
-
-# Default document type
-elasticsearch.type=default
-
-############################## Index Mappings and Settings ##############################
-#
-# JSON files for sparky elasticsearch indexes.
-#
-elasticsearch.mappingsFileName=/etc/es_mappings.json
-elasticsearch.settingsFileName=/etc/es_settings.json
-elasticsearch.autosuggestSettingsFileName=/etc/autoSuggestSettings.json
-elasticsearch.autosuggestMappingsFileName=/etc/autoSuggestMappings.json
-elasticsearch.dynamicMappingsFileName=/etc/dynamicMappings.json
-elasticsearch.entityCountHistoryMappingsFileName=/etc/entityCountHistoryMappings.json
-
-############################## Statistics Report Formatting ##############################
-#
-# During synchronization, a formatted statisitics log is generated.
-#
-# Response size in bytes histogram
-elasticsearch.taskProcessor.bytesHistogramLabel="[Response Size In Bytes]"
-elasticsearch.taskProcessor.bytesHistogramMaxYAxis=1000000
-elasticsearch.taskProcessor.bytesHistogramNumBins=20
-elasticsearch.taskProcessor.bytesHistogramNumDecimalPoints=2
-# "Work on Hand" statisitcs for external resource requests
-elasticsearch.taskProcessor.queueLengthHistogramLabel="[Queue Item Length]"
-elasticsearch.taskProcessor.queueLengthHistogramMaxYAxis=20000
-elasticsearch.taskProcessor.queueLengthHistogramNumBins=20
-elasticsearch.taskProcessor.queueLengthHistogramNumDecimalPoints=2
-# Time on queue (how long does a task stay on the work queue)
-elasticsearch.taskProcessor.taskAgeHistogramLabel="[Task Age In Ms]"
-elasticsearch.taskProcessor.taskAgeHistogramMaxYAxis=600000
-elasticsearch.taskProcessor.taskAgeHistogramNumBins=20
-elasticsearch.taskProcessor.taskAgeHistogramNumDecimalPoints=2
-# Per transaction response time for external resource requests
-elasticsearch.taskProcessor.responseTimeHistogramLabel="[Response Time In Ms]"
-elasticsearch.taskProcessor.responseTimeHistogramMaxYAxis=1000
-elasticsearch.taskProcessor.responseTimeHistogramNumBins=20
-elasticsearch.taskProcessor.responseTimeHistogramNumDecimalPoints=2
-# Transaction throughput velocity
-elasticsearch.taskProcessor.tpsHistogramLabel="[Transactions Per Second]"
-elasticsearch.taskProcessor.tpsHistogramMaxYAxis=100
-elasticsearch.taskProcessor.tpsHistogramNumBins=20
-elasticsearch.taskProcessor.tpsHistogramNumDecimalPoints=2
-
-############################## Deprecated, to be removed or updated ##############################
-elasticsearch.taskProcessor.maxConcurrentWorkers=5
-elasticsearch.taskProcessor.transactionRateControllerEnabled=false
-elasticsearch.taskProcessor.numSamplesPerThreadForRunningAverage=100
-elasticsearch.taskProcessor.targetTPS=100
-elasticsearch.clusterName=ES_AAI_LOCALHOST
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_filters.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_filters.json
new file mode 100644
index 0000000000..62b681139d
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_filters.json
@@ -0,0 +1,79 @@
+{
+ "filters": [
+ {
+ "filterId": "1",
+ "filterName": "Orchestration-Status",
+ "displayName": "Orchestration Status",
+ "dataType": "dropDown",
+ "multiSelect": "false",
+ "watermark": "Any Orchestration Status",
+ "optionsType": "options",
+ "dataSource": {
+ "indexName": "aggregate_generic-vnf_index",
+ "docType": "default",
+ "fieldName": "orchestration-status"
+ }
+ },
+ {
+ "filterId": "2",
+ "filterName": "Prov-Status",
+ "displayName": "Provisioning Status",
+ "dataType": "dropDown",
+ "multiSelect": "false",
+ "watermark": "Any Provisioning Status",
+ "optionsType": "options",
+ "dataSource": {
+ "indexName": "aggregate_generic-vnf_index",
+ "docType": "default",
+ "fieldName": "prov-status"
+ }
+ },
+ {
+ "filterId": "5",
+ "filterName": "Date",
+ "displayName": "Date",
+ "dataType": "date",
+ "multiSelect": "false",
+ "watermark": "Choose Date Range",
+ "defaultValue" : {"decode": "Today", "code": "last_0_hours"},
+ "optionsType": "dynamicOptions",
+ "optionsValues": [
+ {"decode": "Today", "code": "last_0_hours"},
+ {"decode": "Since Yesterday", "code": "last_1_days"},
+ {"decode": "Since Last Week", "code": "last_1_weeks"},
+ {"decode": "Since Last Month", "code": "last_1_months"},
+ {"decode": "Since Last Year", "code": "last_1_years"},
+ {"decode": "Custom Range", "code": "custom_range"}
+ ]
+ },
+ {
+ "filterId": "7",
+ "filterName": "NF-Type",
+ "displayName": "Network Function Type",
+ "dataType": "dropDown",
+ "multiSelect": "false",
+ "watermark": "Any Network Function Type",
+ "optionsType": "options",
+ "dataSource": {
+ "indexName": "aggregate_generic-vnf_index",
+ "docType": "default",
+ "fieldName": "nf-type"
+ }
+ },
+ {
+ "filterId": "8",
+ "filterName": "NF-Role",
+ "displayName": "Network Function Role",
+ "dataType": "dropDown",
+ "multiSelect": "false",
+ "watermark": "Any Network Function Role",
+ "optionsType": "options",
+ "dataSource": {
+ "indexName": "aggregate_generic-vnf_index",
+ "docType": "default",
+ "fieldName": "nf-role"
+ }
+ }
+
+ ]
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_views.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_views.json
new file mode 100644
index 0000000000..9ca0119dc0
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/aaiui_views.json
@@ -0,0 +1,21 @@
+{
+ "views": [
+ {
+ "viewName" : "vnfSearch",
+ "filters" : [
+ {
+ "filterId": "1"
+ },
+ {
+ "filterId": "2"
+ },
+ {
+ "filterId": "7"
+ },
+ {
+ "filterId": "8"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/subscription_object_inspector_mapping.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/subscription_object_inspector_mapping.json
new file mode 100644
index 0000000000..ad2ab7aa5f
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/filters/subscription_object_inspector_mapping.json
@@ -0,0 +1,16 @@
+{
+ "target": "",
+ "origin": "",
+ "messageType": "",
+ "topic": "",
+ "message": {
+ "applicationName": "",
+ "payload": {
+ "action": "",
+ "params": {
+ "objectName": "",
+ "externalClassId": ""
+ }
+ }
+ }
+}
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/log/logback.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/log/logback.xml
index c1b99c183f..3f96497c60 100644
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/log/logback.xml
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/log/logback.xml
@@ -1,200 +1,172 @@
-<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="3 seconds" debug="false">
- <!--<jmxConfigurator /> -->
- <!-- directory path for all other type logs -->
- <property name="logDir" value="/var/log/onap" />
- <property name="componentName" value="AAI-UI" />
- <property name="logDirectory" value="${logDir}/${componentName}" />
- <!-- default eelf log file names -->
- <property name="generalLogName" value="application" />
- <property name="errorLogName" value="error" />
- <property name="metricsLogName" value="metrics" />
- <property name="auditLogName" value="audit" />
- <property name="debugLogName" value="debug" />
- <property name="queueSize" value="256" />
- <property name="maxFileSize" value="50MB" />
- <property name="maxHistory" value="30" />
- <property name="totalSizeCap" value="10GB" />
- <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger{36}\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
-
- <!-- Example evaluator filter applied against console appender -->
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF Appenders -->
- <!-- ============================================================================ -->
- <!-- The EELFAppender is used to record events to the general application
- log -->
- <appender name="EELF" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${generalLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
- <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>INFO</level>
- </filter>
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELF" />
- </appender>
- <!-- EELF Audit Appender. This appender is used to record audit engine
- related logging events. The audit logger and appender are specializations
- of the EELF application root logger and appender. This can be used to segregate
- Policy engine events from other components, or it can be eliminated to record
- these events as part of the application root log. -->
- <appender name="EELFAudit" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${auditLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFAudit" />
- </appender>
- <appender name="EELFMetrics" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${metricsLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFMetrics" />
- </appender>
-
- <appender name="EELFError" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${errorLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>INFO</level>
- </filter>
- </appender>
- <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFError" />
- </appender>
-
- <appender name="EELFDebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${debugLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFDebug" />
- <includeCallerData>false</includeCallerData>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF loggers -->
- <!-- ============================================================================ -->
- <logger name="com.att.eelf" level="INFO" additivity="false" />
- <logger name="org.elasticsearch.plugins" level="WARN" />
- <logger name="com.att.eelf.debug" level="debug" additivity="false">
- <appender-ref ref="asyncEELFDebug" />
- </logger>
- <logger name="com.att.eelf.audit" level="info" additivity="false">
- <appender-ref ref="asyncEELFAudit" />
- </logger>
- <logger name="com.att.eelf.metrics" level="info" additivity="false">
- <appender-ref ref="asyncEELFMetrics" />
- </logger>
- <logger name="com.att.eelf.error" level="info" additivity="false">
- <appender-ref ref="asyncEELFError" />
- </logger>
- <!-- Spring related loggers -->
- <logger name="org.springframework" level="WARN" />
- <logger name="org.springframework.beans" level="WARN" />
- <logger name="org.springframework.web" level="WARN" />
- <logger name="com.blog.spring.jms" level="WARN" />
- <!-- AJSC Services (bootstrap services) -->
- <logger name="ajsc" level="WARN" />
- <logger name="ajsc.RouteMgmtService" level="WARN" />
- <logger name="ajsc.ComputeService" level="WARN" />
- <logger name="ajsc.VandelayService" level="WARN" />
- <logger name="ajsc.FilePersistenceService" level="WARN" />
- <logger name="ajsc.UserDefinedJarService" level="WARN" />
- <logger name="ajsc.UserDefinedBeansDefService" level="WARN" />
- <logger name="ajsc.LoggingConfigurationService" level="WARN" />
- <logger name="ajsc.ErrorMessageLookupService" level="WARN" />
- <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
- logging) -->
- <logger name="ajsc.utils" level="WARN" />
- <logger name="ajsc.utils.DME2Helper" level="WARN" />
- <logger name="ajsc.filters" level="WARN" />
- <logger name="ajsc.beans.interceptors" level="WARN" />
- <logger name="ajsc.restlet" level="WARN" />
- <logger name="ajsc.servlet" level="WARN" />
- <logger name="com.att" level="WARN" />
- <logger name="com.att.ajsc.csi.logging" level="WARN" />
- <logger name="com.att.ajsc.filemonitor" level="WARN" />
- <!-- Other Loggers that may help troubleshoot -->
- <logger name="net.sf" level="WARN" />
- <logger name="org.apache.commons.httpclient" level="WARN" />
- <logger name="org.apache.commons" level="WARN" />
- <logger name="org.apache.coyote" level="WARN" />
- <logger name="org.apache.jasper" level="WARN" />
- <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
- May aid in troubleshooting) -->
- <logger name="org.apache.camel" level="WARN" />
- <logger name="org.apache.cxf" level="WARN" />
- <logger name="org.apache.camel.processor.interceptor" level="WARN" />
- <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
- <logger name="org.apache.cxf.service" level="WARN" />
- <logger name="org.restlet" level="WARN" />
- <logger name="org.apache.camel.component.restlet" level="WARN" />
- <!-- logback internals logging -->
- <logger name="ch.qos.logback.classic" level="INFO" />
- <logger name="ch.qos.logback.core" level="INFO" />
- <root level="INFO">
- <appender-ref ref="asyncEELFDebug" />
- </root>
-</configuration>
\ No newline at end of file
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+
+ <property name="logDir" value="/var/log/onap" />
+
+ <!-- <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy"
+ | "SDNC" | "AC" -->
+ <property name="componentName" value="AAI-UI"></property>
+
+ <!-- default eelf log file names -->
+ <property name="generalLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+
+ <property name="errorLogPattern"
+ value="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%mdc{RequestId}|%thread|AAIUI|%mdc{PartnerName}|%logger|%.-5level|%msg%n" />
+ <property name="auditMetricPattern" value="%m%n" />
+
+ <property name="logDirectory" value="${logDir}/${componentName}" />
+
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine related
+ logging events. The audit logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+
+ <appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${auditMetricPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+
+ <appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n"</pattern> -->
+ <pattern>${auditMetricPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics" />
+ </appender>
+
+ <appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>false</includeCallerData>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" />
+ <logger name="org.springframework.beans" level="WARN" />
+ <logger name="org.springframework.web" level="WARN" />
+ <logger name="com.blog.spring.jms" level="WARN" />
+
+ <!-- Sparky loggers -->
+ <logger name="org.onap" level="INFO" />
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="WARN" />
+ <logger name="org.apache.commons.httpclient" level="WARN" />
+ <logger name="org.apache.commons" level="WARN" />
+ <logger name="org.apache.coyote" level="WARN" />
+ <logger name="org.apache.jasper" level="WARN" />
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="WARN" />
+ <logger name="org.apache.cxf" level="WARN" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.service" level="WARN" />
+ <logger name="org.restlet" level="WARN" />
+ <logger name="org.apache.camel.component.restlet" level="WARN" />
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="WARN" />
+ <logger name="ch.qos.logback.core" level="WARN" />
+
+ <root>
+ <appender-ref ref="asyncEELF" />
+ <!-- <appender-ref ref="asyncEELFDebug" /> -->
+ </root>
+
+</configuration>
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/BOOT-INF/classes/portal.properties
index 04a19555e2..35b714a5e9 100644
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal.properties
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/BOOT-INF/classes/portal.properties
@@ -1,16 +1,16 @@
-######################################################################################
-############################## eCOMP Portal properties ###############################
-######################################################################################
+################################################################################
+############################## Portal properties ###############################
+################################################################################
# Java class that implements the ECOMP role and user mgt API
-portal.api.impl.class = org.openecomp.sparky.security.portal.PortalRestAPIServiceImpl
+portal.api.impl.class = org.onap.aai.sparky.security.portal.PortalRestAPIServiceImpl
# Instance of ECOMP Portal where the app has been on-boarded
# use insecure http for dev purposes to avoid self-signed certificate
-ecomp_rest_url = http://portalapps.{{.Release.Namespace}}:8989/ONAPPORTAL/auxapi
+ecomp_rest_url = http://portal-app.{{.Release.Namespace}}:8989/ONAPPORTAL/auxapi
# Standard global logon page
-ecomp_redirect_url = http://portalapps.{{.Release.Namespace}}:8989/ONAPPORTAL/login.htm
+ecomp_redirect_url = http://portal-app.{{.Release.Namespace}}:8989/ONAPPORTAL/login.htm
# Name of cookie to extract on login request
csp_cookie_name = EPService
@@ -20,4 +20,4 @@ csp_gate_keeper_prod_key = PROD
# Toggles use of UEB
ueb_listeners_enable = false
# IDs application withing UEB flow
-ueb_app_key = qFKles9N8gDTV0Zc
+ueb_app_key = qFKles9N8gDTV0Zc
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal-authentication.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal-authentication.properties
index 0873fc1c61..a0e3308435 100644
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal-authentication.properties
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/portal-authentication.properties
@@ -1,14 +1,14 @@
-##########################################################################################
-############################## eCOMP Portal Auth Properties ##############################
-##########################################################################################
-
-############################## Auth ##############################
-username=aaiui
-password=1t2v1vfv1unz1vgz1t3b
-
-############################## ##############################
-#
-# ONAP Cookie Processing - During initial development, this flag, if true, will
-# prevent the portal interface's login processing from searching for a user
-# specific cookie, and will instead allow passage if a valid session cookie is discovered.
-onap_enabled=true
\ No newline at end of file
+#####################################################################################
+############################## Portal Auth Properties ##############################
+#####################################################################################
+
+############################## Auth ##############################
+username={{.Values.config.portalUsername}}
+password={{.Values.config.portalPassword}}
+
+############################## ##############################
+#
+# ONAP Cookie Processing - During initial development, this flag, if true, will
+# prevent the portal interface's login processing from searching for a user
+# specific cookie, and will instead allow passage if a valid session cookie is discovered.
+onap_enabled={{.Values.config.portalOnapEnabled}}
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/roles.config b/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/roles.config
new file mode 100644
index 0000000000..b8313bd378
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/portal/roles.config
@@ -0,0 +1,6 @@
+[
+ {
+ "id":1,
+ "name":"View"
+ }
+]
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/roles.config b/kubernetes/aai/charts/aai-sparky-be/resources/config/roles.config
index b8313bd378..4d5d43e2ce 100644
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/roles.config
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/roles.config
@@ -1,6 +1,6 @@
-[
- {
- "id":1,
- "name":"View"
- }
+[
+ {
+ "id":1,
+ "name":"View"
+ }
]
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestMappings.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestMappings.json
new file mode 100644
index 0000000000..78576172fb
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestMappings.json
@@ -0,0 +1,10 @@
+{
+ "properties" : {
+ "entity_suggest" : {
+ "type" : "completion",
+ "payloads" : true,
+ "analyzer" : "custom_analyzer",
+ "preserve_position_increments": false
+ }
+ }
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestSettings.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestSettings.json
new file mode 100644
index 0000000000..4525be12fc
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/autoSuggestSettings.json
@@ -0,0 +1,21 @@
+{
+ "analysis": {
+ "filter": {
+ "eng_stop": {
+ "type": "stop",
+ "stopwords": "_english_"
+ }
+ },
+ "analyzer": {
+ "custom_analyzer": {
+ "type": "custom",
+ "tokenizer": "standard",
+ "filter": [
+ "lowercase",
+ "asciifolding",
+ "eng_stop"
+ ]
+ }
+ }
+ }
+ }
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/dynamicMappings.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/dynamicMappings.json
new file mode 100644
index 0000000000..09a00acdf6
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/dynamicMappings.json
@@ -0,0 +1,14 @@
+{
+ "dynamic_templates": [
+ {
+ "strings": {
+ "match_mapping_type": "string",
+ "match": "*",
+ "mapping": {
+ "type": "string",
+ "index": "not_analyzed"
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/entityCountHistoryMappings.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/entityCountHistoryMappings.json
new file mode 100644
index 0000000000..84e3aec4c7
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/entityCountHistoryMappings.json
@@ -0,0 +1,16 @@
+{
+ "properties": {
+ "count": {
+ "type": "long"
+ },
+ "entityType": {
+ "type": "string",
+ "index": "not_analyzed"
+ },
+ "timestamp": {
+ "type": "date",
+ "format": "MMM d y HH:m:s||dd-MM-yyyy HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SSSZZ||MM/dd/yyyy||yyyyMMdd'T'HHmmssZ"
+ }
+ }
+}
+
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_mappings.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_mappings.json
new file mode 100644
index 0000000000..216e3d96b6
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_mappings.json
@@ -0,0 +1,32 @@
+{
+ "properties": {
+ "entityType": {
+ "type": "string",
+ "analyzer": "ngram_analyzer",
+ "search_analyzer": "ngram_analyzer"
+ },
+ "entityPrimaryKeyValue": {
+ "type": "string",
+ "index": "not_analyzed"
+ },
+ "searchTagIDs": {
+ "type": "string"
+ },
+ "searchTags": {
+ "type": "string",
+ "analyzer": "ngram_analyzer"
+ },
+ "crossEntityReferenceValues": {
+ "type": "string",
+ "analyzer": "ngram_analyzer"
+ },
+ "link": {
+ "type": "string",
+ "index": "not_analyzed"
+ },
+ "lastmodTimestamp": {
+ "type": "date",
+ "format": "MMM d y HH:m:s||dd-MM-yyyy HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SSSZZ||yyyy-MM-dd HH:mm:ss||MM/dd/yyyy||yyyyMMdd'T'HHmmssZ"
+ }
+ }
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_settings.json b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_settings.json
new file mode 100644
index 0000000000..21a357c615
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/schemas/es_settings.json
@@ -0,0 +1,36 @@
+{
+ "analysis": {
+ "filter": {
+ "ngram_filter": {
+ "type": "nGram",
+ "min_gram": 1,
+ "max_gram": 50,
+ "token_chars": [
+ "letter",
+ "digit",
+ "punctuation",
+ "symbol"
+ ]
+ }
+ },
+ "analyzer": {
+ "ngram_analyzer": {
+ "type": "custom",
+ "tokenizer": "whitespace",
+ "filter": [
+ "lowercase",
+ "asciifolding",
+ "ngram_filter"
+ ]
+ },
+ "whitespace_analyzer": {
+ "type": "custom",
+ "tokenizer": "whitespace",
+ "filter": [
+ "lowercase",
+ "asciifolding"
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/search-service.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/search-service.properties
deleted file mode 100644
index f28047333a..0000000000
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/search-service.properties
+++ /dev/null
@@ -1,32 +0,0 @@
-########################################################################################
-############################## Search Data Service Config ##############################
-########################################################################################
-
-############################## Networking ##############################
-#
-# The ip address/hostname and port to the desired Search Data Service instance
-#
-search-service.ipAddress=aai-search-data.{{.Release.Namespace}}
-search-service.httpPort=9509
-
-############################## Indexes ##############################
-#
-# Index values that will be associated with searches
-#
-# Searchable entities
-search-service.indexName=entitysearchindex
-# Inventory searches
-search-service.topographicalIndexName=topographicalsearchindex
-search-service.entityCountHistoryIndexName=entitycounthistoryindex
-
-############################## Version ##############################
-#
-# Search Data Service version and type (see Search Data Service for more details)
-#
-search-service.version=v1
-search-service.type=default
-
-############################## Certs ##############################
-search-service.ssl.cert-name=client-cert-onap.p12
-search-service.ssl.keystore-password=OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10
-search-service.ssl.keystore=tomcat_keystore
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-application.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-application.properties
new file mode 100644
index 0000000000..b246676fa7
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-application.properties
@@ -0,0 +1,11 @@
+camel.springboot.name = SparkyCamelContext
+#camel.springboot.xmlRoutes = file:${CONFIG_HOME}/camel-routes/*.route
+camel.springboot.xmlRests = file:${CONFIG_HOME}/camel-rests/*.xml
+
+camel.component.servlet.mapping.context-path=/rest/*
+server.servlet.context-path=/services/aai/webapp/
+
+#
+# attempt to externalize ui JS classes + resources
+#
+spring.resources.static-locations=file:${APP_HOME}/static/
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-http-config.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-http-config.properties
new file mode 100644
index 0000000000..f81a5976ef
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-http-config.properties
@@ -0,0 +1 @@
+server.port = 9517
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-ssl-config.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-ssl-config.properties
new file mode 100644
index 0000000000..b9e5d16aac
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/sparky-ssl-config.properties
@@ -0,0 +1,3 @@
+server.port = 8000
+server.ssl.key-store=file:${CONFIG_HOME}/auth/tomcat_keystore
+server.ssl.key-alias=tomcat
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-aggregateVnfSearchProvider.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-aggregateVnfSearchProvider.xml
new file mode 100644
index 0000000000..694ea1bf0f
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-aggregateVnfSearchProvider.xml
@@ -0,0 +1,33 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="aggregateVnfSearchProvider"
+ class="org.onap.aai.sparky.aggregatevnf.search.AggregateVnfSearchProvider">
+ <constructor-arg ref="searchServiceAdapter"/>
+ <constructor-arg name="autoSuggestIndexName" value="entityautosuggestindex"/>
+ <constructor-arg name="vnfSearchSuggestionRoute" value="vnfSearch"/>
+ </bean>
+
+ <bean id="aggregateSummaryProcessor"
+ class="org.onap.aai.sparky.aggregatevnf.search.AggregateSummaryProcessor">
+ <constructor-arg ref="elasticSearchAdapter"/>
+ <constructor-arg ref="filtersConfig"/>
+ <property name="vnfAggregationIndexName" value="aggregate_generic-vnf_index"/>
+ </bean>
+
+ <bean id="registerAggregateVnfSearchProvider"
+ class="org.springframework.beans.factory.config.MethodInvokingFactoryBean">
+ <property name="targetObject" ref="searchProviderRegistry"/>
+ <property name="targetMethod">
+ <value>addSearchProviders</value>
+ </property>
+ <property name="arguments">
+ <list value-type="org.onap.aai.sparky.search.api.SearchProvider">
+ <ref bean="aggregateVnfSearchProvider"/>
+ </list>
+ </property>
+ </bean>
+</beans>
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-apigw.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-apigw.xml
new file mode 100644
index 0000000000..8b5f27727d
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-apigw.xml
@@ -0,0 +1,31 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="synapseRestEndpointConfig" class="org.onap.aai.sparky.dal.rest.config.RestEndpointConfig">
+ <property name="endpointIpAddress" value="aai.synapse.simpledemo.openecomp.org" />
+ <property name="endpointServerPort" value="9502" />
+ <property name="numRequestRetries" value="5" />
+ <property name="restAuthenticationMode" value="SSL_CERT" />
+ <property name="connectTimeoutInMs" value="60000" />
+ <property name="readTimeoutInMs" value="30000" />
+ <property name="certFileName" value="/auth/client-cert-onap.p12" />
+ <property name="certPassword" value="OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10" />
+ <property name="truststoreFileName" value="/auth/inventory-ui-keystore" />
+ <property name="validateServerCertChain" value="false" />
+ <property name="validateServerHostname" value="false" />
+ <property name="resourceLoader" ref="sparkyResourceLoader" />
+
+ </bean>
+
+ <bean id="aaiuiProxyProcessor"
+ class="org.onap.aai.sparky.dal.proxy.processor.AaiUiProxyProcessor">
+ <constructor-arg ref="synapseRestEndpointConfig" />
+ <constructor-arg name="apiGatewayEndpoint" value="ui-request" />
+ </bean>
+
+</beans>
+
+
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-gizmo.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-gizmo.xml
new file mode 100644
index 0000000000..dedd7e2d16
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-gizmo.xml
@@ -0,0 +1,29 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="gizmoRestEndpointConfig" class="org.onap.aai.sparky.dal.rest.config.RestEndpointConfig">
+ <property name="endpointIpAddress" value="{{.Values.global.gizmo.serviceName}}.{{.Release.Namespace}}" />
+ <property name="endpointServerPort" value="9520" />
+ <property name="numRequestRetries" value="5" />
+ <property name="restAuthenticationMode" value="SSL_CERT" />
+ <property name="connectTimeoutInMs" value="60000" />
+ <property name="readTimeoutInMs" value="30000" />
+ <property name="certFileName" value="/auth/client-cert-onap.p12" />
+ <property name="certPassword" value="OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10" />
+ <property name="truststoreFileName" value="/auth/inventory-ui-keystore" />
+ <property name="validateServerCertChain" value="false" />
+ <property name="validateServerHostname" value="false" />
+ <property name="resourceLoader" ref="sparkyResourceLoader" />
+ </bean>
+
+ <bean id="gizmoAdapter" class="org.onap.aai.sparky.dal.GizmoAdapter">
+ <constructor-arg ref="oxmModelLoader" />
+ <constructor-arg ref="gizmoRestEndpointConfig" />
+ <property name="relationshipsBasePath" value="/services/inventory/relationships/v13/" />
+ <property name="inventoryBasePath" value="/services/inventory/v13/" />
+ </bean>
+
+</beans>
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml
new file mode 100644
index 0000000000..0763f94cd1
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml
@@ -0,0 +1,373 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="elasticSearchSchemaFactory" class="org.onap.aai.sparky.sync.ElasticSearchSchemaFactory">
+ <property name="resourceLoader" ref="sparkyResourceLoader" />
+ </bean>
+
+
+ <bean id="elasticSearchRestEndpointConfig" class="org.onap.aai.sparky.dal.rest.config.RestEndpointConfig">
+ <property name="endpointIpAddress" value="{{.Values.global.aaiElasticsearch.serviceName}}.{{.Release.Namespace}}" />
+ <property name="endpointServerPort" value="9200" />
+ <property name="numRequestRetries" value="5" />
+ <property name="restAuthenticationMode" value="HTTP_NOAUTH" />
+ <property name="connectTimeoutInMs" value="60000" />
+ <property name="readTimeoutInMs" value="30000" />
+ </bean>
+
+ <bean id="elasticSearchAdapter" class="org.onap.aai.sparky.dal.ElasticSearchAdapter">
+ <constructor-arg ref="elasticSearchRestEndpointConfig" />
+ </bean>
+
+ <bean id="elasticSearchNetworkStatConfig" class="org.onap.aai.sparky.sync.config.NetworkStatisticsConfig">
+ <property name="numSamplesPerThreadForRunningAverage" value="100" />
+ <property name="bytesHistogramLabel" value="[Response Size In Bytes]" />
+ <property name="bytesHistogramMaxYAxis" value="1000000" />
+ <property name="bytesHistogramNumBins" value="20" />
+ <property name="bytesHistogramNumDecimalPoints" value="2" />
+ <property name="queueLengthHistogramLabel" value="[Queue Item Length]" />
+ <property name="queueLengthHistogramMaxYAxis" value="20000" />
+ <property name="queueLengthHistogramNumBins" value="20" />
+ <property name="queueLengthHistogramNumDecimalPoints" value="2" />
+ <property name="taskAgeHistogramLabel" value="[Task Age In Ms]" />
+ <property name="taskAgeHistogramMaxYAxis" value="600000" />
+ <property name="taskAgeHistogramNumBins" value="20" />
+ <property name="taskAgeHistogramNumDecimalPoints" value="2" />
+ <property name="responseTimeHistogramLabel" value="[Response Time In Ms]" />
+ <property name="responseTimeHistogramMaxYAxis" value="1000" />
+ <property name="responseTimeHistogramNumBins" value="20" />
+ <property name="responseTimeHistogramNumDecimalPoints" value="2" />
+ <property name="tpsHistogramLabel" value="[Transactions Per Second]" />
+ <property name="tpsHistogramMaxYAxis" value="100" />
+ <property name="tpsHistogramNumBins" value="20" />
+ <property name="tpsHistogramNumDecimalPoints" value="2" />
+ </bean>
+
+ <bean id="aaiNetworkStatConfig" class="org.onap.aai.sparky.sync.config.NetworkStatisticsConfig">
+ <property name="numSamplesPerThreadForRunningAverage" value="100" />
+ <property name="bytesHistogramLabel" value="[Response Size In Bytes]" />
+ <property name="bytesHistogramMaxYAxis" value="1000000" />
+ <property name="bytesHistogramNumBins" value="20" />
+ <property name="bytesHistogramNumDecimalPoints" value="2" />
+ <property name="queueLengthHistogramLabel" value="[Queue Item Length]" />
+ <property name="queueLengthHistogramMaxYAxis" value="20000" />
+ <property name="queueLengthHistogramNumBins" value="20" />
+ <property name="queueLengthHistogramNumDecimalPoints" value="2" />
+ <property name="taskAgeHistogramLabel" value="[Task Age In Ms]" />
+ <property name="taskAgeHistogramMaxYAxis" value="600000" />
+ <property name="taskAgeHistogramNumBins" value="20" />
+ <property name="taskAgeHistogramNumDecimalPoints" value="2" />
+ <property name="responseTimeHistogramLabel" value="[Response Time In Ms]" />
+ <property name="responseTimeHistogramMaxYAxis" value="10000" />
+ <property name="responseTimeHistogramNumBins" value="20" />
+ <property name="responseTimeHistogramNumDecimalPoints" value="2" />
+ <property name="tpsHistogramLabel" value="[Transactions Per Second]" />
+ <property name="tpsHistogramMaxYAxis" value="100" />
+ <property name="tpsHistogramNumBins" value="20" />
+ <property name="tpsHistogramNumDecimalPoints" value="2" />
+ </bean>
+
+ <bean id="aaiRestEndpointConfig" class="org.onap.aai.sparky.dal.rest.config.RestEndpointConfig">
+ <property name="endpointIpAddress" value="{{.Values.global.aai.serviceName}}" />
+ <property name="endpointServerPort" value="8443" />
+ <property name="numRequestRetries" value="5" />
+ <property name="restAuthenticationMode" value="SSL_BASIC" />
+ <property name="connectTimeoutInMs" value="60000" />
+ <property name="readTimeoutInMs" value="30000" />
+ <property name="basicAuthUserName" value="AaiUI" />
+ <property name="basicAuthPassword" value="OBF:1gfr1p571unz1p4j1gg7" />
+ <property name="truststoreFileName" value="/auth/inventory-ui-keystore" />
+ <property name="validateServerCertChain" value="false" />
+ <property name="validateServerHostname" value="false" />
+ <property name="resourceLoader" ref="sparkyResourceLoader" />
+ </bean>
+
+ <bean id="activeInventoryAdapter" class="org.onap.aai.sparky.dal.ActiveInventoryAdapter">
+ <constructor-arg ref="oxmModelLoader" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="aaiRestEndpointConfig" />
+ </bean>
+
+ <bean id="syncControllerRegistry" class="org.onap.aai.sparky.sync.SyncControllerRegistry" />
+
+ <bean id="elasticSearchEndpointConfig"
+ class="org.onap.aai.sparky.sync.config.ElasticSearchEndpointConfig">
+ <property name="esIpAddress" value="{{.Values.global.aaiElasticsearch.serviceName}}.{{.Release.Namespace}}" />
+ <property name="esServerPort" value="9200" />
+ <property name="scrollContextTimeToLiveInMinutes" value="5" />
+ <property name="scrollContextBatchRequestSize" value="5000" />
+ </bean>
+
+ <bean id="aggregationSyncControllerConfig" class="org.onap.aai.sparky.sync.config.SyncControllerConfig">
+ <property name="controllerName" value="Aggregation-Sync-Controller" />
+ <property name="enabled" value="true" />
+ <property name="syncTaskDelayInMs" value="5000" />
+ <property name="syncTaskFrequencyInDays" value="1" />
+ <property name="numSyncControllerWorkers" value="2" />
+ <property name="runOnceSyncEnabled" value="true" />
+ <property name="periodicSyncEnabled" value="false" />
+ <property name="numInternalSyncWorkers" value="2" />
+ <property name="numSyncElasticWorkers" value="5" />
+ <property name="numSyncActiveInventoryWorkers" value="5" />
+ <property name="targetSyncStartTimeStamp" value="05:00:00 UTC+00:00" />
+ </bean>
+
+ <bean id="aggregationSyncControllerFactory"
+ class="org.onap.aai.sparky.aggregation.sync.AggregationSyncControllerFactory"
+ init-method="registerController">
+
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg ref="aggregationSyncControllerConfig" />
+ <constructor-arg ref="syncControllerRegistry" />
+ <constructor-arg ref="suggestionEntityLookup" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="elasticSearchSchemaFactory" />
+
+ <property name="esAdapter" ref="elasticSearchAdapter" />
+ <property name="esStatConfig" ref="elasticSearchNetworkStatConfig" />
+
+ <property name="aaiAdapter" ref="activeInventoryAdapter" />
+ <property name="aaiStatConfig" ref="aaiNetworkStatConfig" />
+
+ <property name="aggregationEntityToIndexMap">
+ <map>
+ <entry key="generic-vnf" value="aggregate_generic-vnf_index" />
+ </map>
+ </property>
+
+ <property name="indexNameToSchemaConfigMap">
+ <map>
+ <entry key="aggregate_generic-vnf_index">
+ <bean class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
+ <property name="indexName" value="aggregate_generic-vnf_index" />
+ <property name="indexDocType" value="default" />
+ <property name="indexMappingsFileName" value="/schemas/dynamicMappings.json" />
+ </bean>
+ </entry>
+ </map>
+ </property>
+
+ </bean>
+
+
+ <bean id="historicalEntitySyncControllerConfig" class="org.onap.aai.sparky.sync.config.SyncControllerConfig">
+
+ <property name="controllerName" value="Historical-Entity-Sync-Controller" />
+ <property name="enabled" value="true" />
+ <property name="syncTaskDelayInMs" value="5000" />
+ <property name="syncTaskFrequencyInDays" value="1" />
+ <property name="numSyncControllerWorkers" value="2" />
+ <property name="runOnceSyncEnabled" value="true" />
+ <property name="periodicSyncEnabled" value="false" />
+ <property name="numInternalSyncWorkers" value="2" />
+ <property name="numSyncElasticWorkers" value="5" />
+ <property name="numSyncActiveInventoryWorkers" value="5" />
+ <property name="targetSyncStartTimeStamp" value="05:00:00 UTC+00:00" />
+ </bean>
+
+ <bean id="historicalEntitySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
+ <property name="indexName" value="entitycounthistoryindex" />
+ <property name="indexDocType" value="default" />
+ <property name="indexMappingsFileName" value="/schemas/entityCountHistoryMappings.json" />
+ <property name="indexSettingsFileName" value="/schemas/es_settings.json" />
+ </bean>
+
+ <bean id="historicalEntitySyncController"
+ class="org.onap.aai.sparky.aggregation.sync.HistoricalEntitySyncController"
+ init-method="registerController">
+
+ <constructor-arg ref="historicalEntitySyncControllerConfig" />
+ <constructor-arg ref="activeInventoryAdapter" />
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg ref="historicalEntitySchemaConfig" />
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg value="60" />
+ <constructor-arg ref="aaiNetworkStatConfig" />
+ <constructor-arg ref="elasticSearchNetworkStatConfig" />
+ <constructor-arg ref="searchableEntityLookup" />
+ <constructor-arg ref="elasticSearchSchemaFactory" />
+
+ <property name="syncControllerRegistry" ref="syncControllerRegistry" />
+
+ </bean>
+
+
+
+ <bean id="autoSuggestionSyncControllerConfig" class="org.onap.aai.sparky.sync.config.SyncControllerConfig">
+
+ <property name="controllerName" value="Auto-Suggestion-Sync-Controller" />
+ <property name="enabled" value="true" />
+ <property name="syncTaskDelayInMs" value="5000" />
+ <property name="syncTaskFrequencyInDays" value="1" />
+ <property name="numSyncControllerWorkers" value="2" />
+ <property name="runOnceSyncEnabled" value="true" />
+ <property name="periodicSyncEnabled" value="false" />
+ <property name="numInternalSyncWorkers" value="2" />
+ <property name="numSyncElasticWorkers" value="5" />
+ <property name="numSyncActiveInventoryWorkers" value="5" />
+ <property name="targetSyncStartTimeStamp" value="05:00:00 UTC+00:00" />
+ </bean>
+
+ <bean id="autoSuggestionEntitySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
+ <property name="indexName" value="entityautosuggestindex" />
+ <property name="indexDocType" value="default" />
+ <property name="indexMappingsFileName" value="/schemas/autoSuggestMappings.json" />
+ <property name="indexSettingsFileName" value="/schemas/autoSuggestSettings.json" />
+ </bean>
+
+ <bean id="autoSuggestionEntitySyncController"
+ class="org.onap.aai.sparky.autosuggestion.sync.AutoSuggestionSyncController"
+ init-method="registerController">
+
+ <constructor-arg ref="autoSuggestionSyncControllerConfig" />
+ <constructor-arg ref="activeInventoryAdapter" />
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg ref="autoSuggestionEntitySchemaConfig" />
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg ref="aaiNetworkStatConfig" />
+ <constructor-arg ref="elasticSearchNetworkStatConfig" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="suggestionEntityLookup" />
+ <constructor-arg ref="filtersConfig" />
+ <constructor-arg ref="elasticSearchSchemaFactory" />
+
+ <property name="syncControllerRegistry" ref="syncControllerRegistry" />
+
+ </bean>
+
+
+
+ <bean id="vnfAliasSyncControllerConfig" class="org.onap.aai.sparky.sync.config.SyncControllerConfig">
+
+ <property name="controllerName" value="Vnf-Alias-Sync-Controller" />
+ <property name="enabled" value="true" />
+ <property name="syncTaskDelayInMs" value="5000" />
+ <property name="syncTaskFrequencyInDays" value="1" />
+ <property name="numSyncControllerWorkers" value="2" />
+ <property name="runOnceSyncEnabled" value="true" />
+ <property name="periodicSyncEnabled" value="false" />
+ <property name="numInternalSyncWorkers" value="2" />
+ <property name="numSyncElasticWorkers" value="5" />
+ <property name="numSyncActiveInventoryWorkers" value="5" />
+ <property name="targetSyncStartTimeStamp" value="05:00:00 UTC+00:00" />
+ </bean>
+
+ <bean id="vnfAliasEntitySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
+ <property name="indexName" value="entityautosuggestindex" />
+ <property name="indexDocType" value="default" />
+ <property name="indexMappingsFileName" value="/schemas/autoSuggestMappings.json" />
+ <property name="indexSettingsFileName" value="/schemas/autoSuggestSettings.json" />
+ </bean>
+
+ <bean id="vnfAliasSyncController"
+ class="org.onap.aai.sparky.autosuggestion.sync.VnfAliasSyncController"
+ init-method="registerController">
+
+ <constructor-arg ref="vnfAliasSyncControllerConfig" />
+ <constructor-arg ref="activeInventoryAdapter" />
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg ref="vnfAliasEntitySchemaConfig" />
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg ref="aaiNetworkStatConfig" />
+ <constructor-arg ref="elasticSearchNetworkStatConfig" />
+ <constructor-arg ref="filtersConfig" />
+ <constructor-arg ref="elasticSearchSchemaFactory" />
+
+ <property name="syncControllerRegistry" ref="syncControllerRegistry" />
+
+ </bean>
+
+ <bean id="geoSyncControllerConfig" class="org.onap.aai.sparky.sync.config.SyncControllerConfig">
+
+ <property name="controllerName" value="Geo-Sync-Controller" />
+ <property name="enabled" value="true" />
+ <property name="syncTaskDelayInMs" value="5000" />
+ <property name="syncTaskFrequencyInDays" value="1" />
+ <property name="numSyncControllerWorkers" value="2" />
+ <property name="runOnceSyncEnabled" value="true" />
+ <property name="periodicSyncEnabled" value="false" />
+ <property name="numInternalSyncWorkers" value="2" />
+ <property name="numSyncElasticWorkers" value="5" />
+ <property name="numSyncActiveInventoryWorkers" value="5" />
+ <property name="targetSyncStartTimeStamp" value="05:00:00 UTC+00:00" />
+ </bean>
+
+ <bean id="geoEntitySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
+ <property name="indexName" value="topographicalsearchindex" />
+ <property name="indexDocType" value="default" />
+ <property name="indexMappingsFileName" value="/schemas/dynamicMappings.json" />
+ </bean>
+
+ <bean id="geoSyncController"
+ class="org.onap.aai.sparky.topology.sync.GeoSyncController"
+ init-method="registerController">
+
+ <constructor-arg ref="geoSyncControllerConfig" />
+ <constructor-arg ref="activeInventoryAdapter" />
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg ref="geoEntitySchemaConfig" />
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg ref="aaiNetworkStatConfig" />
+ <constructor-arg ref="elasticSearchNetworkStatConfig" />
+ <constructor-arg ref="geoEntityLookup" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="elasticSearchSchemaFactory" />
+
+ <property name="syncControllerRegistry" ref="syncControllerRegistry" />
+
+ </bean>
+
+ <bean id="viewInspectControllerConfig" class="org.onap.aai.sparky.sync.config.SyncControllerConfig">
+
+ <property name="controllerName" value="View-Inspect-Sync-Controller" />
+ <property name="enabled" value="true" />
+ <property name="syncTaskDelayInMs" value="5000" />
+ <property name="syncTaskFrequencyInDays" value="1" />
+ <property name="numSyncControllerWorkers" value="2" />
+ <property name="runOnceSyncEnabled" value="true" />
+ <property name="periodicSyncEnabled" value="false" />
+ <property name="numInternalSyncWorkers" value="2" />
+ <property name="numSyncElasticWorkers" value="5" />
+ <property name="numSyncActiveInventoryWorkers" value="5" />
+ <property name="targetSyncStartTimeStamp" value="05:00:00 UTC+00:00" />
+ </bean>
+
+ <bean id="viewInspectySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
+ <property name="indexName" value="entitysearchindex" />
+ <property name="indexDocType" value="default" />
+ <property name="indexMappingsFileName" value="/schemas/es_mappings.json" />
+ <property name="indexSettingsFileName" value="/schemas/es_settings.json" />
+ </bean>
+
+
+ <bean id="viewInspectSyncController"
+ class="org.onap.aai.sparky.viewinspect.sync.ViewInspectSyncController"
+ init-method="registerController">
+
+ <constructor-arg ref="viewInspectControllerConfig" />
+ <constructor-arg ref="activeInventoryAdapter" />
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg ref="viewInspectySchemaConfig" />
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg ref="aaiNetworkStatConfig" />
+ <constructor-arg ref="elasticSearchNetworkStatConfig" />
+ <constructor-arg ref="crossEntityReferenceLookup" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="searchableEntityLookup" />
+ <constructor-arg ref="elasticSearchSchemaFactory" />
+
+ <property name="syncControllerRegistry" ref="syncControllerRegistry" />
+
+ </bean>
+
+ <bean id="syncControllerService" class="org.onap.aai.sparky.sync.SyncControllerService"
+ destroy-method="shutdown">
+ <constructor-arg ref="syncControllerRegistry" />
+ <constructor-arg value="5" />
+ <constructor-arg value="5" />
+ </bean>
+
+</beans>
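Each controller above pairs an index name with a mappings/settings file, so after the run-once sync completes the cluster should contain a predictable set of indices. A quick check, assuming the same in-cluster Elasticsearch address used in the endpoint beans:

# Sketch only; the service address is an assumption based on the chart values.
curl -s "http://aai-elasticsearch.onap:9200/_cat/indices?v" | \
  grep -E 'entitysearchindex|entityautosuggestindex|entitycounthistoryindex|topographicalsearchindex|aggregate_generic-vnf_index'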
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspect.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspect.xml
new file mode 100644
index 0000000000..c146e6ef2b
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspect.xml
@@ -0,0 +1,55 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="visualizationConfigurations"
+ class="org.onap.aai.sparky.viewandinspect.config.VisualizationConfigs">
+ <property name="maxSelfLinkTraversalDepth" value="2" />
+ <property name="visualizationDebugEnabled" value="false" />
+ <property name="aaiEntityNodeDescriptors" value="/descriptors/aaiEntityNodeDescriptors.json" />
+ <property name="generalNodeClassName" value="generalNodeClass" />
+ <property name="searchNodeClassName" value="searchedNodeClass" />
+ <property name="selectedSearchedNodeClassName" value="selectedSearchedNodeClass" />
+ <property name="numOfThreadsToFetchNodeIntegrity" value="20" />
+ <property name="makeAllNeighborsBidirectional" value="false" />
+ <property name="gizmoEnabled" value="false" />
+ <property name="resourceLoader" ref="sparkyResourceLoader" />
+
+ <property name="shallowEntities">
+ <list value-type="java.lang.String">
+ <value>cloud-region</value>
+ <value>complex</value>
+ <value>vnf-image</value>
+ <value>image</value>
+ <value>flavor</value>
+ <value>availability-zone</value>
+ <value>tenant</value>
+ <value>network-profile</value>
+ <value>l-interface</value>
+ </list>
+ </property>
+
+ </bean>
+
+ <bean id="baseVisualizationService"
+ class="org.onap.aai.sparky.viewandinspect.services.BaseVisualizationService">
+ <constructor-arg ref="oxmModelLoader"/>
+ <constructor-arg ref="visualizationConfigurations"/>
+ <constructor-arg ref="activeInventoryAdapter" />
+ <constructor-arg ref="gizmoAdapter" />
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg ref="elasticSearchEndpointConfig" />
+ <constructor-arg ref="viewInspectySchemaConfig" />
+ <constructor-arg name="numActiveInventoryWorkers" value="50" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="subscriptionConfig" />
+ </bean>
+
+ <bean id="schemaVisualizationProcessor"
+ class="org.onap.aai.sparky.viewandinspect.SchemaVisualizationProcessor">
+ <property name="visualizationService" ref="baseVisualizationService" />
+ </bean>
+
+</beans>
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspectSearchProvider.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspectSearchProvider.xml
new file mode 100644
index 0000000000..072c935574
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-viewInspectSearchProvider.xml
@@ -0,0 +1,34 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="viewInspectSearchProvider"
+ class="org.onap.aai.sparky.viewandinspect.search.ViewInspectSearchProvider">
+ <constructor-arg ref="searchServiceAdapter" />
+ <constructor-arg ref="suggestiveSearchConfigs" />
+ <constructor-arg name="viewInspectIndexName" value="entitysearchindex" />
+ <constructor-arg name="viewInspectSuggestionRoute" value="schema" />
+ <constructor-arg ref="oxmEntityLookup" />
+
+ <property name="additionalSearchSuggestionText">
+ <value>Schema</value>
+ </property>
+
+ </bean>
+
+ <bean id="registerViewInspectSearchProvider"
+ class="org.springframework.beans.factory.config.MethodInvokingFactoryBean">
+ <property name="targetObject" ref="searchProviderRegistry" />
+ <property name="targetMethod">
+ <value>addSearchProviders</value>
+ </property>
+ <property name="arguments">
+ <list value-type="org.onap.aai.sparky.search.api.SearchProvider">
+ <ref bean="viewInspectSearchProvider" />
+ </list>
+ </property>
+ </bean>
+
+</beans>
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core.xml
new file mode 100644
index 0000000000..8f5d42d25a
--- /dev/null
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core.xml
@@ -0,0 +1,241 @@
+<beans xmlns="http://www.springframework.org/schema/beans"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="
+ http://www.springframework.org/schema/beans
+ http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+ <bean id="searchServiceRestEndpointConfig" class="org.onap.aai.sparky.dal.rest.config.RestEndpointConfig">
+ <property name="endpointIpAddress" value="{{.Values.global.searchData.serviceName}}.{{.Release.Namespace}}" />
+ <property name="endpointServerPort" value="9509" />
+ <property name="numRequestRetries" value="5" />
+ <property name="restAuthenticationMode" value="SSL_CERT" />
+ <property name="connectTimeoutInMs" value="60000" />
+ <property name="readTimeoutInMs" value="30000" />
+ <property name="certFileName" value="/auth/client-cert-onap.p12" />
+ <property name="certPassword" value="OBF:1y0q1uvc1uum1uvg1pil1pjl1uuq1uvk1uuu1y10" />
+ <property name="truststoreFileName" value="/auth/inventory-ui-keystore" />
+ <property name="validateServerCertChain" value="false" />
+ <property name="validateServerHostname" value="false" />
+ <property name="resourceLoader" ref="sparkyResourceLoader" />
+ </bean>
+
+ <bean id="searchServiceAdapter" class="org.onap.aai.sparky.search.SearchServiceAdapter">
+ <constructor-arg ref="searchServiceRestEndpointConfig" />
+ <constructor-arg name="serviceApiVersion" value="v1" />
+ </bean>
+
+ <bean id="filtersConfig" class="org.onap.aai.sparky.search.filters.config.FiltersConfig">
+ <constructor-arg name="filtersFileName" value="/filters/aaiui_filters.json" />
+ <constructor-arg name="viewsFileName" value="/filters/aaiui_views.json" />
+ <constructor-arg name="resourceLoader" ref="sparkyResourceLoader" />
+ </bean>
+
+ <bean id="crossEntityReferenceLookup"
+ class="org.onap.aai.sparky.config.oxm.CrossEntityReferenceLookup" />
+
+ <bean id="geoEntityLookup" class="org.onap.aai.sparky.config.oxm.GeoEntityLookup" />
+
+ <bean id="oxmEntityLookup" class="org.onap.aai.sparky.config.oxm.OxmEntityLookup" />
+
+ <bean id="searchableEntityLookup" class="org.onap.aai.sparky.config.oxm.SearchableEntityLookup" />
+
+ <bean id="suggestionEntityLookup" class="org.onap.aai.sparky.config.oxm.SuggestionEntityLookup">
+ <constructor-arg ref="filtersConfig" />
+ </bean>
+
+ <bean id="oxmModelProcessorSet" class="org.springframework.beans.factory.config.SetFactoryBean">
+ <property name="targetSetClass" value="java.util.HashSet" />
+ <property name="sourceSet">
+ <set>
+ <ref bean="crossEntityReferenceLookup" />
+ <ref bean="geoEntityLookup" />
+ <ref bean="oxmEntityLookup" />
+ <ref bean="searchableEntityLookup" />
+ <ref bean="suggestionEntityLookup" />
+ </set>
+ </property>
+ </bean>
+
+ <bean id="oxmModelLoader" class="org.onap.aai.sparky.config.oxm.OxmModelLoader" init-method="loadLatestOxmModel">
+ <!-- the int argument is the latestVersionOverride. -1 disables the override -->
+ <constructor-arg type="int" value="-1" />
+ <constructor-arg ref="oxmModelProcessorSet" />
+ </bean>
+
+ <bean id="searchProviderRegistry"
+ class="org.onap.aai.sparky.search.registry.SearchProviderRegistry" />
+
+ <bean id="sparkyResourceLoader" class="org.onap.aai.sparky.config.SparkyResourceLoader">
+ <property name="configHomeEnvVar" value="CONFIG_HOME" />
+ </bean>
+
+ <bean id="filterElasticSearchAdapter"
+ class="org.onap.aai.sparky.search.filters.FilterElasticSearchAdapter">
+ <constructor-arg ref="elasticSearchAdapter" />
+ </bean>
+
+ <bean id="filteredSearchHelper" class="org.onap.aai.sparky.search.filters.FilteredSearchHelper" >
+ <constructor-arg ref="filtersConfig" />
+ <constructor-arg ref="filterElasticSearchAdapter" />
+ </bean>
+
+ <bean id="filterProcessor" class="org.onap.aai.sparky.search.filters.FilterProcessor" >
+ <property name="filteredSearchHelper" ref="filteredSearchHelper" />
+ </bean>
+
+ <bean id="unifiedSearchProcessor" class="org.onap.aai.sparky.search.UnifiedSearchProcessor">
+ <property name="searchProviderRegistry" ref="searchProviderRegistry" />
+ </bean>
+
+ <bean id="entityCountHistoryProcessor" class="org.onap.aai.sparky.search.EntityCountHistoryProcessor">
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg name="entityTypesToSummarizeDelimitedList"
+ value="customer,service-instance,complex,pserver,vserver,vnf" />
+ <constructor-arg name="vnfEntityTypesDelimitedList"
+ value="vnf,newvce,vce,vpe" />
+ <constructor-arg name="entityCountHistoryIndexName"
+ value="entitycounthistoryindex" />
+ </bean>
+
+ <bean id="attributeUpdater"
+ class="org.onap.aai.sparky.editattributes.AttributeUpdater">
+ <constructor-arg ref="oxmModelLoader" />
+ <constructor-arg ref="oxmEntityLookup" />
+ <constructor-arg ref="activeInventoryAdapter" />
+ </bean>
+
+ <bean id="attributeEditProcessor"
+ class="org.onap.aai.sparky.editattributes.AttributeEditProcessor">
+ <constructor-arg ref="attributeUpdater" />
+ </bean>
+
+ <bean id="geoVisualizationProcessor" class="org.onap.aai.sparky.inventory.GeoVisualizationProcessor">
+ <constructor-arg ref="elasticSearchAdapter" />
+ <constructor-arg name="topographicalSearchIndexName"
+ value="topographicalsearchindex" />
+ </bean>
+
+ <bean id = "pairingHashMap" class="org.springframework.beans.factory.config.MapFactoryBean">
+ <property name="targetMapClass">
+ <value>java.util.HashMap</value>
+ </property>
+ <property name="sourceMap">
+ <map key-type="java.lang.String" value-type="java.lang.String" >
+ <entry key="volume-group-id" value="called" />
+ <entry key="volume-group-name" value="called" />
+ <entry key="physical-location-id" value="called" />
+ <entry key="data-center-code" value="called" />
+ <entry key="complex-name" value="called" />
+ <entry key="tenant-id" value="called" />
+ <entry key="tenant-name" value="called" />
+ <entry key="vserver-id" value="called" />
+ <entry key="vserver-name" value="called" />
+ <entry key="vserver-name2" value="called" />
+ <entry key="hostname" value="called" />
+ <entry key="pserver-name2" value="called" />
+ <entry key="pserver-id" value="called" />
+ <entry key="global-customer-id" value="called" />
+ <entry key="subscriber-name" value="called" />
+ <entry key="service-instance-id" value="called" />
+ <entry key="service-instance-name" value="called" />
+ <entry key="link-name" value="called" />
+ <entry key="vpn-id" value="called" />
+ <entry key="vpn-name" value="called" />
+ <entry key="vpe-id" value="called" />
+ <entry key="vnf-id" value="called" />
+ <entry key="vnf-name" value="called" />
+ <entry key="vnf-name2" value="called" />
+ <entry key="vnfc-name" value="called" />
+ <entry key="network-id" value="called" />
+ <entry key="network-name" value="called" />
+ <entry key="network-policy-id" value="called" />
+ <entry key="vf-module-id" value="called" />
+ <entry key="vf-module-name" value="called" />
+ <entry key="vnf-id2" value="called" />
+ <entry key="pnf-name" value="called" />
+ <entry key="circuit-id" value="called" />
+ <entry key="id" value="called" />
+ <entry key="group-id" value="called" />
+ <entry key="group-name" value="called" />
+ <entry key="street1" value="at" />
+ <entry key="street2" value="at" />
+ <entry key="code" value="at" />
+ <entry key="ipv4-oam-address" value="at" />
+ <entry key="network-policy-fqdn" value="at" />
+ </map>
+ </property>
+ </bean>
+
+ <bean id="suggestiveSearchConfigs" class="org.onap.aai.sparky.search.config.SuggestionConfig">
+ <property name="defaultPairingValue" value="with" />
+ <property name="pairingList" ref="pairingHashMap"></property>
+ <property name="stopWords">
+ <list value-type="java.lang.String">
+ <value>a</value>
+ <value>an</value>
+ <value>and</value>
+ <value>are</value>
+ <value>as</value>
+ <value>at</value>
+ <value>be</value>
+ <value>but</value>
+ <value>by</value>
+ <value>called</value>
+ <value>for</value>
+ <value>if</value>
+ <value>in</value>
+ <value>into</value>
+ <value>is</value>
+ <value>it</value>
+ <value>no</value>
+ <value>not</value>
+ <value>of</value>
+ <value>on</value>
+ <value>or</value>
+ <value>such</value>
+ <value>that</value>
+ <value>the</value>
+ <value>their</value>
+ <value>then</value>
+ <value>there</value>
+ <value>these</value>
+ <value>they</value>
+ <value>this</value>
+ <value>to</value>
+ <value>was</value>
+ <value>will</value>
+ <value>with</value>
+ </list>
+ </property>
+
+ </bean>
+
+ <bean id="subscriptionConfig"
+ class="org.onap.aai.sparky.subscription.config.SubscriptionConfig">
+ <property name="subscriptionTarget" value="" />
+ <property name="subscriptionOrigin" value="" />
+ <property name="subscriptionMessageType" value="" />
+ <property name="subscriptionTopic" value="" />
+ <property name="launchOITarget" value="" />
+ <property name="launchOIOrigin" value="" />
+ <property name="launchOIMessageType" value="" />
+ <property name="launchOITopic" value="" />
+ <property name="annEntitiyTypes">
+ <list value-type="java.lang.String">
+ <value>pserver</value>
+ <value>pnf</value>
+ </list>
+ </property>
+ <property name="isLaunchOIEnabled" value="false" />
+ </bean>
+
+ <bean id="subscriptionService"
+ class="org.onap.aai.sparky.subscription.services.SubscriptionService">
+ <constructor-arg ref="subscriptionConfig" />
+ </bean>
+
+ <bean id="subscriptionServiceProcessor"
+ class="org.onap.aai.sparky.subscription.SubscriptionServiceProcessor">
+ <constructor-arg ref="subscriptionService" />
+ </bean>
+</beans>
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/suggestive-search.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/suggestive-search.properties
deleted file mode 100644
index b82baffc14..0000000000
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/suggestive-search.properties
+++ /dev/null
@@ -1,27 +0,0 @@
-######################################################################################
-############################## Suggestive Search Config ##############################
-######################################################################################
-
-# Indexes to be taken into account when generating suggestion entries
-suggestion.indexes=elasticsearch.autosuggestIndexname,elasticsearch.indexName
-# List of stop words to be used during suggestive search
-suggestion.stopwords=a,an,and,are,as,at,be,but,by,called,for,if,in,into,is,it,no,not,of,on,or,such,that,the,their,then,there,these,they,this,to,was,will,with
-# Assigns which class, within sparky, will process the searches related to an assosiated index
-suggestion.routing=elasticsearch.autosuggestIndexname:SearchServiceWrapper,elasticsearch.indexName:VnfSearchService
-
-############################## Pairings ##############################
-#
-# "called" pairings, keys reference types within the OXM, and the value
-# is the suggestion term used for matches with any of the "called" keys.
-# e.g. "x called vserver-id" (but actual value of vserver-id)
-suggestion.pairing.called.key=volume-group-id,volume-group-name,physical-location-id,data-center-code,complex-name,tenant-id,tenant-name,vserver-id,vserver-name,vserver-name2,hostname,pserver-name2,pserver-id,global-customer-id,subscriber-name,service-instance-id,service-instance-name,link-name,vpn-id,vpn-name,vpe-id,vnf-id,vnf-name,vnf-name2,vnfc-name,network-id,network-name,network-policy-id,vf-module-id,vf-module-name,vnf-id2,pnf-name,circuit-id
-suggestion.pairing.called.value=called
-#
-# Exact same explanation as the "called" pairings above.
-# e.g. "x at ipv4-oam-address"
-suggestion.pairing.at.key=street1,street2,postal-code,ipv4-oam-address,network-policy-fqdn
-suggestion.pairing.at.value=at
-#
-# Default pairing values for any OXM types that aren't part of the the other
-# pairing lists.
-suggestion.pairing.default.value=with
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/synchronizer.properties b/kubernetes/aai/charts/aai-sparky-be/resources/config/synchronizer.properties
deleted file mode 100644
index 0b84f06abe..0000000000
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/synchronizer.properties
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################################
-############################## ElasticSearchSynchronizer Config ##############################
-##############################################################################################
-
-# Initial delay on startup before starting synchronization tasks
-synchronizer.syncTask.initialDelayInMs=60000
-# The frequency at which the synchronizationtask will be run
-synchronizer.syncTask.taskFrequencyInDay=2
-
-# Time at which to run synchronization. Format = hh:mm:ss UTC(-/+)hh:mm
-synchronizer.syncTask.startTimestamp=05:00:00 UTC+00:00
-
-# Generates a count in elasticsearch related to inventory
-synchronizer.historicalEntitySummarizerEnabled=true
-# Toggles the suggestion synchronizer
-synchronizer.autosuggestSynchronizationEnabled=true
-# Frequency at which above count is generated
-synchronizer.historicalEntitySummarizedFrequencyInMinutes=60
-
-# Elasticsearch scroll api context keep alive value
-synchronizer.scrollContextTimeToLiveInMinutes=5
-# Elasticsearch scroll api context max items per batch request
-synchronizer.numScrollContextItemsToRetrievePerRequest=5000
-
-
-############################## Deprecated, to be removed or updated ##############################
-synchronizer.resolver.progressLogFrequencyInMs=60000
-synchronizer.resolver.queueMonitorFrequencyInMs=1000
-synchronizer.resolver.displayVerboseQueueManagerStats=false
-synchronizer.indexIntegrityValidator.enabled=false
-synchronizer.indexIntegrityValidatorFrequencyInMs=3600000
-synchronizer.suppressResourceNotFoundErrors=true
-synchronizer.applyNodesOnlyModifier=false
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/templates/configmap.yaml b/kubernetes/aai/charts/aai-sparky-be/templates/configmap.yaml
index d827c4eb36..1f6ed74029 100644
--- a/kubernetes/aai/charts/aai-sparky-be/templates/configmap.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/templates/configmap.yaml
@@ -20,4 +20,53 @@ metadata:
name: {{ include "common.fullname" . }}-log
namespace: {{ include "common.namespace" . }}
data:
-{{ tpl (.Files.Glob "resources/config/log/logback.xml").AsConfig . | indent 2 }}
+{{ tpl (.Files.Glob "resources/config/log/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-camel-rests
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/camel-rests/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-descriptors
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/descriptors/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-filters
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/filters/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-schemas
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/schemas/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-spring-beans
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/spring-beans/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-boot-inf
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/portal/BOOT-INF/classes/*").AsConfig . | indent 2 }}
+
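This template now renders one ConfigMap per configuration directory instead of only the log ConfigMap. A post-deployment sanity check might look like the following, where the namespace and the name prefix are assumptions based on the common templates:

# Expect, in addition to the pre-existing -log map, the new suffixes:
# -camel-rests, -descriptors, -filters, -schemas, -spring-beans, -boot-inf.
kubectl get configmaps -n onap | grep aai-sparky-be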
diff --git a/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml b/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml
index 05a808e805..424cf1155d 100644
--- a/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml
@@ -34,77 +34,62 @@ spec:
release: {{ .Release.Name }}
name: {{ include "common.name" . }}
spec:
- initContainers:
- - name: {{ include "common.name" . }}-inject-models
- command:
- - /bin/bash
- - "-c"
- - |
- git clone -b {{ .Values.config.gerritBranch }} --single-branch {{ .Values.config.gerritProject }} /tmp/gerrit
- cp -rp /tmp/gerrit/data-router/appconfig/model/* /model-dir
- image: "{{ .Values.global.repository | default .Values.dockerHubRepository }}/{{ .Values.ubuntuInitImage }}"
- imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- volumeMounts:
- - name: modeldir
- mountPath: "/model-dir"
containers:
- name: {{ include "common.name" . }}
image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
env:
- name: CONFIG_HOME
- value: /opt/app/sparky/config/
- - name: KEY_MANAGER_PASSWORD
- value: {{ .Values.config.keyManagerPassword }}
- - name: KEY_STORE_PASSWORD
+ value: /opt/app/sparky/appconfig/
+ - name: KEYSTORE_ALIAS_PASSWORD
+ value: {{ .Values.config.keystoreAliasPassword }}
+ - name: KEYSTORE_PASSWORD
value: {{ .Values.config.keyStorePassword }}
+ - name: SPARKY_SSL_ENABLED
+ value: 'true'
+ - name: SPARKY_PORTAL_ENABLED
+ value: 'false'
volumeMounts:
- mountPath: /etc/localtime
name: localtime
readOnly: true
- - mountPath: /opt/app/sparky/config/auth/
+ - mountPath: /opt/app/sparky/appconfig/auth/
name: {{ include "common.fullname" . }}-auth-config
- - mountPath: /opt/app/sparky/config/synchronizer.properties
- subPath: synchronizer.properties
- name: {{ include "common.fullname" . }}-config
- - mountPath: /opt/app/sparky/config/suggestive-search.properties
- subPath: suggestive-search.properties
- name: {{ include "common.fullname" . }}-config
- - mountPath: /opt/app/sparky/config/search-service.properties
- subPath: search-service.properties
- name: {{ include "common.fullname" . }}-config
- - mountPath: /opt/app/sparky/config/roles.config
- subPath: roles.config
+ - mountPath: /opt/app/sparky/appconfig/camel-rests/
+ name: {{ include "common.fullname" . }}-camel-rests-config
+ - mountPath: /opt/app/sparky/appconfig/descriptors/
+ name: {{ include "common.fullname" . }}-descriptors-config
+ - mountPath: /opt/app/sparky/appconfig/filters/
+ name: {{ include "common.fullname" . }}-filters-config
+ - mountPath: /opt/app/sparky/appconfig/
name: {{ include "common.fullname" . }}-config
- - mountPath: /opt/app/sparky/config/elasticsearch.properties
- subPath: elasticsearch.properties
- name: {{ include "common.fullname" . }}-config
- - mountPath: /opt/app/sparky/config/aai.properties
- subPath: aai.properties
- name: {{ include "common.fullname" . }}-config
- - mountPath: /opt/app/sparky/config/portal/
+ - mountPath: /opt/app/sparky/appconfig/portal
name: {{ include "common.fullname" . }}-portal-config
+ - mountPath: /opt/app/sparky/appconfig/portal/BOOT-INF/classes
+ name: {{ include "common.fullname" . }}-portal-boot-inf-config
+ - mountPath: /opt/app/sparky/appconfig/schemas
+ name: {{ include "common.fullname" . }}-schemas-config
+ - mountPath: /opt/app/sparky/appconfig/spring-beans/
+ name: {{ include "common.fullname" . }}-spring-beans-config
- mountPath: /var/log/onap
name: {{ include "common.fullname" . }}-logs
- - mountPath: /opt/app/sparky/bundleconfig/etc/logback.xml
+ - mountPath: /opt/app/sparky/appconfig/logging/
name: {{ include "common.fullname" . }}-log-conf
- subPath: logback.xml
- - name: modeldir
- mountPath: /opt/app/sparky/config/model
ports:
- containerPort: {{ .Values.service.internalPort }}
+ - containerPort: {{ .Values.service.internalPort2 }}
# disable liveness probe when breakpoints set in debugger
# so K8s doesn't restart unresponsive container
{{- if eq .Values.liveness.enabled true }}
livenessProbe:
tcpSocket:
- port: {{ .Values.service.internalPort }}
+ port: {{ .Values.service.internalPort2 }}
initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
periodSeconds: {{ .Values.liveness.periodSeconds }}
{{ end -}}
readinessProbe:
tcpSocket:
- port: {{ .Values.service.internalPort }}
+ port: {{ .Values.service.internalPort2 }}
initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.readiness.periodSeconds }}
resources:
@@ -138,12 +123,30 @@ spec:
- name: {{ include "common.fullname" . }}-config
configMap:
name: {{ include "common.fullname" . }}
- - name: {{ include "common.fullname" . }}-portal-config
- configMap:
- name: {{ include "common.fullname" . }}-portal
- name: {{ include "common.fullname" . }}-auth-config
secret:
secretName: {{ include "common.fullname" . }}
+ - name: {{ include "common.fullname" . }}-camel-rests-config
+ configMap:
+ name: {{ include "common.fullname" . }}-camel-rests
+ - name: {{ include "common.fullname" . }}-descriptors-config
+ configMap:
+ name: {{ include "common.fullname" . }}-descriptors
+ - name: {{ include "common.fullname" . }}-filters-config
+ configMap:
+ name: {{ include "common.fullname" . }}-filters
+ - name: {{ include "common.fullname" . }}-portal-config
+ configMap:
+ name: {{ include "common.fullname" . }}-portal
+ - name: {{ include "common.fullname" . }}-portal-boot-inf-config
+ configMap:
+ name: {{ include "common.fullname" . }}-boot-inf
+ - name: {{ include "common.fullname" . }}-schemas-config
+ configMap:
+ name: {{ include "common.fullname" . }}-schemas
+ - name: {{ include "common.fullname" . }}-spring-beans-config
+ configMap:
+ name: {{ include "common.fullname" . }}-spring-beans
- name: filebeat-conf
configMap:
name: aai-filebeat
@@ -153,9 +156,7 @@ spec:
emptyDir: {}
- name: {{ include "common.fullname" . }}-log-conf
configMap:
- name: {{ include "common.fullname" . }}-log
- - name: modeldir
- emptyDir: {}
+ name: {{ include "common.fullname" . }}-log
restartPolicy: {{ .Values.global.restartPolicy | default .Values.restartPolicy }}
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml b/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml
index 41bc163696..e97935aedd 100644
--- a/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.servicename" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
diff --git a/kubernetes/aai/charts/aai-sparky-be/values.yaml b/kubernetes/aai/charts/aai-sparky-be/values.yaml
index df058a4af2..5c97bdb6eb 100644
--- a/kubernetes/aai/charts/aai-sparky-be/values.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/values.yaml
@@ -4,11 +4,18 @@
global: # global defaults
nodePortPrefix: 302
repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
-
+ aai:
+ serviceName: aai-aai
+ aaiElasticsearch:
+ serviceName: aai-elasticsearch
+ gizmo:
+ serviceName: aai-gizmo
+ searchData:
+ serviceName: aai-search-data
# application image
repository: nexus3.onap.org:10001
-image: onap/sparky-be:v1.1.0
+image: onap/sparky-be:1.2-STAGING-latest
pullPolicy: Always
restartPolicy: Always
@@ -18,10 +25,17 @@ ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
config:
elasticsearchHttpPort: 9200
keyStorePassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
- keyManagerPassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
+ keystoreAliasPassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
gerritBranch: master
gerritProject: http://gerrit.onap.org/r/aai/test-config
+ portalUsername: aaiui
+ portalPassword: 1t2v1vfv1unz1vgz1t3b
+# ONAP Cookie Processing - During initial development, the following flag, if true, will
+# prevent the portal interface's login processing from searching for a user
+# specific cookie, and will instead allow passage if a valid session cookie is discovered.
+ portalOnapEnabled: true
+#
# override chart name (sparky-be) to share a common namespace
# suffix with parent chart (aai)
@@ -51,6 +65,7 @@ service:
type: ClusterIP
name: aai-sparky-be
internalPort: 9517
+ internalPort2: 8000
ingress:
enabled: false
diff --git a/kubernetes/aai/values.yaml b/kubernetes/aai/values.yaml
index 5d08268e88..e8798186ed 100644
--- a/kubernetes/aai/values.yaml
+++ b/kubernetes/aai/values.yaml
@@ -27,7 +27,33 @@ global: # global defaults
loggingImage: beats/filebeat:5.5.0
restartPolicy: Always
cassandra:
+ serviceName: aai-cassandra
replicas: 3
+ aai:
+ serviceName: aai-aai
+ babel:
+ serviceName: aai-babel
+ champ:
+ serviceName: aai-champ
+ aaiElasticsearch:
+ serviceName: aai-elasticsearch
+ hbase:
+ serviceName: aai-hbase
+ resources:
+ serviceName: aai-resources
+ sparkyBe:
+ serviceName: aai-sparky-be
+ dataRouter:
+ serviceName: aai-data-router
+ gizmo:
+ serviceName: aai-gizmo
+ modelloader:
+ serviceName: aai-modelloader
+ searchData:
+ serviceName: aai-search-data
+ traversal:
+ serviceName: aai-traversal
+
# application image
dockerhubRepository: registry.hub.docker.com
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/installAppcDb.sh b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/installAppcDb.sh
index f260f3d84a..abc293572b 100755
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/installAppcDb.sh
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/installAppcDb.sh
@@ -18,8 +18,8 @@
# ============LICENSE_END=========================================================
###
-SDNC_HOME=${SDNC_HOME:-/opt/onap/sdnc}
-APPC_HOME=${APPC_HOME:-/opt/openecomp/appc}
+SDNC_HOME=${SDNC_HOME:-/opt/onap/ccsdk}
+APPC_HOME=${APPC_HOME:-/opt/onap/appc}
MYSQL_PASSWD=${MYSQL_PASSWD:-{{.Values.config.dbRootPassword}}}
APPC_DB_USER=${APPC_DB_USER:-appcctl}
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
index e5f04ca5e2..e13193f364 100755
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
@@ -32,10 +32,6 @@ function enable_odl_cluster(){
exit
fi
- echo "Installing Opendaylight cluster features"
- ${ODL_HOME}/bin/client feature:install odl-mdsal-clustering
- ${ODL_HOME}/bin/client feature:install odl-jolokia
-
echo "Update cluster information statically"
hm=$(hostname)
echo "Get current Hostname ${hm}"
@@ -99,6 +95,9 @@ then
echo "Installing APPC database"
${APPC_HOME}/bin/installAppcDb.sh
+ echo "Installing ODL Host Key"
+ ${SDNC_HOME}/bin/installOdlHostKey.sh
+
echo "Starting OpenDaylight"
${ODL_HOME}/bin/start
@@ -108,7 +107,7 @@ then
echo "Copying a working version of the logging configuration into the opendaylight etc folder"
cp ${APPC_HOME}/data/org.ops4j.pax.logging.cfg ${ODL_HOME}/etc/org.ops4j.pax.logging.cfg
echo "Copying a new version of aaf cadi shiro into the opendaylight deploy folder"
- cp ${APPC_HOME}/data/aaf-cadi-shiro.jar ${ODL_HOME}/deploy/aaf-cadi-shiro.jar
+ cp ${APPC_HOME}/data/aaf-shiro-aafrealm-osgi-bundle.jar ${ODL_HOME}/deploy/aaf-shiro-aafrealm-osgi-bundle.jar
echo "Installing SDNC platform features"
${SDNC_HOME}/bin/installFeatures.sh
@@ -119,6 +118,8 @@ then
${SDNC_HOME}/svclogic/bin/install.sh
fi
+ if $ENABLE_ODL_CLUSTER ; then echo "Installing Opendaylight cluster features" ; ${ODL_HOME}/bin/client feature:install odl-mdsal-clustering ; ${ODL_HOME}/bin/client feature:install odl-jolokia ; fi
+
echo "Installing APPC platform features"
${APPC_HOME}/bin/installFeatures.sh
@@ -141,13 +142,31 @@ then
echo "cadi_prop_files=${APPC_HOME}/data/properties/cadi.properties" >> ${ODL_HOME}/etc/system.properties
echo "" >> ${ODL_HOME}/etc/system.properties
- echo "Copying a working version of the shiro configuration into the opendaylight etc folder"
- cp ${APPC_HOME}/data/shiro.ini ${ODL_HOME}/etc/shiro.ini
+ echo "Copying the aaa shiro configuration into opendaylight"
+ cp ${APPC_HOME}/data/aaa-app-config.xml ${ODL_HOME}/etc/opendaylight/datastore/initial/config/aaa-app-config.xml
+
echo "Restarting OpenDaylight"
${ODL_HOME}/bin/stop
- echo "Waiting 60 seconds for OpenDaylight stop to complete"
- sleep 60
+ checkRun () {
+ running=0
+ while read a b c d e f g h
+ do
+ if [ "$h" == "/bin/sh /opt/opendaylight/current/bin/karaf server" ]
+ then
+ running=1
+ fi
+ done < <(ps -eaf)
+ echo $running
+ }
+
+ while [ $( checkRun ) == 1 ]
+ do
+ echo "Karaf is still running, waiting..."
+ sleep 5s
+ done
+ echo "Karaf process has stopped"
+ sleep 10s
echo "Installed at `date`" > ${SDNC_HOME}/.installed
fi
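
The fixed 60-second sleep is replaced by a poll: checkRun walks ps -eaf and reports 1 while the karaf wrapper ("/bin/sh /opt/opendaylight/current/bin/karaf server") is still present, and the restart only proceeds once it disappears. A shorter equivalent is sketched below; it is not the committed code and assumes a procps-style pgrep is available in the image:

    # Wait for the karaf wrapper process to exit before continuing.
    while pgrep -f '/opt/opendaylight/current/bin/karaf server' > /dev/null; do
        echo "Karaf is still running, waiting..."
        sleep 5
    done
    echo "Karaf process has stopped"
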
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaiclient.properties b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaiclient.properties
index b2f7102d8c..a91c399933 100644
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaiclient.properties
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaiclient.properties
@@ -25,8 +25,10 @@
#
# Certificate keystore and truststore
#
-org.onap.ccsdk.sli.adaptors.aai.ssl.trust=/opt/openecomp/appc/data/stores/truststore.openecomp.client.jks
-org.onap.ccsdk.sli.adaptors.aai.ssl.trust.psswd=adminadmin
+org.onap.ccsdk.sli.adaptors.aai.ssl.trust=/opt/onap/appc/data/stores/ONAPall.p12
+org.onap.ccsdk.sli.adaptors.aai.ssl.trust.psswd=changeit
+org.onap.ccsdk.sli.adaptors.aai.ssl.key=/opt/onap/appc/data/stores/ONAPall.p12
+org.onap.ccsdk.sli.adaptors.aai.ssl.key.psswd=changeit
org.onap.ccsdk.sli.adaptors.aai.host.certificate.ignore=true
org.onap.ccsdk.sli.adaptors.aai.client.name=AAI
@@ -39,106 +41,105 @@ org.onap.ccsdk.sli.adaptors.aai.application=openECOMP
#
# Configuration file for A&AI Client
#
-org.onap.ccsdk.sli.adaptors.aai.uri=https://aai-service.{{.Release.Namespace}}:8443
-
+org.onap.ccsdk.sli.adaptors.aai.uri=https://aai.{{.Release.Namespace}}:8443
# query
-org.onap.ccsdk.sli.adaptors.aai.path.query=/aai/v11/search/sdn-zone-query
-org.onap.ccsdk.sli.adaptors.aai.query.nodes=/aai/v11/search/nodes-query?search-node-type={node-type}&filter={entity-identifier}:EQUALS:{entity-name}
-org.onap.ccsdk.sli.adaptors.aai.query.generic=/aai/v11/search/generic-query?key={identifier}:{value}&start-node-type={start-node-type}&include=complex&depth=3
+org.onap.ccsdk.sli.adaptors.aai.path.query=/aai/v13/search/sdn-zone-query
+org.onap.ccsdk.sli.adaptors.aai.query.nodes=/aai/v13/search/nodes-query?search-node-type={node-type}&filter={entity-identifier}:EQUALS:{entity-name}
+org.onap.ccsdk.sli.adaptors.aai.query.generic=/aai/v13/search/generic-query?key={identifier}:{value}&start-node-type={start-node-type}&include=complex&depth=3
# named query
org.onap.ccsdk.sli.adaptors.aai.query.named=/aai/search/named-query
#update
-org.onap.ccsdk.sli.adaptors.aai.update=/aai/v11/actions/update
+org.onap.ccsdk.sli.adaptors.aai.update=/aai/v13/actions/update
# vce
-org.onap.ccsdk.sli.adaptors.aai.path.vce =/aai/v11/network/vces/vce/
-org.onap.ccsdk.sli.adaptors.aai.path.vces=/aai/v11/network/vces/
+org.onap.ccsdk.sli.adaptors.aai.path.vce =/aai/v13/network/vces/vce/
+org.onap.ccsdk.sli.adaptors.aai.path.vces=/aai/v13/network/vces/
# vpe
-org.onap.ccsdk.sli.adaptors.aai.path.vpe =/aai/v11/network/vpes/vpe/
-org.onap.ccsdk.sli.adaptors.aai.path.vpes=/aai/v11/network/vpes/
+org.onap.ccsdk.sli.adaptors.aai.path.vpe =/aai/v13/network/vpes/vpe/
+org.onap.ccsdk.sli.adaptors.aai.path.vpes=/aai/v13/network/vpes/
# customer
-org.onap.ccsdk.sli.adaptors.aai.path.customer=/aai/v11/business/customers/customer/{customer-id}
+org.onap.ccsdk.sli.adaptors.aai.path.customer=/aai/v13/business/customers/customer/{customer-id}
# service subscription
-org.onap.ccsdk.sli.adaptors.aai.path.service.subscription=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}
+org.onap.ccsdk.sli.adaptors.aai.path.service.subscription=/aai/v13/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}
# service instance
-org.onap.ccsdk.sli.adaptors.aai.path.svcinst=/aai/v11/business/customers/customer/{customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
-org.onap.ccsdk.sli.adaptors.aai.path.svcinst.query=/aai/v11/search/generic-query?key=service-instance.service-instance-id:{svc-instance-id}&start-node-type=service-instance&include=service-instance
-org.onap.ccsdk.sli.adaptors.aai.path.service.instance=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances/service-instance/{service-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.path.svcinst=/aai/v13/business/customers/customer/{customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
+org.onap.ccsdk.sli.adaptors.aai.path.svcinst.query=/aai/v13/search/generic-query?key=service-instance.service-instance-id:{svc-instance-id}&start-node-type=service-instance&include=service-instance
+org.onap.ccsdk.sli.adaptors.aai.path.service.instance=/aai/v13/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances/service-instance/{service-instance-id}
# complex
-org.onap.ccsdk.sli.adaptors.aai.path.complexes=/aai/v11/cloud-infrastructure/complexes
-org.onap.ccsdk.sli.adaptors.aai.path.complex=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}
+org.onap.ccsdk.sli.adaptors.aai.path.complexes=/aai/v13/cloud-infrastructure/complexes
+org.onap.ccsdk.sli.adaptors.aai.path.complex=/aai/v13/cloud-infrastructure/complexes/complex/{physical-location-id}
# tenant
-org.onap.ccsdk.sli.adaptors.aai.path.tenant=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}
-org.onap.ccsdk.sli.adaptors.aai.path.tenant.query=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant?tenant-name={tenant-name}
+org.onap.ccsdk.sli.adaptors.aai.path.tenant=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}
+org.onap.ccsdk.sli.adaptors.aai.path.tenant.query=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant?tenant-name={tenant-name}
# vservers
-org.onap.ccsdk.sli.adaptors.aai.path.vservers=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/
-org.onap.ccsdk.sli.adaptors.aai.path.vserver=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}
+org.onap.ccsdk.sli.adaptors.aai.path.vservers=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/
+org.onap.ccsdk.sli.adaptors.aai.path.vserver=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}
# vpls-pe
-org.onap.ccsdk.sli.adaptors.aai.path.vpls.pes=/aai/v11/network/vpls-pes/
-org.onap.ccsdk.sli.adaptors.aai.path.vpls.pe =/aai/v11/network/vpls-pes/vpls-pe/
+org.onap.ccsdk.sli.adaptors.aai.path.vpls.pes=/aai/v13/network/vpls-pes/
+org.onap.ccsdk.sli.adaptors.aai.path.vpls.pe =/aai/v13/network/vpls-pes/vpls-pe/
# ctag-pool
-org.onap.ccsdk.sli.adaptors.aai.path.ctag.pools=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools
-org.onap.ccsdk.sli.adaptors.aai.path.ctag.pool=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools/ctag-pool/{target-pe}/{availability-zone-name}
+org.onap.ccsdk.sli.adaptors.aai.path.ctag.pools=/aai/v13/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools
+org.onap.ccsdk.sli.adaptors.aai.path.ctag.pool=/aai/v13/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools/ctag-pool/{target-pe}/{availability-zone-name}
#
#-------------- 1510 ----------------------
#
# pservers
-org.onap.ccsdk.sli.adaptors.aai.path.pservers=/aai/v11/cloud-infrastructure/pservers
-org.onap.ccsdk.sli.adaptors.aai.path.pserver=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}
+org.onap.ccsdk.sli.adaptors.aai.path.pservers=/aai/v13/cloud-infrastructure/pservers
+org.onap.ccsdk.sli.adaptors.aai.path.pserver=/aai/v13/cloud-infrastructure/pservers/pserver/{hostname}
# generic-vnf
-org.onap.ccsdk.sli.adaptors.aai.path.generic.vnfs=/aai/v11/network/generic-vnfs
-org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnfs=/aai/v13/network/generic-vnfs
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}
# dvs-switch
-org.onap.ccsdk.sli.adaptors.aai.path.dvsswitches=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches
-org.onap.ccsdk.sli.adaptors.aai.path.dvsswitch=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches/dvs-switch/{switch-name}
+org.onap.ccsdk.sli.adaptors.aai.path.dvsswitches=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches
+org.onap.ccsdk.sli.adaptors.aai.path.dvsswitch=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches/dvs-switch/{switch-name}
# L3 Networks
-org.onap.ccsdk.sli.adaptors.aai.path.l3networks=/aai/v11/network/l3-networks
-org.onap.ccsdk.sli.adaptors.aai.path.l3network=/aai/v11/network/l3-networks/l3-network/{network-id}
-org.onap.ccsdk.sli.adaptors.aai.path.l3network.query.name=/aai/v11/network/l3-networks/l3-network?network-name={network-name}
+org.onap.ccsdk.sli.adaptors.aai.path.l3networks=/aai/v13/network/l3-networks
+org.onap.ccsdk.sli.adaptors.aai.path.l3network=/aai/v13/network/l3-networks/l3-network/{network-id}
+org.onap.ccsdk.sli.adaptors.aai.path.l3network.query.name=/aai/v13/network/l3-networks/l3-network?network-name={network-name}
# P-Interfaces
-org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterfaces=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces
-org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterfaces=/aai/v13/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces
+org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterface=/aai/v13/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{interface-name}
# Physical Link
-org.onap.ccsdk.sli.adaptors.aai.path.physical.links=/aai/v11/network/physical-links
-org.onap.ccsdk.sli.adaptors.aai.path.physical.link=/aai/v11/network/physical-links/physical-link/{link-name}
+org.onap.ccsdk.sli.adaptors.aai.path.physical.links=/aai/v13/network/physical-links
+org.onap.ccsdk.sli.adaptors.aai.path.physical.link=/aai/v13/network/physical-links/physical-link/{link-name}
# VPN Bindings
-org.onap.ccsdk.sli.adaptors.aai.path.vpn.bindings=/aai/v11/network/vpn-bindings/
-org.onap.ccsdk.sli.adaptors.aai.path.vpn.binding=/aai/v11/network/vpn-bindings/vpn-binding/{vpn-id}
+org.onap.ccsdk.sli.adaptors.aai.path.vpn.bindings=/aai/v13/network/vpn-bindings/
+org.onap.ccsdk.sli.adaptors.aai.path.vpn.binding=/aai/v13/network/vpn-bindings/vpn-binding/{vpn-id}
# VNF IMAGES
-org.onap.ccsdk.sli.adaptors.aai.path.vnf.images=/aai/v11/service-design-and-creation/vnf-images
-org.onap.ccsdk.sli.adaptors.aai.path.vnf.image=/aai/v11/service-design-and-creation/vnf-images/vnf-image/{att-uuid}
-org.onap.ccsdk.sli.adaptors.aai.path.vnf.image.query=/aai/v11/service-design-and-creation/vnf-images/vnf-image?application={application_model}&application-vendor={application_vendor}
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.images=/aai/v13/service-design-and-creation/vnf-images
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.image=/aai/v13/service-design-and-creation/vnf-images/vnf-image/{att-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.image.query=/aai/v13/service-design-and-creation/vnf-images/vnf-image?application={application_model}&application-vendor={application_vendor}
# UBB Notify
-org.onap.ccsdk.sli.adaptors.aai.path.notify=/aai/v11/actions/notify
+org.onap.ccsdk.sli.adaptors.aai.path.notify=/aai/v13/actions/notify
org.onap.ccsdk.sli.adaptors.aai.notify.selflink.fqdn=https://aai-service.{{.Release.Namespace}}:8443/restconf/config/L3SDN-API:services/layer3-service-list/{service-instance-id}
org.onap.ccsdk.sli.adaptors.aai.notify.selflink.avpn=https://aai-service.{{.Release.Namespace}}:8443/restconf/config/L3AVPN-EVC-API:services/service-list/{service-instance-id}/service-data/avpn-logicalchannel-information
# Service
-org.onap.ccsdk.sli.adaptors.aai.path.service=/aai/v11/service-design-and-creation/services/service/{service-id}
-org.onap.ccsdk.sli.adaptors.aai.path.services=/aai/v11/service-design-and-creation/services
+org.onap.ccsdk.sli.adaptors.aai.path.service=/aai/v13/service-design-and-creation/services/service/{service-id}
+org.onap.ccsdk.sli.adaptors.aai.path.services=/aai/v13/service-design-and-creation/services
#
@@ -146,98 +147,98 @@ org.onap.ccsdk.sli.adaptors.aai.path.services=/aai/v11/service-design-and-creati
#
# VNFC
-org.onap.ccsdk.sli.adaptors.aai.path.vnfc=/aai/v11/network/vnfcs/vnfc/{vnfc-name}
+org.onap.ccsdk.sli.adaptors.aai.path.vnfc=/aai/v13/network/vnfcs/vnfc/{vnfc-name}
# class-of-service
-org.onap.ccsdk.sli.adaptors.aai.path.class.of.service=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}/classes-of-service/class-of-service/{cos-id}
+org.onap.ccsdk.sli.adaptors.aai.path.class.of.service=/aai/v13/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}/classes-of-service/class-of-service/{cos-id}
# site-pair
-org.onap.ccsdk.sli.adaptors.aai.path.site.pair=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}
+org.onap.ccsdk.sli.adaptors.aai.path.site.pair=/aai/v13/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}
# routing-instance
-org.onap.ccsdk.sli.adaptors.aai.path.routing.instance=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.path.routing.instance=/aai/v13/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}
# site-pair-set
-org.onap.ccsdk.sli.adaptors.aai.path.site.pair.set=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}
+org.onap.ccsdk.sli.adaptors.aai.path.site.pair.set=/aai/v13/network/site-pair-sets/site-pair-set/{site-pair-set-id}
# license key resource
-org.onap.ccsdk.sli.adaptors.aai.path.license.acquire=/aai/v11/actions/assignment/license-management/assignment-group-uuid/{assignment-group-uuid}
-org.onap.ccsdk.sli.adaptors.aai.path.license=/aai/v11/license-management/license-key-resources/license-key-resource/{att-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.license.acquire=/aai/v13/actions/assignment/license-management/assignment-group-uuid/{assignment-group-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.license=/aai/v13/license-management/license-key-resources/license-key-resource/{att-uuid}
# logical-link
-org.onap.ccsdk.sli.adaptors.aai.path.logical.link =/aai/v11/network/logical-links/logical-link/{link-name}
+org.onap.ccsdk.sli.adaptors.aai.path.logical.link =/aai/v13/network/logical-links/logical-link/{link-name}
# virtual-data-center
-org.onap.ccsdk.sli.adaptors.aai.path.virtual.data.center=/aai/v11/cloud-infrastructure/virtual-data-centers/virtual-data-center/{vdc-id}
+org.onap.ccsdk.sli.adaptors.aai.path.virtual.data.center=/aai/v13/cloud-infrastructure/virtual-data-centers/virtual-data-center/{vdc-id}
# wan-connector
-org.onap.ccsdk.sli.adaptors.aai.path.wan.connector=/aai/v11/business/connectors/connector/{resource-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.path.wan.connector=/aai/v13/business/connectors/connector/{resource-instance-id}
# l-interface
-org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
-org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface=/aai/v13/cloud-infrastructure/pservers/pserver/{hostname}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface=/aai/v13/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
# l-interface pnf
-org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
-org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface.pnf=/aai/v13/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface.pnf=/aai/v13/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
# subinterface
-org.onap.ccsdk.sli.adaptors.aai.path.pnf.lag.interface.subinterface=/aai/v11/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
-org.onap.ccsdk.sli.adaptors.aai.path.pnf.p.interface.l.interface=/aai/v11/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf.lag.interface.subinterface=/aai/v13/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf.p.interface.l.interface=/aai/v13/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
# vlans
-org.onap.ccsdk.sli.adaptors.aai.path.vlan=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
-org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.vlan=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.vlan=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
# l3-interface-ipv4-address-list
-org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
-org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv4.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv4.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
# l3-interface-ipv6-address-list
-org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
-org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv6.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv6.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
# ipsec-configuration
-org.onap.ccsdk.sli.adaptors.aai.path.ipsec.configuration=/aai/v11/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}
+org.onap.ccsdk.sli.adaptors.aai.path.ipsec.configuration=/aai/v13/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}
# vig server
-org.onap.ccsdk.sli.adaptors.aai.path.vig.server=/aai/v11/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}/vig-servers/vig-server/{vig-address-type}
+org.onap.ccsdk.sli.adaptors.aai.path.vig.server=/aai/v13/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}/vig-servers/vig-server/{vig-address-type}
# l3-network
-org.onap.ccsdk.sli.adaptors.aai.path.l3.network=/aai/v11/network/l3-networks/l3-network/{network-id}
+org.onap.ccsdk.sli.adaptors.aai.path.l3.network=/aai/v13/network/l3-networks/l3-network/{network-id}
# subnet
-org.onap.ccsdk.sli.adaptors.aai.path.subnet=/aai/v11/network/l3-networks/l3-network/{network-id}/subnets/subnet/{subnet-id}
+org.onap.ccsdk.sli.adaptors.aai.path.subnet=/aai/v13/network/l3-networks/l3-network/{network-id}/subnets/subnet/{subnet-id}
# multicast-configuration
-org.onap.ccsdk.sli.adaptors.aai.path.multicast.configuration=/aai/v11/network/multicast-configurations/multicast-configuration/{multicast-configuration-id}
+org.onap.ccsdk.sli.adaptors.aai.path.multicast.configuration=/aai/v13/network/multicast-configurations/multicast-configuration/{multicast-configuration-id}
# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.ipv4.address.list
-org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv4.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.vlan.ipv4.address.list
-org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.vlan.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.vlan.ipv4.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.ipv6.address.list
-org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv6.address.list=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
# volume.group
-org.onap.ccsdk.sli.adaptors.aai.path.volume.group=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/volume-groups/volume-group/{volume-group-id}
+org.onap.ccsdk.sli.adaptors.aai.path.volume.group=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/volume-groups/volume-group/{volume-group-id}
#cloud region
-org.onap.ccsdk.sli.adaptors.aai.path.cloud.region=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}
+org.onap.ccsdk.sli.adaptors.aai.path.cloud.region=/aai/v13/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}
# vf-module
-org.onap.ccsdk.sli.adaptors.aai.path.vf.module=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/vf-modules/vf-module/{vf-module-id}
+org.onap.ccsdk.sli.adaptors.aai.path.vf.module=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/vf-modules/vf-module/{vf-module-id}
# l-interface through generic-vnf
-org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.linterface=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.linterface=/aai/v13/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}
# network-policy
-org.onap.ccsdk.sli.adaptors.aai.path.network.policy=/aai/v11/network/network-policies/network-policy/{network-policy-id}
+org.onap.ccsdk.sli.adaptors.aai.path.network.policy=/aai/v13/network/network-policies/network-policy/{network-policy-id}
# pnf
-org.onap.ccsdk.sli.adaptors.aai.path.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf=/aai/v13/network/pnfs/pnf/{pnf-name}
#
# Formatting
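
All A&AI client paths move from /aai/v11 to /aai/v13, and the base URI now points at the aai service directly. A spot check of the new endpoint from inside the cluster, reusing the AAI:AAI basic credentials that the consul health checks later in this change use; the onap namespace and the presence of curl in the probing pod are assumptions:

    curl -sk -H "Authorization: Basic QUFJOkFBSQ==" \
         -H "X-TransactionId: manual-check" \
         https://aai.onap:8443/aai/util/echo
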
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
index 7c73beb5bc..bf41d5b2ed 100644
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
@@ -53,17 +53,13 @@ org.openecomp.appc.db.pass.sdnctl=gamma
### ###
### OpenStack credentials (these properties also are used in appc-rest-adapter-bundle, appc-chef-adapter-bundle, appc-iaas-adapter-bundle) ###
### ###
-provider1.type=OpenStackProvider
-provider1.name=OpenStack
-provider1.identity=http://localhost:8181/apidoc/explorer/index.html
-provider1.tenant1.name=default
-provider1.tenant1.domain=default
-provider1.tenant1.userid=admin
-provider1.tenant1.password=admin
-
-
-
-
+provider1.type={{.Values.config.openStackType}}
+provider1.name={{.Values.config.openStackName}}
+provider1.identity={{.Values.config.openStackKeyStoneUrl}}
+provider1.tenant1.name={{.Values.config.openStackServiceTenantName}}
+provider1.tenant1.domain={{.Values.config.openStackDomain}}
+provider1.tenant1.userid={{.Values.config.openStackUserName}}
+provider1.tenant1.password={{.Values.config.openStackEncryptedPassword}}
### ###
### Properties that are not covered or being replaced from default.properties files. Default value for DMaaP IP is 10.0.11.1:3904 ###
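
The hard-coded OpenStack provider block is now driven entirely by chart values, so real credentials can be supplied at deploy time instead of being baked into appc.properties. A sketch of overriding them (release name and endpoint are illustrative; the keys match the config entries added to kubernetes/appc/values.yaml later in this change):

    helm upgrade --install onap-appc kubernetes/appc \
      --set config.openStackType=OpenStackProvider \
      --set config.openStackKeyStoneUrl=http://keystone.example:5000/v3 \
      --set config.openStackServiceTenantName=service \
      --set config.openStackUserName=appc \
      --set config.openStackEncryptedPassword=<encrypted-password>
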
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/ccsdk/bin/installSdncDb.sh b/kubernetes/appc/resources/config/appc/opt/onap/ccsdk/bin/installSdncDb.sh
index 25560fbef4..deca2ff382 100755
--- a/kubernetes/appc/resources/config/appc/opt/onap/ccsdk/bin/installSdncDb.sh
+++ b/kubernetes/appc/resources/config/appc/opt/onap/ccsdk/bin/installSdncDb.sh
@@ -21,7 +21,7 @@
# ============LICENSE_END=========================================================
###
-SDNC_HOME=${SDNC_HOME:-/opt/onap/sdnc}
+SDNC_HOME=${SDNC_HOME:-/opt/onap/ccsdk}
MYSQL_PASSWD=${MYSQL_PASSWD:-{{.Values.config.dbRootPassword}}}
SDNC_DB_USER=${SDNC_DB_USER:-sdnctl}
diff --git a/kubernetes/appc/templates/pv.yaml b/kubernetes/appc/templates/pv.yaml
new file mode 100644
index 0000000000..10214097e3
--- /dev/null
+++ b/kubernetes/appc/templates/pv.yaml
@@ -0,0 +1,130 @@
+{{/*
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+# #
+# # Licensed under the Apache License, Version 2.0 (the "License");
+# # you may not use this file except in compliance with the License.
+# # You may obtain a copy of the License at
+# #
+# # http://www.apache.org/licenses/LICENSE-2.0
+# #
+# # Unless required by applicable law or agreed to in writing, software
+# # distributed under the License is distributed on an "AS IS" BASIS,
+# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# # See the License for the specific language governing permissions and
+# # limitations under the License.
+*/}}
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+{{ $pvNum := default 1 .Values.replicaCount | int }}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data0
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}0
+{{ if gt $pvNum 1 }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data1
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}1
+{{ end }}
+{{ if gt $pvNum 2 }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data2
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}2
+{{ end }}
+{{ if gt $pvNum 3 }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data3
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}3
+{{ end }}
+{{ if gt $pvNum 4 }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data4
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}4
+{{ end }}
+{{- end -}}
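
This template stamps out up to five hostPath PersistentVolumes, one per replica, gated on persistence.enabled, the absence of an existingClaim, and $pvNum taken from replicaCount (default 1). A single range over (until $pvNum) could generate the same objects without the repeated blocks, but the unrolled form is what this change adds. To confirm the volumes after a deploy (release naming via common.fullname is assumed to contain "appc"):

    kubectl get pv | grep appc
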
diff --git a/kubernetes/appc/templates/statefulset.yaml b/kubernetes/appc/templates/statefulset.yaml
index 5da50a5fdf..e81c50d27f 100644
--- a/kubernetes/appc/templates/statefulset.yaml
+++ b/kubernetes/appc/templates/statefulset.yaml
@@ -18,6 +18,8 @@ spec:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
spec:
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
initContainers:
- command:
- /root/ready.py
@@ -112,6 +114,8 @@ spec:
- mountPath: /opt/onap/ccsdk/bin/installSdncDb.sh
name: onap-sdnc-bin
subPath: installSdncDb.sh
+ - mountPath: {{ .Values.persistence.mdsalPath }}
+ name: {{ include "common.fullname" . }}-data
- mountPath: /var/log/onap
name: logs
- mountPath: /opt/opendaylight/current/etc/org.ops4j.pax.logging.cfg
@@ -182,5 +186,19 @@ spec:
configMap:
name: {{ include "common.fullname" . }}-onap-sdnc-bin
defaultMode: 0755
- imagePullSecrets:
- - name: "{{ include "common.namespace" . }}-docker-registry-key"
+{{ if not .Values.persistence.enabled }}
+ - name: {{ include "common.fullname" . }}-data
+ emptyDir: {}
+{{ else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ include "common.fullname" . }}-data
+ labels:
+ name: {{ include "common.fullname" . }}
+ spec:
+ accessModes: [ {{ .Values.persistence.accessMode }} ]
+ storageClassName: {{ include "common.fullname" . }}-data
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{ end }}
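
With persistence disabled the data mount falls back to an emptyDir; otherwise the new volumeClaimTemplates block gives each APPC replica its own claim, which binds to one of the hostPath volumes from pv.yaml through the shared "<fullname>-data" storageClassName. A post-deploy check, assuming the onap namespace:

    kubectl -n onap get pvc | grep appc
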
diff --git a/kubernetes/appc/values.yaml b/kubernetes/appc/values.yaml
index a9f67a9375..a0edf64e75 100644
--- a/kubernetes/appc/values.yaml
+++ b/kubernetes/appc/values.yaml
@@ -44,6 +44,13 @@ config:
dmaapTopic: SUCCESS
logstashServiceName: log-ls
logstashPort: 5044
+ openStackType: OpenStackProvider
+ openStackName: OpenStack
+ openStackKeyStoneUrl: http://localhost:8181/apidoc/explorer/index.html
+ openStackServiceTenantName: default
+ openStackDomain: default
+ openStackUserName: admin
+ openStackEncryptedPassword: admin
mysql:
nameOverride: appc-db
@@ -97,6 +104,28 @@ service:
nodePort2: 31
clusterPort: 2550
+## Persist data to a persistent volume
+persistence:
+ enabled: true
+
+ ## A manually managed Persistent Volume and Claim
+ ## Requires persistence.enabled: true
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ volumeReclaimPolicy: Retain
+
+ ## database data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ accessMode: ReadWriteOnce
+ size: 1Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: appc/mdsal
+ mdsalPath: /opt/opendaylight/current/daexim
+
ingress:
enabled: false
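
The commented existingClaim hook above lets an operator point the chart at a pre-created claim instead of the chart-managed volumes (pv.yaml is skipped when it is set). A sketch of creating such a claim by hand; the claim name, size, and namespace are illustrative, not part of this change:

    cat <<'EOF' | kubectl -n onap apply -f -
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: appc-mdsal-claim
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
    EOF
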
diff --git a/kubernetes/clamp/templates/configmap.yaml b/kubernetes/clamp/templates/configmap.yaml
index b680487fc0..bee8f132ea 100644
--- a/kubernetes/clamp/templates/configmap.yaml
+++ b/kubernetes/clamp/templates/configmap.yaml
@@ -23,4 +23,4 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
- spring_application_json: {{ .Values.config.springApplicationJson | quote }}
+ spring_application_json: {{ tpl .Values.config.springApplicationJson . | quote }}
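
Wrapping the value in tpl makes Helm render any {{ ... }} expressions embedded in springApplicationJson against the chart context before the string lands in the ConfigMap, rather than copying them through literally. A quick way to inspect the rendered output, assuming a local checkout of the chart and its dependencies:

    helm template kubernetes/clamp | grep -A3 spring_application_json
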
diff --git a/kubernetes/clamp/values.yaml b/kubernetes/clamp/values.yaml
index 827b5d99cc..8b782f9b05 100644
--- a/kubernetes/clamp/values.yaml
+++ b/kubernetes/clamp/values.yaml
@@ -42,7 +42,7 @@ config:
dataRootDir: /dockerdata-nfs
springApplicationJson: >
{
- "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://mariadb:3306/cldsdb4?autoReconnect=true&connectTimeout=10000&socketTimeout=10000&retriesAllDown=3",
+ "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clampdb:3306/cldsdb4?autoReconnect=true&connectTimeout=10000&socketTimeout=10000&retriesAllDown=3",
"clamp.config.sdc.catalog.url": "https://sdc-be:8443/sdc/v1/catalog/",
"clamp.config.sdc.hostUrl": "https://sdc-be:8443/",
"clamp.config.sdc.serviceUrl": "https://sdc-be:8443/sdc/v1/catalog/services",
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json
index 6b42e0cc4a..8d0abd7303 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json
@@ -5,7 +5,7 @@
{
"id": "hbase-aai",
"name": "HBase Health Check",
- "http": "http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster",
+ "http": "http://hbase:8080/status/cluster",
"method": "GET",
"header": {
"Cache-Control": ["no-cache"],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json
index 2a111d66eb..0817a19cf0 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json
@@ -5,7 +5,7 @@
{
"id": "elasticsearch",
"name": "Search Data Service Document Store",
- "http": "http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/_cat/indices?v",
+ "http": "http://aai-elasticsearch:9200/_cat/indices?v",
"interval": "15s",
"timeout": "1s"
},
@@ -18,7 +18,7 @@
{
"id": "search-data-service-availability",
"name": "Search Data Service Availability",
- "script": "curl -k --cert /consul/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/certs/client-cert-onap.key.pem --key-type PEM https://search-data-service.{{ .Values.nsPrefix }}:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'",
+ "script": "curl -k --cert /consul/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/certs/client-cert-onap.key.pem --key-type PEM https://search-data-service:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'",
"interval": "15s"
},
{
diff --git a/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json
index 0274cd53c7..f06dec213f 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json
@@ -5,7 +5,7 @@
{
"id": "aai-service",
"name": "Core A&AI",
- "http": "https://aai-service.{{ .Values.nsPrefix }}:8443/aai/util/echo",
+ "http": "https://aai-service:8443/aai/util/echo",
"header": {
"Authorization": ["Basic QUFJOkFBSQ=="],
"X-TransactionId": ["ConsulHealthCheck"],
@@ -18,7 +18,7 @@
{
"id": "aai-resources",
"name": "Resources Microservice",
- "http": "https://aai-resources.{{ .Values.nsPrefix }}:8447/aai/util/echo",
+ "http": "https://aai-resources:8447/aai/util/echo",
"header": {
"Authorization": ["Basic QUFJOkFBSQ=="],
"X-TransactionId": ["ConsulHealthCheck"],
@@ -31,7 +31,7 @@
{
"id": "aai-traversal",
"name": "Traversal Microservice",
- "http": "https://aai-traversal.{{ .Values.nsPrefix }}:8446/aai/util/echo",
+ "http": "https://aai-traversal:8446/aai/util/echo",
"header": {
"Authorization": ["Basic QUFJOkFBSQ=="],
"X-TransactionId": ["ConsulHealthCheck"],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json b/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json
index 9505246c25..c86361743a 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json
@@ -5,7 +5,7 @@
{
"id": "appc-dgbuilder",
"name": "APPC-Dgbuilder Server Health Check",
- "http": "http://appc-dgbuilder.{{ .Values.nsPrefix }}:3000/",
+ "http": "http://appc-dgbuilder:3000/",
"method": "HEAD",
"header": {
"Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json
index f36251a78f..230d31f509 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json
@@ -3,9 +3,9 @@
"name": "Health Check: APPC-SDN-CTL-DB-01",
"checks": [
{
- "id": "appc-sdnctldb01.{{ .Values.nsPrefix }}",
+ "id": "appc-sdnctldb01",
"name": "APPC SDNCTLDB01 Health Check",
- "tcp": "appc-sdnctldb01.{{ .Values.nsPrefix }}:3306",
+ "tcp": "appc-sdnctldb01:3306",
"interval": "10s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json
index 8c8171fe5f..da669e3ac0 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json
@@ -3,9 +3,9 @@
"name": "Health Check: APPC-SDN-CTL-DB-02",
"checks": [
{
- "id": "appc-sdnctldb02.{{ .Values.nsPrefix }}",
+ "id": "appc-sdnctldb02",
"name": "APPC SDNCTLDB02 Health Check",
- "tcp": "appc-sdnctldb02.{{ .Values.nsPrefix }}:3306",
+ "tcp": "appc-sdnctldb02:3306",
"interval": "10s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json
index ec6db9d0cb..1548cab909 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json
@@ -5,7 +5,7 @@
{
"id": "appc-sdnhost",
"name": "APPC SDN Host Health Check",
- "http": "http://appc-sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html",
+ "http": "http://appc-sdnhost:8282/apidoc/explorer/index.html",
"method": "HEAD",
"header": {
"Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json b/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json
index d14b16487f..284220e665 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json
@@ -5,7 +5,7 @@
{
"id": "log-elasticsearch-server",
"name": "Log Elastic Search Health Check",
- "http": "http://elasticsearch.{{ .Values.nsPrefix }}:9200/_cluster/health?pretty",
+ "http": "http://elasticsearch:9200/_cluster/health?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -14,7 +14,7 @@
{
"id": "log-elasticsearch-tcp",
"name": "Log Elastic Search TCP Health Check",
- "tcp": "elasticsearchtcp.{{ .Values.nsPrefix }}:9300",
+ "tcp": "elasticsearchtcp:9300",
"interval": "15s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json b/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json
index cc72bbf3e2..09cc85e9a7 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json
@@ -5,7 +5,7 @@
{
"id": "log-kibana-server",
"name": "Log kibana Health Check",
- "http": "http://kibana.{{ .Values.nsPrefix }}:5601/status",
+ "http": "http://kibana:5601/status",
"method": "HEAD",
"tls_skip_verify": true,
"interval": "15s",
diff --git a/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json b/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json
index 9eb60fdf8a..87b51dbbbc 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json
@@ -5,7 +5,7 @@
{
"id": "log-logstash-internal-server-gi",
"name": "Log Stash Health Check - General Information",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/?pretty",
+ "http": "http://logstashinternal:9600/?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -14,7 +14,7 @@
{
"id": "log-logstash-internal-server-node-info",
"name": "Log Stash Health Check - Node Information",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/?pretty",
+ "http": "http://logstashinternal:9600/_node/?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -23,7 +23,7 @@
{
"id": "log-logstash-internal-server-os-info",
"name": "Log Stash Health Check - OS Information",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/os?pretty",
+ "http": "http://logstashinternal:9600/_node/os?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -32,7 +32,7 @@
{
"id": "log-logstash-internal-server-jvm-info",
"name": "Log Stash Health Check - JVM Information",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/jvm?pretty",
+ "http": "http://logstashinternal:9600/_node/jvm?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -41,7 +41,7 @@
{
"id": "log-logstash-internal-server-plugin-info",
"name": "Log Stash Health Check - Plugin Information",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/plugins?pretty",
+ "http": "http://logstashinternal:9600/_node/plugins?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -50,7 +50,7 @@
{
"id": "log-logstash-internal-server-node-stat",
"name": "Log Stash Health Check - Node Stats",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats?pretty",
+ "http": "http://logstashinternal:9600/_node/stats?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -59,7 +59,7 @@
{
"id": "log-logstash-internal-server-jvm-stat",
"name": "Log Stash Health Check - JVM Stats",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/jvm?pretty",
+ "http": "http://logstashinternal:9600/_node/stats/jvm?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -68,7 +68,7 @@
{
"id": "log-logstash-internal-server-process-stat",
"name": "Log Stash Health Check - Process Stats",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/process?pretty",
+ "http": "http://logstashinternal:9600/_node/stats/process?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -77,7 +77,7 @@
{
"id": "log-logstash-internal-server-os-stat",
"name": "Log Stash Health Check - OS Stats",
- "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/os?pretty",
+ "http": "http://logstashinternal:9600/_node/stats/os?pretty",
"method": "GET",
"tls_skip_verify": true,
"interval": "15s",
@@ -86,7 +86,7 @@
{
"id": "log-logstash-tcp",
"name": "Log Stash File Beat TCP Health Check",
- "tcp": "logstash.{{ .Values.nsPrefix }}:5044",
+ "tcp": "logstash:5044",
"interval": "15s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json
index aa345ac929..386e226168 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json
@@ -2,7 +2,7 @@
"service": {
"name": "Health Check: Message Router - DMaaP",
"check": {
- "http": "http://message-router.{{ .Release.Namespace }}:3904/topics",
+ "http": "http://message-router:3904/topics",
"interval": "30s",
"timeout": "1s"
}
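
This and the surrounding consul checks drop the .{{ .Values.nsPrefix }} / .{{ .Release.Namespace }} suffixes and rely on cluster DNS resolving bare service names from within the agent's own namespace. A quick resolution check from the consul agent pod; the pod name, the onap namespace, and nslookup being present in the image are all assumptions:

    kubectl -n onap exec -it <consul-agent-pod> -- nslookup message-router
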
diff --git a/kubernetes/consul/resources/config/consul-agent-config/msb-health.json b/kubernetes/consul/resources/config/consul-agent-config/msb-health.json
index d15c2ef829..ad4e422be1 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/msb-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/msb-health.json
@@ -3,34 +3,34 @@
"name": "Health Check: MSB",
"checks": [
{
- "id": "msb-eag.{{ .Values.nsPrefix }}",
+ "id": "msb-eag",
"name": "MSB eag Health Check",
- "http": "http://msb-eag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html",
+ "http": "http://msb-eag:80/iui/microservices/default.html",
"method": "HEAD",
"tls_skip_verify": true,
"interval": "15s",
"timeout": "1s"
},
{
- "id": "msb-iag.{{ .Values.nsPrefix }}",
+ "id": "msb-iag",
"name": "MSB iag Health Check",
- "http": "http://msb-iag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html",
+ "http": "http://msb-iag:80/iui/microservices/default.html",
"method": "HEAD",
"tls_skip_verify": true,
"interval": "15s",
"timeout": "1s"
},
{
- "id": "msb-consul.{{ .Values.nsPrefix }}",
+ "id": "msb-consul",
"name": "MSB consul Health Check",
- "tcp": "msb-consul.{{ .Values.nsPrefix }}:8500",
+ "tcp": "msb-consul:8500",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "msb-discovery.{{ .Values.nsPrefix }}",
+ "id": "msb-discovery",
"name": "MSB discovery Health Check",
- "tcp": "msb-discovery.{{ .Values.nsPrefix }}:10081",
+ "tcp": "msb-discovery:10081",
"interval": "15s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mso-health.json b/kubernetes/consul/resources/config/consul-agent-config/mso-health.json
deleted file mode 100644
index 1df7714a06..0000000000
--- a/kubernetes/consul/resources/config/consul-agent-config/mso-health.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
- "service": {
- "name": "Health Check: MSO",
- "checks": [
- {
- "id": "mso-api-healthcheck",
- "name": "MSO API Health Check",
- "script": "/consul/scripts/mso-api-script.sh",
- "interval": "10s",
- "timeout": "1s"
- },
- {
- "id": "mso-camunda-healthcheck",
- "name": "MSO Camunda Health Check",
- "script": "/consul/scripts/mso-camunda-script.sh",
- "interval": "10s",
- "timeout": "1s"
- },
- {
- "id": "mso-jra-healthcheck",
- "name": "MSO JRA Health Check",
- "script": "/consul/scripts/mso-jra-script.sh",
- "interval": "10s",
- "timeout": "1s"
- }
- ]
- }
-}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json b/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json
deleted file mode 100644
index 54bd2ef1fe..0000000000
--- a/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "service": {
- "name": "Health Check: MSO - MariaDb",
- "checks": [
- {
- "id": "mso-mariadb",
- "name": "MSO Mariadb Health Check",
- "script": "/consul/scripts/mso-mariadb-script.sh",
- "interval": "10s",
- "timeout": "1s"
- }
- ]
-
- }
-}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json b/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json
index dba7c77307..3757f4a6ac 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json
@@ -5,7 +5,7 @@
{
"id": "framework",
"name": "Framework Health Check",
- "http": "http://framework.{{ .Values.nsPrefix }}:9001/api/multicloud/v0/swagger.json",
+ "http": "http://framework:9001/api/multicloud/v0/swagger.json",
"method": "HEAD",
"header": {
"Cache-Control": ["no-cache"],
@@ -19,7 +19,7 @@
{
"id": "multicloud-ocata",
"name": "Multicloud Ocata Health Check",
- "http": "http://multicloud-ocata.{{ .Values.nsPrefix }}:9006/api/multicloud-ocata/v0/swagger.json",
+ "http": "http://multicloud-ocata:9006/api/multicloud-ocata/v0/swagger.json",
"method": "HEAD",
"header": {
"Cache-Control": ["no-cache"],
@@ -33,7 +33,7 @@
{
"id": "multicloud-vio",
"name": "Multicloud Vio Health Check",
- "http": "http://multicloud-vio.{{ .Values.nsPrefix }}:9004/api/multicloud-vio/v0/swagger.json",
+ "http": "http://multicloud-vio:9004/api/multicloud-vio/v0/swagger.json",
"method": "HEAD",
"header": {
"Cache-Control": ["no-cache"],
@@ -47,7 +47,7 @@
{
"id": "multicloud-windriver",
"name": "Multicloud Windriver Health Check",
- "http": "http://multicloud-windriver.{{ .Values.nsPrefix }}:9005/api/multicloud-titanium_cloud/v0/swagger.json",
+ "http": "http://multicloud-windriver:9005/api/multicloud-titanium_cloud/v0/swagger.json",
"method": "HEAD",
"header": {
"Cache-Control": ["no-cache"],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
index 3d26f6e71e..9ee0945d75 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
@@ -1,6 +1,6 @@
-if curl -s -X PUT http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite -d @/consul/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then
- if curl -s -X DELETE http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then
- if curl -s -X GET http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then
+if curl -s -X PUT http://aai-elasticsearch:9200/searchhealth/stats/testwrite -d @/consul/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then
+ if curl -s -X DELETE http://aai-elasticsearch:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then
+ if curl -s -X GET http://aai-elasticsearch:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then
echo Successful PUT, DELETE, GET from Search Document Storage 2>&1
exit 0
else
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
index 5f91c5e89c..112809ce43 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
@@ -1,6 +1,6 @@
-APPC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "appc-dbhost-[^[:space:]]*")
+APPC_DBHOST_POD=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "appc-dbhost-[^[:space:]]*")
if [ -n "$APPC_DBHOST_POD" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
echo Success. APPC DBHost is running. 2>&1
exit 0
else
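
The {{ include "common.namespace" . }} helper resolves to the namespace the release is deployed into, replacing the hand-maintained nsPrefix value. The substitution can be checked locally before deploying (a sketch, assuming Helm 2-style `helm template` and the consul chart's dependencies already fetched):

    # render the chart and confirm the namespace ends up inside the generated script
    helm template --namespace onap kubernetes/consul | grep -A 2 'appc-dbhost-script.sh'
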
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
index 035e7c8a38..c85efc3c17 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
@@ -1,8 +1,8 @@
-NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-data-router[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "aai-data-router[^[:space:]]*")
if [ -n "$NAME" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then
echo Success. Synapse process is running. 2>&1
exit 0
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
index 9a4b4df28f..a8babec102 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
@@ -1,8 +1,8 @@
-NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-model-loader[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "aai-model-loader[^[:space:]]*")
if [ -n "$NAME" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then
echo Success. Model Loader process is running. 2>&1
exit 0
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
index a109032d3b..6630117e92 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
@@ -1,6 +1,6 @@
-kafkapod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-global-kafka-[^[:space:]]*")
+kafkapod=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "message-router-global-kafka-[^[:space:]]*")
if [ -n "$kafkapod" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $kafkapod -- ps ef | grep -i kafka; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $kafkapod -- ps ef | grep -i kafka; then
echo Success. Kafka process is running. 2>&1
exit 0
else
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
index 47c42d54ef..fb34057bfc 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
@@ -1,6 +1,6 @@
-zkpod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*")
+zkpod=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*")
if [ -n "$zkpod" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $zkpod -- ps ef | grep -i zookeeper; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $zkpod -- ps ef | grep -i zookeeper; then
echo Success. Zookeeper process is running. 2>&1
exit 0
else
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
index 00a05648d3..f007e62226 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe:8181/sdc1/rest/healthCheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
## Strip out the ON_BOARDING section from the response XML (otherwise we will
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
index 9950cc9fdf..a9ba31a266 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe:8181/sdc1/rest/healthCheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
## Strip out the ON_BOARDING section from the response XML (otherwise we will
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
index 27f3b224c6..4e3664ba73 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe:8181/sdc1/rest/healthCheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
## Strip out the ON_BOARDING section from the response XML (otherwise we will
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
index c5955f3be3..85fc4319ff 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe:8181/sdc1/rest/healthCheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
## Strip out the ON_BOARDING section from the response XML (otherwise we will
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
index 27b9b9f608..74064816a2 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
@@ -1,6 +1,6 @@
-SDNC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "sdnc-dbhost-[^[:space:]]*")
+SDNC_DBHOST_POD=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "sdnc-dbhost-[^[:space:]]*")
if [ -n "$SDNC_DBHOST_POD" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
echo Success. SDNC DBHost is running. 2>&1
exit 0
else
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
index d5118736cd..12bf9bf061 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-SEARCH_SERVICE_NAME="search-data-service.{{ .Values.nsPrefix }}"
+SEARCH_SERVICE_NAME="search-data-service.{{ include "common.namespace" . }}"
SEARCH_SERVICE_PORT=9509
HEALTH_CHECK_INDEX="healthcheck"
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-api-script.sh
index 341ff193e9..047d2cb6f1 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-api-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/mso/healthcheck"
+HEALTH_CHECK_ENDPOINT="http://so:8080/ecomp/mso/infra/healthcheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-camunda-script.sh
index beeb289517..9ad3ffc5cf 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-camunda-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/networks/rest/healthcheck"
+HEALTH_CHECK_ENDPOINT="http://so:8080/mso/healthcheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-jra-script.sh
index 8f3f85ce5e..96d9c7a765 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-jra-script.sh
@@ -1,5 +1,5 @@
## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/ecomp/mso/infra/healthcheck"
+HEALTH_CHECK_ENDPOINT="http://so:8080/networks/rest/healthcheck"
HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
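
After the renames the three SO checks line up as follows: so-api-script.sh probes /ecomp/mso/infra/healthcheck, so-camunda-script.sh probes /mso/healthcheck, and so-jra-script.sh probes /networks/rest/healthcheck, all against the so service on port 8080. A sketch of exercising them by hand from a pod in the same namespace (curl availability assumed):

    for path in ecomp/mso/infra/healthcheck mso/healthcheck networks/rest/healthcheck; do
      curl -s http://so:8080/$path | grep -c 'Application ready'
    done
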
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-mariadb-script.sh
index aa73a73050..a80a790377 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/so-mariadb-script.sh
@@ -1,7 +1,7 @@
-NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "mso-mariadb[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "so-db[^[:space:]]*")
if [ -n "$NAME" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
echo Success. mariadb process is running. 2>&1
exit 0
else
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
index 7796681902..15175aa80f 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
@@ -1,8 +1,8 @@
-NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-sparky-be[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "aai-sparky-be[^[:space:]]*")
if [ -n "$NAME" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then
echo Success. UI Backend Service process is running. 2>&1
exit 0
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
index dc7768fc6e..fe21db504b 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
@@ -1,6 +1,6 @@
# Query the Hbase service for the cluster status.
-GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster)
+GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://hbase:8080/status/cluster)
if [ -z "$GET_CLUSTER_STATUS_RESPONSE" ]; then
echo "Tabular store is unreachable."
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
index bbb080f44d..bc250113aa 100755
--- a/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
@@ -1,7 +1,7 @@
-NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "vid-mariadb[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "vid-mariadb[^[:space:]]*")
if [ -n "$NAME" ]; then
- if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
echo Success. mariadb process is running. 2>&1
exit 0
else
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json
index ec8ec86c25..105635b592 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json
@@ -33,7 +33,7 @@
{
"id": "sdc-catalog-healthcheck",
"name": "SDC Catalog Health Check",
- "http": "https://sdc-be.{{ .Values.nsPrefix }}:8443/asdc/v1/catalog/services",
+ "http": "https://sdc-be:8443/asdc/v1/catalog/services",
"header": {
"Authorization": ["Basic dmlkOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU="],
"X-ECOMP-InstanceID": ["VID"],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json
index 6ae14afac6..72e6be9093 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json
@@ -5,7 +5,7 @@
{
"id": "sdnc-dgbuilder",
"name": "SDNC-DGbuilder Health Check",
- "http": "http://sdnc-dgbuilder.{{ .Values.nsPrefix }}:3000/",
+ "http": "http://sdnc-dgbuilder:3000/",
"method": "HEAD",
"header": {
"Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json
index 0ee5e8951d..5f42835cf7 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json
@@ -5,7 +5,7 @@
{
"id": "odl-api-healthcheck",
"name": "SDNC API Health Check",
- "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/restconf/operations/SLI-API:healthcheck",
+ "http": "http://sdnc:8282/restconf/operations/SLI-API:healthcheck",
"method": "POST",
"header": {
"Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json
index 092df058e3..d03ce90820 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json
@@ -5,7 +5,7 @@
{
"id": "sdnc-portal",
"name": "SDNC Portal Health Check",
- "http": "http://sdnc-portal.{{ .Values.nsPrefix }}:8843/login",
+ "http": "http://sdnc-portal:8843/login",
"method": "HEAD",
"header": {
"Cache-Control": ["no-cache"],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
index ed4a29d7f6..e684c09da8 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
@@ -3,9 +3,9 @@
"name": "Health Check: SDNC-SDN-CTL-DB-01",
"checks": [
{
- "id": "sdnctldb01.{{ .Values.nsPrefix }}",
+ "id": "sdnctldb01",
"name": "SDNC SDNCTLDB01 Health Check",
- "tcp": "sdnctldb01.{{ .Values.nsPrefix }}:3306",
+ "tcp": "sdnc-dbhost:3306",
"interval": "10s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
index 8c4700b6f0..ae4820a3e6 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
@@ -3,9 +3,9 @@
"name": "Health Check: SDNC-SDN-CTL-DB-02",
"checks": [
{
- "id": "sdnctldb02.{{ .Values.nsPrefix }}",
+ "id": "sdnctldb02",
"name": "SDNC SDNCTLDB02 Health Check",
- "tcp": "sdnctldb02.{{ .Values.nsPrefix }}:3306",
+ "tcp": "sdnc-dbhost:3306",
"interval": "10s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
index 585b9c0b69..604b5be901 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
@@ -5,7 +5,7 @@
{
"id": "sdnc-sdnhost",
"name": "SDNC SDN Host Health Check",
- "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html",
+ "http": "http://sdnhost:8282/apidoc/explorer/index.html",
"method": "HEAD",
"header": {
"Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/so-health.json b/kubernetes/consul/resources/config/consul-agent-config/so-health.json
new file mode 100644
index 0000000000..565c4a4a7a
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/so-health.json
@@ -0,0 +1,28 @@
+{
+ "service": {
+ "name": "Health Check: SO",
+ "checks": [
+ {
+ "id": "so-api-healthcheck",
+ "name": "SO API Health Check",
+ "script": "/consul/scripts/so-api-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "so-camunda-healthcheck",
+ "name": "SO Camunda Health Check",
+ "script": "/consul/scripts/so-camunda-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ },
+ {
+ "id": "so-jra-healthcheck",
+ "name": "SO JRA Health Check",
+ "script": "/consul/scripts/so-jra-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+ }
+}
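
Script checks such as these only become active once the JSON is in the Consul agent's configuration directory and the agent permits script checks. A sketch of validating and hot-reloading the definition from inside the agent container (the /consul/config mount path is an assumption):

    consul validate /consul/config/so-health.json
    consul reload
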
diff --git a/kubernetes/consul/resources/config/consul-agent-config/so-mariabdb.json b/kubernetes/consul/resources/config/consul-agent-config/so-mariabdb.json
new file mode 100644
index 0000000000..fb554208e4
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/so-mariabdb.json
@@ -0,0 +1,15 @@
+{
+ "service": {
+ "name": "Health Check: SO - MariaDb",
+ "checks": [
+ {
+ "id": "so-mariadb",
+ "name": "SO Mariadb Health Check",
+ "script": "/consul/scripts/so-mariadb-script.sh",
+ "interval": "10s",
+ "timeout": "1s"
+ }
+ ]
+
+ }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json b/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json
index 25e670c7da..3661ac708b 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json
@@ -3,114 +3,114 @@
"name": "Health Check: VFC",
"checks": [
{
- "id": "vfc-catalog.{{ .Values.nsPrefix }}",
+ "id": "vfc-catalog",
"name": "VFC catalog Health Check",
- "tcp": "vfc-catalog.{{ .Values.nsPrefix }}:8806",
+ "tcp": "vfc-catalog:8806",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-emsdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-emsdriver",
"name": "VFC emsdriver Health Check",
- "tcp": "vfc-emsdriver.{{ .Values.nsPrefix }}:8206",
+ "tcp": "vfc-ems-driver:8206",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-gvnfmdriver",
"name": "VFC gvnfmdriver Health Check",
- "tcp": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}:8484",
+ "tcp": "vfc-generic-vnfm-driver:8484",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-hwvnfmdriver",
"name": "VFC hwvnfmdriver Health Check",
- "tcp": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}:8482",
+ "tcp": "vfc-huawei-vnfm-driver:8482",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-jujudriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-jujudriver",
"name": "VFC jujudriver Health Check",
- "tcp": "vfc-jujudriver.{{ .Values.nsPrefix }}:8483",
+ "tcp": "vfc-juju-vnfm-driver:8483",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-nokiavnfmdriver",
"name": "VFC nokiavnfmdriver Health Check",
- "tcp": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}:8486",
+ "tcp": "vfc-nokia-vnfm-driver:8486",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-nokiav2vnfmdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-nokiav2vnfmdriver",
"name": "VFC nokiav2vnfmdriver Health Check",
- "tcp": "vfc-nokiav2vnfmdriver.{{ .Values.nsPrefix }}:8089",
+ "tcp": "vfc-nokia-v2vnfm-driver:8089",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-nslcm.{{ .Values.nsPrefix }}",
+ "id": "vfc-nslcm",
"name": "VFC nslcm Health Check",
- "tcp": "vfc-nslcm.{{ .Values.nsPrefix }}:8403",
+ "tcp": "vfc-nslcm:8403",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-resmgr.{{ .Values.nsPrefix }}",
+ "id": "vfc-resmgr",
"name": "VFC resmgr Health Check",
- "tcp": "vfc-resmgr.{{ .Values.nsPrefix }}:8480",
+ "tcp": "vfc-resmgr:8480",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-vnflcm.{{ .Values.nsPrefix }}",
+ "id": "vfc-vnflcm",
"name": "VFC vnflcm Health Check",
- "tcp": "vfc-vnflcm.{{ .Values.nsPrefix }}:8801",
+ "tcp": "vfc-vnflcm:8801",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-vnfmgr.{{ .Values.nsPrefix }}",
+ "id": "vfc-vnfmgr",
"name": "VFC vnfmgr Health Check",
- "tcp": "vfc-vnfmgr.{{ .Values.nsPrefix }}:8803",
+ "tcp": "vfc-vnfmgr:8803",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-vnfres.{{ .Values.nsPrefix }}",
+ "id": "vfc-vnfres",
"name": "VFC vnfres Health Check",
- "tcp": "vfc-vnfres.{{ .Values.nsPrefix }}:8802",
+ "tcp": "vfc-vnfres:8802",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-workflow.{{ .Values.nsPrefix }}",
+ "id": "vfc-workflow",
"name": "VFC workflow Health Check",
- "tcp": "vfc-workflow.{{ .Values.nsPrefix }}:10550",
+ "tcp": "vfc-workflow:10550",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}",
+ "id": "vfc-workflowengineactiviti",
"name": "VFC workflow-engine Health Check",
- "tcp": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}:8080",
+ "tcp": "vfc-workflow-engine:8080",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-ztesdncdriver",
"name": "VFC ztesdncdriver Health Check",
- "tcp": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}:8411",
+ "tcp": "vfc-zte-sdnc-driver:8411",
"interval": "15s",
"timeout": "1s"
},
{
- "id": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}",
+ "id": "vfc-ztevnfmdriver",
"name": "VFC ztevnfmdriver Health Check",
- "tcp": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}:8410",
+ "tcp": "vfc-zte-vnfm-driver:8410",
"interval": "15s",
"timeout": "1s"
}
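
Besides dropping the namespace suffix, the tcp targets move to the new per-component service names (vfc-ems-driver, vfc-generic-vnfm-driver, vfc-zte-sdnc-driver, and so on). A sketch of spot-checking one renamed target from the Consul agent pod (assumes the image ships a netcat):

    kubectl -n onap exec -it <consul-agent-pod> -- nc -z -w 1 vfc-catalog 8806 && echo reachable
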
diff --git a/kubernetes/consul/resources/config/consul-agent-config/vid-health.json b/kubernetes/consul/resources/config/consul-agent-config/vid-health.json
index 2dc6f0a9dc..d6d8d4c03d 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/vid-health.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/vid-health.json
@@ -5,7 +5,7 @@
{
"id": "vid-server",
"name": "VID Server Health Check",
- "http": "http://vid-server.{{ .Values.nsPrefix }}:8080/vid/healthCheck",
+ "http": "http://vid:8080/vid/healthCheck",
"method": "GET",
"header": {
"Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
diff --git a/kubernetes/msb/resources/config/log/filebeat/filebeat.yml b/kubernetes/msb/resources/config/log/filebeat/filebeat.yml
index 89c6932577..0bc14ea908 100644
--- a/kubernetes/msb/resources/config/log/filebeat/filebeat.yml
+++ b/kubernetes/msb/resources/config/log/filebeat/filebeat.yml
@@ -21,7 +21,7 @@ output.logstash:
#List of logstash server ip addresses with port number.
#But, in our case, this will be the loadbalancer IP address.
#For the below property to work, the load balancer or Logstash should expose port 5044 to listen for the Filebeat events, or the port in the property should be changed appropriately.
- hosts: ["logstash.{{.Values.nsPrefix}}:5044"]
+ hosts: ["{{.Values.config.logstashServiceName}}:{{.Values.config.logstashPort}}"]
#If enabled, will do load balancing among the available Logstash hosts automatically.
loadbalance: true
diff --git a/kubernetes/msb/values.yaml b/kubernetes/msb/values.yaml
index 2a0dbe2ffa..d9de3fa97e 100644
--- a/kubernetes/msb/values.yaml
+++ b/kubernetes/msb/values.yaml
@@ -20,3 +20,9 @@ global:
repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
loggingRepository: docker.elastic.co
loggingImage: beats/filebeat:5.5.0
+
+# application configuration
+config:
+ logstashServiceName: log-ls
+ logstashPort: 5044
+
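
With the Logstash endpoint driven by values instead of the hard-coded logstash.<nsPrefix> host, the Filebeat output can be repointed without touching the template. A sketch of overriding it at install time (repository alias and release name are placeholders; Helm 2 syntax):

    helm install local/msb --name my-msb --namespace onap \
      --set config.logstashServiceName=log-ls \
      --set config.logstashPort=5044
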
diff --git a/kubernetes/multicloud/charts/multicloud-ocata/templates/NOTES.txt b/kubernetes/multicloud/charts/multicloud-ocata/templates/NOTES.txt
new file mode 100644
index 0000000000..befedf4578
--- /dev/null
+++ b/kubernetes/multicloud/charts/multicloud-ocata/templates/NOTES.txt
@@ -0,0 +1,34 @@
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/multicloud/charts/multicloud-ocata/values.yaml b/kubernetes/multicloud/charts/multicloud-ocata/values.yaml
index db0397561a..a1f72eabcc 100644
--- a/kubernetes/multicloud/charts/multicloud-ocata/values.yaml
+++ b/kubernetes/multicloud/charts/multicloud-ocata/values.yaml
@@ -60,6 +60,9 @@ service:
internalPort: 9006
nodePort: 93
+ingress:
+ enabled: false
+
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
diff --git a/kubernetes/multicloud/charts/multicloud-vio/templates/NOTES.txt b/kubernetes/multicloud/charts/multicloud-vio/templates/NOTES.txt
new file mode 100644
index 0000000000..befedf4578
--- /dev/null
+++ b/kubernetes/multicloud/charts/multicloud-vio/templates/NOTES.txt
@@ -0,0 +1,34 @@
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/multicloud/charts/multicloud-vio/values.yaml b/kubernetes/multicloud/charts/multicloud-vio/values.yaml
index 187f2b9562..4fea235b5b 100644
--- a/kubernetes/multicloud/charts/multicloud-vio/values.yaml
+++ b/kubernetes/multicloud/charts/multicloud-vio/values.yaml
@@ -60,6 +60,9 @@ service:
internalPort: 9004
nodePort: 92
+ingress:
+ enabled: false
+
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
diff --git a/kubernetes/multicloud/charts/multicloud-windriver/templates/NOTES.txt b/kubernetes/multicloud/charts/multicloud-windriver/templates/NOTES.txt
new file mode 100644
index 0000000000..befedf4578
--- /dev/null
+++ b/kubernetes/multicloud/charts/multicloud-windriver/templates/NOTES.txt
@@ -0,0 +1,34 @@
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/multicloud/charts/multicloud-windriver/values.yaml b/kubernetes/multicloud/charts/multicloud-windriver/values.yaml
index 1126284f11..919289c65d 100644
--- a/kubernetes/multicloud/charts/multicloud-windriver/values.yaml
+++ b/kubernetes/multicloud/charts/multicloud-windriver/values.yaml
@@ -60,6 +60,9 @@ liveness:
failureThreshold: 5
enabled: true
+ingress:
+ enabled: false
+
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
diff --git a/kubernetes/multicloud/resources/config/provider-plugin.json b/kubernetes/multicloud/resources/config/provider-plugin.json
new file mode 100644
index 0000000000..0ac4701aa9
--- /dev/null
+++ b/kubernetes/multicloud/resources/config/provider-plugin.json
@@ -0,0 +1,29 @@
+{
+ "openstack": {
+ "vim_type": "openstack",
+ "versions": {
+ "titanium_cloud": {
+ "version": "titanium_cloud",
+ "extra_info_hint": "",
+ "provider_plugin": "multicloud-titanium_cloud"
+ },
+ "ocata": {
+ "version": "ocata",
+ "extra_info_hint": "",
+ "provider_plugin": "multicloud-ocata"
+ }
+ },
+ "provider_plugin": "multicloud-ocata"
+ },
+ "vmware": {
+ "vim_type": "vmware",
+ "versions": {
+ "4.0": {
+ "version": "4.0",
+ "extra_info_hint": "",
+ "provider_plugin": "multicloud-vio"
+ }
+ },
+ "provider_plugin": "multicloud-vio"
+ }
+} \ No newline at end of file
diff --git a/kubernetes/multicloud/templates/NOTES.txt b/kubernetes/multicloud/templates/NOTES.txt
new file mode 100644
index 0000000000..befedf4578
--- /dev/null
+++ b/kubernetes/multicloud/templates/NOTES.txt
@@ -0,0 +1,34 @@
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/multicloud/templates/configmap.yaml b/kubernetes/multicloud/templates/configmap.yaml
index e420fb99f7..dc356a3d9a 100644
--- a/kubernetes/multicloud/templates/configmap.yaml
+++ b/kubernetes/multicloud/templates/configmap.yaml
@@ -29,3 +29,11 @@ metadata:
namespace: {{ include "common.namespace" . }}
data:
{{ tpl (.Files.Glob "resources/config/log/framework/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-provider-plugin-configmap
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/provider-plugin.json").AsConfig . | indent 2 }}
diff --git a/kubernetes/multicloud/templates/deployment.yaml b/kubernetes/multicloud/templates/deployment.yaml
index f42d5f3f8f..3a0671c911 100644
--- a/kubernetes/multicloud/templates/deployment.yaml
+++ b/kubernetes/multicloud/templates/deployment.yaml
@@ -61,6 +61,9 @@ spec:
- mountPath: /opt/multivimbroker/multivimbroker/pub/config/log.yml
name: framework-logconfig
subPath: log.yml
+ - mountPath: /opt/multivimbroker/multivimbroker/pub/config/provider-plugin.json
+ name: provider-plugin
+ subPath: provider-plugin.json
ports:
- containerPort: {{ .Values.service.internalPort }}
# disable liveness probe when breakpoints set in debugger
@@ -104,6 +107,9 @@ spec:
volumes:
- name: framework-log
emptyDir: {}
+ - name: provider-plugin
+ configMap:
+ name: {{ include "common.fullname" . }}-provider-plugin-configmap
- name: framework-data-filebeat
emptyDir: {}
- name: filebeat-conf
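
Using a subPath mount drops only provider-plugin.json into the broker's existing config directory instead of shadowing the whole directory with the ConfigMap. A sketch of confirming the file landed after a rollout (namespace and pod name are placeholders):

    kubectl -n onap exec -it <multicloud-pod> -- \
      cat /opt/multivimbroker/multivimbroker/pub/config/provider-plugin.json
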
diff --git a/kubernetes/onap/values.yaml b/kubernetes/onap/values.yaml
index 470fdfb031..6107f3d44e 100644
--- a/kubernetes/onap/values.yaml
+++ b/kubernetes/onap/values.yaml
@@ -58,6 +58,14 @@ aai:
enabled: true
appc:
enabled: true
+ config:
+ openStackType: OpenStackProvider
+ openStackName: OpenStack
+ openStackKeyStoneUrl: http://localhost:8181/apidoc/explorer/index.html
+ openStackServiceTenantName: default
+ openStackDomain: default
+ openStackUserName: admin
+ openStackEncryptedPassword: admin
clamp:
enabled: true
cli:
diff --git a/kubernetes/policy/resources/config/pe/console.conf b/kubernetes/policy/resources/config/pe/console.conf
index 0251048040..d972d7ca7a 100644
--- a/kubernetes/policy/resources/config/pe/console.conf
+++ b/kubernetes/policy/resources/config/pe/console.conf
@@ -135,12 +135,12 @@ onap_application_name=
#-----------------------ONAP-PORTAL-Properties----------------------
-ONAP_REDIRECT_URL=http://portalapps:8989/ONAPPORTAL/login.htm
-ONAP_REST_URL=http://portalapps:8989/ONAPPORTAL/auxapi
+ONAP_REDIRECT_URL=http://portal-app.{{.Release.Namespace}}:8989/ONAPPORTAL/login.htm
+ONAP_REST_URL=http://portal-app.{{.Release.Namespace}}:8989/ONAPPORTAL/auxapi
ONAP_UEB_URL_LIST=
ONAP_PORTAL_INBOX_NAME=
ONAP_UEB_APP_KEY=
ONAP_UEB_APP_SECRET=
ONAP_UEB_APP_MAILBOX_NAME=
APP_DISPLAY_NAME=ONAP Policy
-ONAP_SHARED_CONTEXT_REST_URL=http://portalapps:8989/ONAPPORTAL/context
+ONAP_SHARED_CONTEXT_REST_URL=http://portal-app.{{.Release.Namespace}}:8989/ONAPPORTAL/context
diff --git a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/fusion.properties b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/fusion.properties
index 876c5c7b9a..9c6fc5104f 100755
--- a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/fusion.properties
+++ b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/fusion.properties
@@ -4,7 +4,7 @@
# validator settings
#default_error_message = Default error message
-login_url_no_ret_val = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/login.htm
+login_url_no_ret_val = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/login.htm
user_attribute_name = user
diff --git a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/openid-connect.properties b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/openid-connect.properties
index 2a33551933..f99b2234ea 100755
--- a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/openid-connect.properties
+++ b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/openid-connect.properties
@@ -1,3 +1,3 @@
-authentication_server_url = http://portal.api.simpledemo.onap.org:8383/openid-connect-server-webapp/
-ecomp_openid_connect_client = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/openid_connect_login
-ecomp_redirect_uri = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/welcome.htm
+authentication_server_url = http://{{.Values.global.portalHostName}}:8383/openid-connect-server-webapp/
+ecomp_openid_connect_client = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/openid_connect_login
+ecomp_redirect_uri = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/welcome.htm
diff --git a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/portal.properties b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/portal.properties
index 6157f29356..7561e3acf6 100755
--- a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/portal.properties
+++ b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/portal.properties
@@ -5,10 +5,10 @@ max.idle.time = 5
user.attribute.name = user_attribute
# for single sign on
-ecomp_redirect_url = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/login.htm
+ecomp_redirect_url = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/login.htm
# URL of the ECOMP Portal REST API
-ecomp_rest_url = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/auxapi
+ecomp_rest_url = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/auxapi
ueb_listeners_enable = true
diff --git a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties
index 47c73cd6a1..109d8d24d2 100755
--- a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties
+++ b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties
@@ -44,7 +44,7 @@ log_cron = 0 * * * * ? *
sessiontimeout_feed_cron = 0 0/5 * * * ? *
#Front end URL
-frontend_url = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/applicationsHome
+frontend_url = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/applicationsHome
# A unique 128-bit value defined to identify a specific version of
@@ -70,7 +70,7 @@ online_user_update_rate = 30
online_user_update_duration = 300
#authenticate user server
-authenticate_user_server=http://portal.api.simpledemo.onap.org:8383/openid-connect-server-webapp/allUsers
+authenticate_user_server=http://{{.Values.global.portalHostName}}:8383/openid-connect-server-webapp/allUsers
#window width threshold to collapse left/right menu when page onload
window_width_threshold_left_menu = 1400
diff --git a/kubernetes/portal/charts/portal-app/templates/service.yaml b/kubernetes/portal/charts/portal-app/templates/service.yaml
index f9339e671a..4ccf2b02b8 100644
--- a/kubernetes/portal/charts/portal-app/templates/service.yaml
+++ b/kubernetes/portal/charts/portal-app/templates/service.yaml
@@ -29,14 +29,14 @@ metadata:
"version": "v2",
"url": "/",
"protocol": "REST"
- "port": "8989",
+ "port": "{{ .Values.service.externalPort }}",
"visualRange":"1"
}
]'
spec:
type: {{ .Values.service.type }}
ports:
- {{if eq .Values.service.type "NodePort" -}}
+ {{- if or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer") }}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
@@ -59,7 +59,7 @@ spec:
- port: {{ .Values.service.externalPort3 }}
targetPort: {{ .Values.service.internalPort3 }}
name: {{ .Values.service.portName }}3
- {{- end}}
+ {{- end }}
selector:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
diff --git a/kubernetes/portal/charts/portal-app/values.yaml b/kubernetes/portal/charts/portal-app/values.yaml
index b0ea028f6f..657fa18039 100644
--- a/kubernetes/portal/charts/portal-app/values.yaml
+++ b/kubernetes/portal/charts/portal-app/values.yaml
@@ -31,9 +31,6 @@ repository: nexus3.onap.org:10001
image: onap/portal-app:2.1-STAGING-latest
pullPolicy: Always
-# flag to enable debugging - application support required
-debugEnabled: false
-
# default number of instances
replicaCount: 1
@@ -54,7 +51,7 @@ readiness:
periodSeconds: 10
service:
- type: NodePort
+ type: LoadBalancer
name: portal-app
portName: portal-app
externalPort: 8989
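
Because the nodePort branch now also matches LoadBalancer, the portal stays reachable through the fixed node port even on clusters where no external load balancer is provisioned, while gaining an external IP where one is. A sketch of checking what was actually assigned (assumes the rendered service name is portal-app):

    kubectl -n onap get svc portal-app \
      -o jsonpath='{.status.loadBalancer.ingress[0].ip}{"\t"}{.spec.ports[0].nodePort}{"\n"}'
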
diff --git a/kubernetes/portal/charts/portal-cassandra/templates/deployment.yaml b/kubernetes/portal/charts/portal-cassandra/templates/deployment.yaml
index 920efbcd49..9e1a57d6fa 100644
--- a/kubernetes/portal/charts/portal-cassandra/templates/deployment.yaml
+++ b/kubernetes/portal/charts/portal-cassandra/templates/deployment.yaml
@@ -63,6 +63,8 @@ spec:
value: "{{ .Values.config.cassandraUsername }}"
- name: CASSPASS
value: "{{ .Values.config.cassandraPassword }}"
+ - name: JVM_OPTS
+ value: "{{ .Values.config.cassandraJvmOpts }}"
- name: POD_IP
valueFrom:
fieldRef:
diff --git a/kubernetes/portal/charts/portal-cassandra/values.yaml b/kubernetes/portal/charts/portal-cassandra/values.yaml
index ae97f33026..ef73954558 100644
--- a/kubernetes/portal/charts/portal-cassandra/values.yaml
+++ b/kubernetes/portal/charts/portal-cassandra/values.yaml
@@ -30,6 +30,7 @@ pullPolicy: Always
config:
cassandraUsername: root
cassandraPassword: Aa123456
+ cassandraJvmOpts: -Xmx2536m -Xms2536m
# default number of instances
replicaCount: 1
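
JVM_OPTS is picked up by the Cassandra entrypoint, so the heap bounds set here should be visible on the running process. A sketch of verifying them (namespace and pod name are placeholders):

    kubectl -n onap exec -it <portal-cassandra-pod> -- ps -ef | grep -o 'Xm[sx][0-9]*[mMgG]' | sort -u
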
diff --git a/kubernetes/portal/charts/portal-mariadb/resources/config/mariadb/oom_updates.sql b/kubernetes/portal/charts/portal-mariadb/resources/config/mariadb/oom_updates.sql
index 0163d317bb..f6bd05f614 100644
--- a/kubernetes/portal/charts/portal-mariadb/resources/config/mariadb/oom_updates.sql
+++ b/kubernetes/portal/charts/portal-mariadb/resources/config/mariadb/oom_updates.sql
@@ -5,20 +5,20 @@ Any updates required by OOM to the portaldb are made here.
while the OOM K8s version has these services split up.
*/
-- app_url is the FE, app_rest_endpoint is the BE
---portal-sdk => doesnt have a node port so this won't work
-update fn_app set app_url = 'http://portal-sdk.simpledemo.onap.org:8990/ONAPPORTALSDK/welcome.htm', app_rest_endpoint = 'http://portal-sdk:8990/ONAPPORTALSDK/api/v2' where app_name = 'xDemo App';
---dmaap-bc => the dmaap-bc chart actually opens 8080 and 8443, not 8989. the chart isnt merged yet either. confirm the service name after bc chart merge
-update fn_app set app_url = 'http://dmaap-bc.simpledemo.onap.org:8989/ECOMPDBCAPP/dbc#/dmaap', app_rest_endpoint = 'http://dmaap-bc:8989/ECOMPDBCAPP/api/v2' where app_name = 'DMaaP Bus Ctrl';
+--portal-sdk => TODO: doesn't open a node port yet
+update fn_app set app_url = 'http://{{.Values.config.portalSdkHostName}}:{{.Values.config.portalSdkPort}}/ONAPPORTALSDK/welcome.htm', app_rest_endpoint = 'http://portal-sdk:8990/ONAPPORTALSDK/api/v2' where app_name = 'xDemo App';
+--dmaap-bc => the dmaap-bc doesn't open a node port..
+update fn_app set app_url = 'http://{{.Values.config.dmaapBcHostName}}:{{.Values.config.dmaapBcPort}}/ECOMPDBCAPP/dbc#/dmaap', app_rest_endpoint = 'http://dmaap-bc:8989/ECOMPDBCAPP/api/v2' where app_name = 'DMaaP Bus Ctrl';
--sdc-be => 8443:30204, 8080:30205
--sdc-fe => 8181:30206, 9443:30207
-update fn_app set app_url = 'http://sdc.api.fe.simpledemo.onap.org:8181/sdc1/portal', app_rest_endpoint = 'http://sdc-be:8080/api/v2' where app_name = 'SDC';
+update fn_app set app_url = 'http://{{.Values.config.sdcFeHostName}}:{{.Values.config.sdcFePort}}/sdc1/portal', app_rest_endpoint = 'http://sdc-be:8080/api/v2' where app_name = 'SDC';
--pap => 8443:30219
-update fn_app set app_url = 'http://policy.api.simpledemo.onap.org:8443/onap/policy', app_rest_endpoint = 'http://pap:8443/onap/api/v2' where app_name = 'Policy';
+update fn_app set app_url = 'http://{{.Values.config.papHostName}}:{{.Values.config.papPort}}/onap/policy', app_rest_endpoint = 'http://pap:8443/onap/api/v2' where app_name = 'Policy';
--vid => 8080:30200
-update fn_app set app_url = 'http://vid.api.simpledemo.onap.org:8080/vid/welcome.htm', app_rest_endpoint = 'http://vid:8080/vid/api/v2' where app_name = 'Virtual Infrastructure Deployment';
---sparky => sparky doesn't open a node port..
-update fn_app set app_url = 'http://aai.api.sparky.simpledemo.onap.org:8080/services/aai/webapp/index.html#/viewInspect', app_rest_endpoint = 'http://aai-sparky-be.{{.Release.Namespace}}:9517/api/v2' where app_name = 'A&AI UI';
+update fn_app set app_url = 'http://{{.Values.config.vidHostName}}:{{.Values.config.vidPort}}/vid/welcome.htm', app_rest_endpoint = 'http://vid:8080/vid/api/v2' where app_name = 'Virtual Infrastructure Deployment';
+--sparky => TODO: sparky doesn't open a node port yet
+update fn_app set app_url = 'http://{{.Values.config.aaiSparkyHostName}}:{{.Values.config.aaiSparkyPort}}/services/aai/webapp/index.html#/viewInspect', app_rest_endpoint = 'http://aai-sparky-be.{{.Release.Namespace}}:9517/api/v2' where app_name = 'A&AI UI';
--cli => 8080:30260
-update fn_app set app_url = 'http://cli.api.simpledemo.onap.org:8080/', app_type = 1 where app_name = 'CLI';
+update fn_app set app_url = 'http://{{.Values.config.cliHostName}}:{{.Values.config.cliPort}}/', app_type = 1 where app_name = 'CLI';
--msb-discovery => 10081:30281 this is clearly incorrect
-update fn_app set app_url = 'http://msb.api.discovery.simpledemo.onap.org:8080/iui/microservices/default.html' where app_name = 'MSB'; \ No newline at end of file
+update fn_app set app_url = 'http://{{.Values.config.msbDiscoveryHostName}}:{{.Values.config.msbDiscoveryPort}}/iui/microservices/default.html' where app_name = 'MSB'; \ No newline at end of file
diff --git a/kubernetes/portal/charts/portal-mariadb/values.yaml b/kubernetes/portal/charts/portal-mariadb/values.yaml
index 8118db330f..7d9b82a629 100644
--- a/kubernetes/portal/charts/portal-mariadb/values.yaml
+++ b/kubernetes/portal/charts/portal-mariadb/values.yaml
@@ -36,7 +36,38 @@ config:
mariadbRootPassword: Aa123456
#The directory where sql files are found in the project's gerrit repo.
sqlSourceDirectory: portal/deliveries
-
+ # sdc frontend assignment for port 8181
+ sdcFePort: "30206"
+ # application's front end hostname. Must be resolvable on the client side environment
+ sdcFeHostName: "sdc.api.fe.simpledemo.onap.org"
+ # policy pap ui assignment for port 8443
+ papPort: "30219"
+ # application's front end hostname. Must be resolvable on the client side environment
+ papHostName: "policy.api.simpledemo.onap.org"
+ # vid ui assignment for port 8080
+ vidPort: "30200"
+ # application's front end hostname. Must be resolvable on the client side environment
+ vidHostName: "vid.api.simpledemo.onap.org"
+ # aai sparky ui assignment for port 8080
+ aaiSparkyPort: "" # TODO: populate with
+ # application's front end hostname. Must be resolvable on the client side environment
+ aaiSparkyHostName: "aai.api.sparky.simpledemo.onap.org"
+ # cli ui assignment for port 8080
+ cliPort: "30260"
+ # application's front end hostname. Must be resolvable on the client side environment
+ cliHostName: "cli.api.simpledemo.onap.org"
+ # portal sdk (demo app) ui assignment for port 8990
+ portalSdkPort: "" # TODO: populate with port
+ # application's front end hostname. Must be resolvable on the client side environment
+ portalSdkHostName: "portal-sdk.simpledemo.onap.org"
+ # dmaap bus controller ui assignment for port ?
+ dmaapBcPort: "" # TODO: populate with
+ # application's front end hostname. Must be resolvable on the client side environment
+ dmaapBcHostName: "dmaap-bc.simpledemo.onap.org"
+ # msb discovery ui assignment for port ?
+ msbDiscoveryPort: "30281"
+ # application's front end hostname. Must be resolvable on the client side environment
+ msbDiscoveryHostName: "msb.api.discovery.simpledemo.onap.org"
# default number of instances
replicaCount: 1
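
None of these simpledemo host names exist in cluster DNS; they only need to resolve on the machine running the browser. A sketch of wiring them to a single Kubernetes node on a workstation (the node IP is a placeholder):

    echo "<node-ip> vid.api.simpledemo.onap.org policy.api.simpledemo.onap.org \
      sdc.api.fe.simpledemo.onap.org cli.api.simpledemo.onap.org \
      msb.api.discovery.simpledemo.onap.org aai.api.sparky.simpledemo.onap.org" | sudo tee -a /etc/hosts
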
diff --git a/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/portal.properties b/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/portal.properties
index 2b199cca5c..f161609087 100755
--- a/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/portal.properties
+++ b/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/portal.properties
@@ -20,11 +20,11 @@ use_rest_for_functional_menu=true
portal.api.impl.class = org.onap.portalapp.service.OnBoardingApiServiceImpl
# CSP Global Log On for single sign on
-ecomp_redirect_url = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/login.htm
+ecomp_redirect_url = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/login.htm
# URL of the ECOMP Portal REST API
-ecomp_rest_url = http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/auxapi
+ecomp_rest_url = http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/auxapi
# Applications do not need to run a UEB listener in 1610.
ueb_listeners_enable = false
diff --git a/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties b/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties
index 531c96cccc..2d92cd3b66 100755
--- a/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties
+++ b/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties
@@ -62,7 +62,7 @@ guard_notebook_url=
#authenticate user server
#TODO: what is this URL supposed to be pointing to? Nothing in portal opens 8383
-authenticate_user_server=http://portal.api.simpledemo.onap.org:8383/openid-connect-server-webapp/allUsers
+authenticate_user_server=http://{{.Values.global.portalHostName}}:8383/openid-connect-server-webapp/allUsers
#cookie domain
cookie_domain = onap.org
diff --git a/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties b/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties
index 547a882c98..738834902c 100644
--- a/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties
+++ b/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties
@@ -19,7 +19,7 @@ security.user.name=widget_user
security.user.password=ENC(IjywcRnI9+nuVEh9+OFFiRWAjBT1n718)
initialization.default.widgets=true
-initialization.widgetData.url=http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/commonWidgets
+initialization.widgetData.url=http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/commonWidgets
## Account Basic Authentication Properties
account.user.name=portal
diff --git a/kubernetes/portal/launch-onap-portal.sh b/kubernetes/portal/launch-onap-portal.sh
deleted file mode 100755
index d9f64c121b..0000000000
--- a/kubernetes/portal/launch-onap-portal.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-usage () { echo "Usage : $0 <namespace> <helm release name>"; }
-
-READY_JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
-
-NAMESPACE=$1
-RELEASE_NAME=$2
-
-if [ ! "$NAMESPACE" ] || [ ! "$RELEASE_NAME" ]
-then
- usage
- exit 1
-fi
-
-PORTAL_POD_NAME=`kubectl get pods --namespace $NAMESPACE --selector=app=portal-app,release=$RELEASE_NAME \
--o jsonpath='{.items[*].metadata.name}' -o jsonpath="$READY_JSONPATH" | grep "Ready=True"|awk -F: '{print $1}' `
-SDC_POD_NAME=`kubectl get pods --namespace $NAMESPACE --selector=app=sdc-fe,release=$RELEASE_NAME \
--o jsonpath='{.items[*].metadata.name}' -o jsonpath="$READY_JSONPATH" | grep "Ready=True" |awk -F: '{print $1}'`
-VID_POD_NAME=`kubectl get pods --namespace $NAMESPACE --selector=app=vid,release=$RELEASE_NAME \
--o jsonpath='{.items[*].metadata.name}' -o jsonpath="$READY_JSONPATH" | grep "Ready=True" |awk -F: '{print $1}'`
-POLICY_POD_NAME=`kubectl get pods --namespace $NAMESPACE --selector=app=pap,release=$RELEASE_NAME \
--o jsonpath='{.items[*].metadata.name}' -o jsonpath="$READY_JSONPATH" | grep "Ready=True" |awk -F: '{print $1}'`
-PORTALSDK_POD_NAME=`kubectl get pods --namespace $NAMESPACE --selector=app=portal-sdk,release=$RELEASE_NAME \
--o jsonpath='{.items[*].metadata.name}' -o jsonpath="$READY_JSONPATH" | grep "Ready=True" |awk -F: '{print $1}'`
-
-#TODO: Add more as testing progresses
-[[ -z "$PORTAL_POD_NAME" ]] && { echo "WARNING: portal-app is not running in your Kubernetes cluster"; }
-[[ -z "$SDC_POD_NAME" ]] && { echo "WARNING: sdc-fe is not running in your Kubernetes cluster"; }
-[[ -z "$VID_POD_NAME" ]] && { echo "WARNING: vid is not running in your Kubernetes cluster"; }
-[[ -z "$POLICY_POD_NAME" ]] && { echo "WARNING: pap is not running in your Kubernetes cluster"; }
-[[ -z "$PORTALSDK_POD_NAME" ]] && { echo "WARNING: portal-sdk is not running in your Kubernetes cluster"; }
-
-if [ ! -z "$PORTAL_POD_NAME" ]
-then
- kubectl -n $NAMESPACE port-forward "$PORTAL_POD_NAME" 8989:8080 &
- PORTAL_PID=$!
-fi
-
-if [ ! -z "$VID_POD_NAME" ]
-then
- kubectl -n $NAMESPACE port-forward "$VID_POD_NAME" 8080:8080 &
- VID_PID=$!
-fi
-
-if [ ! -z "$SDC_POD_NAME" ]
-then
- kubectl -n $NAMESPACE port-forward "$SDC_POD_NAME" 8181:8181 &
- SDC_PID=$!
-fi
-
-if [ ! -z "$POLICY_POD_NAME" ]
-then
- kubectl -n $NAMESPACE port-forward "$POLICY_POD_NAME" 8443:8443 &
- POLICY_PID=$!
-fi
-
-if [ ! -z "$PORTALSDK_POD_NAME" ]
-then
- kubectl -n $NAMESPACE port-forward "$PORTALSDK_POD_NAME" 8990:8080 &
- PORTALSDK_PID=$!
-fi
-
-trap "{ kill -9 $PORTAL_PID $VID_PID $SDC_PID $POLICY_PID $PORTALSDK_PID; exit 0; }" INT
-echo -e $'Press Ctrl+C to exit...\n'
-
-while :
-do
- sleep 60
-done \ No newline at end of file
diff --git a/kubernetes/portal/values.yaml b/kubernetes/portal/values.yaml
index 949efad31b..ebbfbc38e1 100644
--- a/kubernetes/portal/values.yaml
+++ b/kubernetes/portal/values.yaml
@@ -15,6 +15,11 @@
global:
env:
tomcatDir: "/opt/apache-tomcat-8.0.37"
+ # portal frontend port
+ portalPort: "8989"
+ # application's front end hostname. Must be resolvable on the client side environment
+ portalHostName: "portal.api.simpledemo.onap.org"
+
config:
logstashServiceName: log-ls
logstashPort: 5044
diff --git a/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py b/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
index 6e566c3354..f5df9fc32d 100755
--- a/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
+++ b/kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
@@ -17,6 +17,7 @@ GLOBAL_APPC_PASSWORD = "admin"
GLOBAL_ASDC_SERVER_PROTOCOL = "http"
GLOBAL_ASDC_FE_PORT = "8181"
GLOBAL_ASDC_BE_PORT = "8080"
+GLOBAL_ASDC_BE_ONBOARD_PORT = "8081"
GLOBAL_ASDC_BE_USERNAME = "beep"
GLOBAL_ASDC_BE_PASSWORD = "boop"
# clamp info - everything is from the private oam network (also called onap private network)
diff --git a/kubernetes/robot/resources/config/eteshare/config/vm_properties.py b/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
index 7bb8c1c13c..585555312c 100755
--- a/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
+++ b/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
@@ -39,6 +39,7 @@ GLOBAL_INJECTED_REGION = "{{ .Values.openStackRegion }}"
GLOBAL_INJECTED_REMOTE_REPO = "http://gerrit.onap.org/r/testsuite/properties.git"
GLOBAL_INJECTED_SCRIPT_VERSION = "{{ .Values.scriptVersion }}"
GLOBAL_INJECTED_SDC_BE_IP_ADDR = "sdc-be.{{include "common.namespace" .}}"
+GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR = "sdc-onboarding-be.{{include "common.namespace" .}}"
GLOBAL_INJECTED_SDC_FE_IP_ADDR = "sdc-fe.{{include "common.namespace" .}}"
GLOBAL_INJECTED_SDC_IP_ADDR = "N/A"
GLOBAL_INJECTED_SDNC_IP_ADDR = "sdnc.{{include "common.namespace" .}}"
@@ -48,7 +49,7 @@ GLOBAL_INJECTED_UBUNTU_1404_IMAGE = "{{ .Values.ubuntu14Image }}"
GLOBAL_INJECTED_UBUNTU_1604_IMAGE = "{{ .Values.ubuntu16Image }}"
GLOBAL_INJECTED_VID_IP_ADDR = "vid.{{include "common.namespace" .}}"
GLOBAL_INJECTED_VM_FLAVOR = "{{ .Values.openStackFlavourMedium }}"
-GLOBAL_INJECTED_VNFSDK_IP_ADDR = "vnfsdk.{{include "common.namespace" .}}"
+GLOBAL_INJECTED_VNFSDK_IP_ADDR = "refrepo.{{include "common.namespace" .}}"
GLOBAL_INJECTED_PROPERTIES = {
"GLOBAL_INJECTED_AAF_IP_ADDR" : "aaf.{{include "common.namespace" .}}",
@@ -84,11 +85,12 @@ GLOBAL_INJECTED_PROPERTIES = {
"GLOBAL_INJECTED_OPENSTACK_USERNAME" : "{{ .Values.openStackUserName }}",
"GLOBAL_INJECTED_POLICY_IP_ADDR" : "pdp.{{include "common.namespace" .}}",
"GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR" : "drools.{{include "common.namespace" .}}",
- "GLOBAL_INJECTED_PORTAL_IP_ADDR" : "portalapps.{{include "common.namespace" .}}",
+ "GLOBAL_INJECTED_PORTAL_IP_ADDR" : "portal-app.{{include "common.namespace" .}}",
"GLOBAL_INJECTED_PUBLIC_NET_ID" : "{{ .Values.openStackPublicNetId }}",
"GLOBAL_INJECTED_REGION" : "{{ .Values.openStackRegion }}",
"GLOBAL_INJECTED_REMOTE_REPO" : "http://gerrit.onap.org/r/testsuite/properties.git",
"GLOBAL_INJECTED_SDC_BE_IP_ADDR" : "sdc-be.{{include "common.namespace" .}}",
+ "GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR" : "sdc-onboarding-be.{{include "common.namespace" .}}",
"GLOBAL_INJECTED_SDC_FE_IP_ADDR" : "sdc-fe.{{include "common.namespace" .}}",
"GLOBAL_INJECTED_SDC_IP_ADDR" : "N/A",
"GLOBAL_INJECTED_SCRIPT_VERSION" : "{{ .Values.scriptVersion }}",
@@ -99,5 +101,5 @@ GLOBAL_INJECTED_PROPERTIES = {
"GLOBAL_INJECTED_UBUNTU_1604_IMAGE" : "{{.Values.ubuntu16Image}}",
"GLOBAL_INJECTED_VID_IP_ADDR" : "vid.{{include "common.namespace" .}}",
"GLOBAL_INJECTED_VM_FLAVOR" : "{{ .Values.openStackFlavourMedium }}",
- "GLOBAL_INJECTED_VNFSDK_IP_ADDR" : "vnfsdk.{{include "common.namespace" .}}"
+ "GLOBAL_INJECTED_VNFSDK_IP_ADDR" : "refrepo.{{include "common.namespace" .}}"
}
diff --git a/kubernetes/sdc/charts/sdc-be/templates/deployment.yaml b/kubernetes/sdc/charts/sdc-be/templates/deployment.yaml
index 6c491b773d..b08c74edad 100644
--- a/kubernetes/sdc/charts/sdc-be/templates/deployment.yaml
+++ b/kubernetes/sdc/charts/sdc-be/templates/deployment.yaml
@@ -36,12 +36,6 @@ spec:
- /root/ready.py
args:
- --container-name
- - "sdc-es"
- - --container-name
- - "sdc-cs"
- - --container-name
- - "sdc-kb"
- - --container-name
- "sdc-onboarding-be"
env:
- name: NAMESPACE
@@ -58,7 +52,7 @@ spec:
- /root/job_complete.py
args:
- --job-name
- - {{ .Release.Name }}-sdc-cs-config-cassandra
+ - {{ .Release.Name }}-sdc-es-config-elasticsearch
env:
- name: NAMESPACE
valueFrom:
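For context, the readiness image used by these init containers ships two helpers: ready.py waits until pods running the named containers report Ready, while job_complete.py waits until the named Job has completed. The hunk above drops the per-container waits and keys sdc-be off the Elasticsearch configuration Job instead. A condensed sketch of the job-completion pattern as it appears in these charts (the helper scripts' behavior is inferred from their usage here, not from their source):

- name: example-job-completion          # init container; name is illustrative
  image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
  imagePullPolicy: "{{ .Values.global.pullPolicy | default .Values.pullPolicy }}"
  command:
  - /root/job_complete.py
  args:
  - --job-name
  # block until this Job reports completion before the main container starts
  - {{ .Release.Name }}-sdc-es-config-elasticsearch
  env:
  - name: NAMESPACE
    valueFrom:
      fieldRef:
        apiVersion: v1
        fieldPath: metadata.namespace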
diff --git a/kubernetes/sdc/charts/sdc-be/templates/job.yaml b/kubernetes/sdc/charts/sdc-be/templates/job.yaml
index 535d271bb0..c2203d5c20 100644
--- a/kubernetes/sdc/charts/sdc-be/templates/job.yaml
+++ b/kubernetes/sdc/charts/sdc-be/templates/job.yaml
@@ -23,6 +23,7 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
+ backoffLimit: 20
template:
metadata:
labels:
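backoffLimit caps how many times Kubernetes retries a failed Job's pods before marking the Job itself as failed; the default is 6. Raising it to 20 gives the SDC configuration jobs more headroom while their dependencies come up. A minimal Job sketch showing where the field sits (names and image are placeholders):

apiVersion: batch/v1
kind: Job
metadata:
  name: example-config-job        # hypothetical
spec:
  backoffLimit: 20                # retry failed pods up to 20 times before failing the Job
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: config
        image: busybox            # placeholder image
        command: ["sh", "-c", "echo configuring"]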
diff --git a/kubernetes/sdc/charts/sdc-cs/templates/job.yaml b/kubernetes/sdc/charts/sdc-cs/templates/job.yaml
index 5318d9149d..cdccbf40a5 100644
--- a/kubernetes/sdc/charts/sdc-cs/templates/job.yaml
+++ b/kubernetes/sdc/charts/sdc-cs/templates/job.yaml
@@ -23,6 +23,7 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
+ backoffLimit: 20
template:
metadata:
labels:
diff --git a/kubernetes/sdc/charts/sdc-es/templates/job.yaml b/kubernetes/sdc/charts/sdc-es/templates/job.yaml
index 6fa0015066..6fd3bc7a42 100644
--- a/kubernetes/sdc/charts/sdc-es/templates/job.yaml
+++ b/kubernetes/sdc/charts/sdc-es/templates/job.yaml
@@ -23,6 +23,7 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
+ backoffLimit: 20
template:
metadata:
labels:
diff --git a/kubernetes/sdc/charts/sdc-fe/templates/deployment.yaml b/kubernetes/sdc/charts/sdc-fe/templates/deployment.yaml
index dc96fc77cd..bbc779044b 100644
--- a/kubernetes/sdc/charts/sdc-fe/templates/deployment.yaml
+++ b/kubernetes/sdc/charts/sdc-fe/templates/deployment.yaml
@@ -37,10 +37,6 @@ spec:
args:
- --container-name
- "sdc-kb"
- - --container-name
- - "sdc-be"
- - --container-name
- - "sdc-onboarding-be"
env:
- name: NAMESPACE
valueFrom:
diff --git a/kubernetes/sdc/charts/sdc-kb/templates/deployment.yaml b/kubernetes/sdc/charts/sdc-kb/templates/deployment.yaml
index f7467cb621..f33dbe6e38 100644
--- a/kubernetes/sdc/charts/sdc-kb/templates/deployment.yaml
+++ b/kubernetes/sdc/charts/sdc-kb/templates/deployment.yaml
@@ -31,20 +31,20 @@ spec:
release: {{ .Release.Name }}
spec:
initContainers:
- - name: {{ include "common.name" . }}-readiness
+ - name: {{ include "common.name" . }}-job-completion
+ image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+ imagePullPolicy: "{{ .Values.global.pullPolicy | default .Values.pullPolicy }}"
command:
- - /root/ready.py
+ - /root/job_complete.py
args:
- - --container-name
- - "sdc-es"
+ - --job-name
+ - {{ .Release.Name }}-sdc-es-config-elasticsearch
env:
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
- imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
containers:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
diff --git a/kubernetes/sdc/charts/sdc-onboarding-be/templates/deployment.yaml b/kubernetes/sdc/charts/sdc-onboarding-be/templates/deployment.yaml
index 20e2ffa104..328a28dccb 100644
--- a/kubernetes/sdc/charts/sdc-onboarding-be/templates/deployment.yaml
+++ b/kubernetes/sdc/charts/sdc-onboarding-be/templates/deployment.yaml
@@ -31,20 +31,6 @@ spec:
release: {{ .Release.Name }}
spec:
initContainers:
- - name: {{ include "common.name" . }}-readiness
- command:
- - /root/ready.py
- args:
- - --container-name
- - "sdc-cs"
- env:
- - name: NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
- imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- name: {{ include "common.name" . }}-job-completion
image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
imagePullPolicy: "{{ .Values.global.pullPolicy | default .Values.pullPolicy }}"
@@ -52,7 +38,7 @@ spec:
- /root/job_complete.py
args:
- --job-name
- - {{ .Release.Name }}-sdc-cs-config-cassandra
+ - {{ .Release.Name }}-sdc-onboarding-be-cassandra-init
env:
- name: NAMESPACE
valueFrom:
diff --git a/kubernetes/sdc/charts/sdc-onboarding-be/templates/job.yaml b/kubernetes/sdc/charts/sdc-onboarding-be/templates/job.yaml
index d089e9a5d7..35f2ef849c 100644
--- a/kubernetes/sdc/charts/sdc-onboarding-be/templates/job.yaml
+++ b/kubernetes/sdc/charts/sdc-onboarding-be/templates/job.yaml
@@ -23,6 +23,7 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
+ backoffLimit: 20
template:
metadata:
labels:
@@ -31,20 +32,6 @@ spec:
spec:
restartPolicy: Never
initContainers:
- - name: {{ include "common.name" . }}-init-readiness
- image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
- imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command:
- - /root/ready.py
- args:
- - --container-name
- - sdc-cs
- env:
- - name: NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- name: {{ include "common.name" . }}-job-completion
image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
imagePullPolicy: "{{ .Values.global.pullPolicy | default .Values.pullPolicy }}"
diff --git a/kubernetes/sdnc/templates/pv.yaml b/kubernetes/sdnc/templates/pv.yaml
new file mode 100644
index 0000000000..e20e2818a3
--- /dev/null
+++ b/kubernetes/sdnc/templates/pv.yaml
@@ -0,0 +1,84 @@
+{{/*
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+# #
+# # Licensed under the Apache License, Version 2.0 (the "License");
+# # you may not use this file except in compliance with the License.
+# # You may obtain a copy of the License at
+# #
+# # http://www.apache.org/licenses/LICENSE-2.0
+# #
+# # Unless required by applicable law or agreed to in writing, software
+# # distributed under the License is distributed on an "AS IS" BASIS,
+# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# # See the License for the specific language governing permissions and
+# # limitations under the License.
+*/}}
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+{{ $pvNum := default 1 .Values.replicaCount | int }}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data0
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}0
+{{ if gt $pvNum 1 }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data1
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}1
+{{ end }}
+{{ if gt $pvNum 2 }}
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}-data2
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: "{{ include "common.fullname" . }}-data"
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}2
+{{ end }}
+{{- end -}} \ No newline at end of file
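Rendered with the default replicaCount of 1 and the persistence values added to kubernetes/sdnc/values.yaml below, the template above produces a single hostPath volume roughly like the following (a release name of onap is assumed for illustration; common.fullname typically resolves to <release>-sdnc):

kind: PersistentVolume
apiVersion: v1
metadata:
  name: onap-sdnc-data0
  namespace: onap
  labels:
    app: onap-sdnc
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  storageClassName: "onap-sdnc-data"
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /dockerdata-nfs/onap/sdnc/mdsal0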
diff --git a/kubernetes/sdnc/templates/statefulset.yaml b/kubernetes/sdnc/templates/statefulset.yaml
index 55eaf2b4d0..ccf283ff4c 100644
--- a/kubernetes/sdnc/templates/statefulset.yaml
+++ b/kubernetes/sdnc/templates/statefulset.yaml
@@ -102,6 +102,8 @@ spec:
- mountPath: /opt/onap/sdnc/svclogic/config/svclogic.properties
name: onap-sdnc-svclogic-config
subPath: svclogic.properties
+ - mountPath: {{ .Values.persistence.mdsalPath }}
+ name: {{ include "common.fullname" . }}-data
- mountPath: /var/log/onap
name: logs
resources:
@@ -127,6 +129,8 @@ spec:
name: logs
- mountPath: /usr/share/filebeat/data
name: data-filebeat
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
volumes:
- name: localtime
hostPath:
@@ -183,6 +187,19 @@ spec:
- key: svclogic.properties
path: svclogic.properties
mode: 0644
-
- imagePullSecrets:
- - name: "{{ include "common.namespace" . }}-docker-registry-key"
+ {{ if not .Values.persistence.enabled }}
+ - name: {{ include "common.fullname" . }}-data
+ emptyDir: {}
+ {{ else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ include "common.fullname" . }}-data
+ labels:
+ name: {{ include "common.fullname" . }}
+ spec:
+ accessModes: [ {{ .Values.persistence.accessMode }} ]
+ storageClassName: {{ include "common.fullname" . }}-data
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+ {{ end }} \ No newline at end of file
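With persistence enabled, each StatefulSet replica gets its own claim named after the volumeClaimTemplate plus the pod name, and the storageClassName ties those claims to the pre-created hostPath volumes from pv.yaml above. A rough sketch of the claim generated for the first replica (concrete names assume a release called onap and follow the usual <template-name>-<pod-name> convention):

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: onap-sdnc-data-onap-sdnc-0
spec:
  accessModes: [ ReadWriteOnce ]
  storageClassName: onap-sdnc-data     # matches the PVs created in pv.yaml
  resources:
    requests:
      storage: 1Gi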
diff --git a/kubernetes/sdnc/values.yaml b/kubernetes/sdnc/values.yaml
index e3cd8ae4ed..2603cd9fca 100644
--- a/kubernetes/sdnc/values.yaml
+++ b/kubernetes/sdnc/values.yaml
@@ -142,6 +142,28 @@ service:
geoNodePort5: 65
geoNodePort6: 66
+## Persist data to a persistent volume
+persistence:
+ enabled: true
+
+ ## A manually managed Persistent Volume and Claim
+ ## Requires persistence.enabled: true
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ volumeReclaimPolicy: Retain
+
+ ## database data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ accessMode: ReadWriteOnce
+ size: 1Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: sdnc/mdsal
+ mdsalPath: /opt/opendaylight/current/daexim
+
ingress:
enabled: false
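If persistence is not wanted, the StatefulSet above falls back to an emptyDir, so the data mounted at persistence.mdsalPath will not survive pod restarts. A minimal override sketch for that case, assuming sdnc is deployed as a subchart of the umbrella chart (file name is hypothetical):

# overrides.yaml
sdnc:
  persistence:
    enabled: false   # mdsalPath is then backed by emptyDir; data is lost on pod restart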
diff --git a/kubernetes/vfc/resources/config/logging/filebeat/filebeat.yml b/kubernetes/vfc/resources/config/logging/filebeat/filebeat.yml
index 5967963f1b..0bc14ea908 100644
--- a/kubernetes/vfc/resources/config/logging/filebeat/filebeat.yml
+++ b/kubernetes/vfc/resources/config/logging/filebeat/filebeat.yml
@@ -21,8 +21,7 @@ output.logstash:
#List of logstash server ip addresses with port number.
#But, in our case, this will be the loadbalancer IP address.
#For the below property to work, the loadbalancer or logstash should expose port 5044 to listen for the filebeat events, or the port in the property should be changed appropriately.
- #hosts: ["logstash.{{.Values.nsPrefix}}:5044"]
- hosts: ["{{.Release.Name}}-{{.Values.config.logstashServiceName}}.{{.Release.Namespace}}:{{.Values.config.logstashPort}}"]
+ hosts: ["{{.Values.config.logstashServiceName}}:{{.Values.config.logstashPort}}"]
#If enabled, will do load balancing among the available Logstash instances automatically.
loadbalance: true
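The simplified hosts entry leans on Kubernetes DNS: within the same namespace a bare Service name resolves on its own, so the release- and namespace-qualified form was unnecessary. Assuming the usual defaults of log-ls for config.logstashServiceName and 5044 for config.logstashPort, the line renders roughly as:

output.logstash:
  hosts: ["log-ls:5044"]   # bare service name resolves via cluster DNS in the same namespace
  loadbalance: true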
diff --git a/kubernetes/vid/templates/deployment.yaml b/kubernetes/vid/templates/deployment.yaml
index 929c260815..5491808b9c 100644
--- a/kubernetes/vid/templates/deployment.yaml
+++ b/kubernetes/vid/templates/deployment.yaml
@@ -77,13 +77,13 @@ spec:
- name: ASDC_CLIENT_REST_PORT
value: "{{ .Values.config.asdcclientrestport }}"
- name: VID_AAI_HOST
- value: aai-service.{{ include "common.namespace" . }}
+ value: aai.{{ include "common.namespace" . }}
- name: VID_AAI_PORT
value: "{{ .Values.config.vidaaiport }}"
- name: VID_ECOMP_SHARED_CONTEXT_REST_URL
- value: http://portalapps.{{ include "common.namespace" . }}:"{{ .Values.config.onapport }}"/ONAPPORTAL/context
+ value: http://portal-app.{{ include "common.namespace" . }}:{{ .Values.config.onapport }}/ONAPPORTAL/context
- name: VID_MSO_SERVER_URL
- value: http://mso.{{ include "common.namespace" . }}:"{{ .Values.config.msoport }}"
+ value: http://mso.{{ include "common.namespace" . }}:{{ .Values.config.msoport }}
- name: VID_MSO_PASS
value: "{{ .Values.config.vidmsopass }}"
- name: MSO_DME2_SERVER_URL
@@ -91,9 +91,9 @@ spec:
- name: MSO_DME2_ENABLED
value: {{ .Values.global.debugEnabled | default .Values.debugEnabled | quote }}
- name: VID_ECOMP_REDIRECT_URL
- value: http://portalapps.{{ include "common.namespace" . }}:"{{ .Values.config.onapport }}"/ONAPPORTAL/login.htm
+ value: http://portal-app.{{ include "common.namespace" . }}:{{ .Values.config.onapport }}/ONAPPORTAL/login.htm
- name: VID_ECOMP_REST_URL
- value: http://portalapps.{{ include "common.namespace" . }}:"{{ .Values.config.onapport }}"/ONAPPORTAL/auxapi
+ value: http://portal-app.{{ include "common.namespace" . }}:{{ .Values.config.onapport }}/ONAPPORTAL/auxapi
- name: VID_CONTACT_US_LINK
value: "{{ .Values.config.vidcontactuslink }}"
- name: VID_UEB_URL_LIST
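The quoting change in this hunk matters: quoting only the templated port, as the old lines did, leaves literal double quotes inside the rendered URL. Quotes should wrap the whole value or be dropped when the rendered string is already an unambiguous YAML scalar. A before/after sketch with an assumed namespace of onap and port 8989:

# before: literal quotes end up inside the URL
- name: VID_ECOMP_REST_URL
  value: http://portal-app.onap:"8989"/ONAPPORTAL/auxapi
# after: clean URL
- name: VID_ECOMP_REST_URL
  value: http://portal-app.onap:8989/ONAPPORTAL/auxapi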
diff --git a/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/job.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml
index 62ca119ebd..88d6238f35 100644
--- a/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/job.yaml
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml
@@ -12,31 +12,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-apiVersion: batch/v1
-kind: Job
+apiVersion: extensions/v1beta1
+kind: Deployment
metadata:
- name: {{ include "common.fullname" . }}-config
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
- app: {{ include "common.name" . }}-job
+ app: {{ include "common.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
template:
metadata:
labels:
- app: {{ include "common.name" . }}-job
+ app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ name: {{ include "common.name" . }}
spec:
- restartPolicy: Never
- hostname: postgres
+ hostname: {{ include "common.name" . }}
containers:
- - name: {{ include "common.name" . }}-job
+ - args:
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
- env:
- - name: DB_HOST
- value: "{{ include "common.servicename" . }}"
+ name: {{ include "common.name" . }}
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml
index 55221b13f7..f6208cf965 100644
--- a/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml
@@ -32,7 +32,7 @@ spec:
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
{{- end}}
- name: {{ .Values.service.portName }}
+ name: {{ .Values.service.portName | default "http" }}
selector:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
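The default template function substitutes a fallback when the referenced value is empty or unset, so removing portName from values.yaml (as done below) still yields a valid, named service port. A minimal sketch of the template-to-rendered relationship:

ports:
- port: 5432
  targetPort: 5432
  # renders as "http" whenever .Values.service.portName is not set
  name: {{ .Values.service.portName | default "http" }}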
diff --git a/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml
index 4329a222e2..c35a1f9973 100644
--- a/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml
@@ -29,7 +29,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/refrepo/postgres:latest
+image: onap/vnfsdk/refrepo/postgres:latest
pullPolicy: Always
# flag to enable debugging - application support required
@@ -55,8 +55,7 @@ readiness:
service:
type: ClusterIP
- name: vnfsdk-pgset
- portName: vnfsdk-pgset
+ name: postgres
internalPort: 5432
externalPort: 5432
diff --git a/kubernetes/vnfsdk/requirements.yaml b/kubernetes/vnfsdk/requirements.yaml
index caf41498fb..f639633537 100644
--- a/kubernetes/vnfsdk/requirements.yaml
+++ b/kubernetes/vnfsdk/requirements.yaml
@@ -19,6 +19,3 @@ dependencies:
# a part of this chart's package and will not
# be published independently to a repo (at this point)
repository: '@local'
- - name: postgres
- version: ~2.0.0
- repository: '@local'
diff --git a/kubernetes/vnfsdk/templates/deployment.yaml b/kubernetes/vnfsdk/templates/deployment.yaml
index 2e5f40b83c..35bff1e38d 100644
--- a/kubernetes/vnfsdk/templates/deployment.yaml
+++ b/kubernetes/vnfsdk/templates/deployment.yaml
@@ -38,7 +38,7 @@ spec:
- /root/ready.py
args:
- --container-name
- - "{{ index .Values "vnfsdk-postgres" "nameOverride" }}"
+ - "{{ .Values.vnfsdkpostgres.nameOverride }}"
env:
- name: NAMESPACE
valueFrom:
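The rename from vnfsdk-postgres to vnfsdkpostgres in values.yaml (further below) is what allows the simpler lookup here: Go template field access cannot contain hyphens, so a hyphenated key must be read with the index function. Both forms side by side, purely for comparison:

args:
- --container-name
# hyphenated key: only reachable via index
- "{{ index .Values "vnfsdk-postgres" "nameOverride" }}"
# hyphen-free key: plain dotted access works
- "{{ .Values.vnfsdkpostgres.nameOverride }}"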
diff --git a/kubernetes/vnfsdk/templates/service.yaml b/kubernetes/vnfsdk/templates/service.yaml
index 55221b13f7..f6208cf965 100644
--- a/kubernetes/vnfsdk/templates/service.yaml
+++ b/kubernetes/vnfsdk/templates/service.yaml
@@ -32,7 +32,7 @@ spec:
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
{{- end}}
- name: {{ .Values.service.portName }}
+ name: {{ .Values.service.portName | default "http" }}
selector:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
diff --git a/kubernetes/vnfsdk/values.yaml b/kubernetes/vnfsdk/values.yaml
index 009d42d842..887c680515 100644
--- a/kubernetes/vnfsdk/values.yaml
+++ b/kubernetes/vnfsdk/values.yaml
@@ -29,12 +29,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/refrepo:1.1-STAGING-latest
+image: onap/vnfsdk/refrepo:1.1-STAGING-latest
pullPolicy: Always
#subchart name
-vnfsdk-postgres:
- nameOverride: postgres
+vnfsdkpostgres:
+ nameOverride: vnfsdk-postgres
# flag to enable debugging - application support required
debugEnabled: false