34 files changed, 253 insertions, 69 deletions
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/resources/config/config.json b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/resources/config/config.json
new file mode 100644
index 0000000000..3a43f00019
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/resources/config/config.json
@@ -0,0 +1,7 @@
+{
+  "url":"https://aaf-sms.{{ include "common.namespace" . }}:10443",
+  "cafile": "/quorumclient/certs/aaf_root_ca.cer",
+  "clientcert":"client.cert",
+  "clientkey":"client.key",
+  "timeout":"10s"
+}
\ No newline at end of file
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
index cacc368df1..9905a3cbee 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
@@ -23,5 +23,4 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 data:
-  config.json: |
-    {{ .Values.config | toJson }}
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
\ No newline at end of file
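For orientation, a minimal sketch of what the new Files.Glob-based ConfigMap would render to once the chart is templated; the namespace and the metadata name shown here are assumed example values, not taken from this change:

apiVersion: v1
kind: ConfigMap
metadata:
  name: onap-aaf-sms-quorumclient   # illustrative common.fullname result, assumed
data:
  config.json: |
    {
      "url":"https://aaf-sms.onap:10443",
      "cafile": "/quorumclient/certs/aaf_root_ca.cer",
      "clientcert":"client.cert",
      "clientkey":"client.key",
      "timeout":"10s"
    }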
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
index 483d6c5f17..281229f95c 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
@@ -63,6 +63,10 @@ spec:
         - name : {{ include "common.name" . }}
           configMap:
             name: {{ include "common.fullname" . }}
+            items:
+            - key: config.json
+              path: config.json
+              mode: 0755
         - name: {{ include "common.fullname" . }}-auth
           persistentVolumeClaim:
             claimName: {{ include "common.fullname" . }}
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
index b528270eed..768f89fb7e 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
@@ -36,14 +36,6 @@ debugEnabled: false
 
 # application configuration
 # Example:
-config:
-  url: "http://aaf-sms:10443"
-  cafile: "selfsignedca.pem"
-  clientcert: "server.cert"
-  clientkey: "server.key"
-  timeout: "60s"
-  disable_tls: true
-
 # default number of instances
 replicaCount: 3
diff --git a/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
index 4235ad01af..4bdb84fa30 100644
--- a/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
@@ -40,14 +40,18 @@ spec:
           - containerPort: {{ .Values.service.internalPort }}
           {{- if eq .Values.liveness.enabled true }}
           livenessProbe:
-            tcpSocket:
+            httpGet:
               port: {{ .Values.service.internalPort }}
+              scheme: HTTPS
+              path: /v1/sms/quorum/status
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
           {{ end -}}
           readinessProbe:
-            tcpSocket:
+            httpGet:
               port: {{ .Values.service.internalPort }}
+              scheme: HTTPS
+              path: /v1/sms/quorum/status
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
           volumeMounts:
diff --git a/kubernetes/aaf/charts/aaf-sms/values.yaml b/kubernetes/aaf/charts/aaf-sms/values.yaml
index fa01b38834..df2b6ab640 100644
--- a/kubernetes/aaf/charts/aaf-sms/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/values.yaml
@@ -38,10 +38,10 @@ debugEnabled: false
 # Example:
 config:
   smsdbaddress: "http://aaf-sms-db:8200"
-  cafile: "/sms/auth/selfsignedca.pem"
-  servercert: "/sms/auth/server.cert"
-  serverkey: "/sms/auth/server.key"
-  disable_tls: true
+  cafile: "/sms/certs/aaf_root_ca.cer"
+  servercert: "/sms/certs/aaf-sms.pub"
+  serverkey: "/sms/certs/aaf-sms.pr"
+  password: "c2VjcmV0bWFuYWdlbWVudHNlcnZpY2VzZWNyZXRwYXNzd29yZA=="
 
 # subchart configuration
 vault:
@@ -57,14 +57,14 @@ affinity: {}
 # probe configuration parameters
 liveness:
   initialDelaySeconds: 10
-  periodSeconds: 20
+  periodSeconds: 30
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
   initialDelaySeconds: 10
-  periodSeconds: 20
+  periodSeconds: 30
 
 service:
   type: NodePort
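As a reading aid for the aaf-sms probe change above, this is roughly how the liveness block would render with the probe values from this values.yaml; the port (10443) is an assumption based on the quorumclient config URL, since service.internalPort is not part of this diff:

livenessProbe:
  httpGet:
    port: 10443          # assumed value of .Values.service.internalPort
    scheme: HTTPS
    path: /v1/sms/quorum/status
  initialDelaySeconds: 10
  periodSeconds: 30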
diff --git a/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties b/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties
index 41676cf869..1763a8eda9 100644
--- a/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties
+++ b/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties
@@ -103,7 +103,7 @@ aai.transaction.logging=true
 aai.transaction.logging.get=false
 aai.transaction.logging.post=false
 
-aai.realtime.clients=SDNC,MSO,SO
+aai.realtime.clients=SDNC,MSO,SO,robot-ete
 
 #timeout for crud enabled flag
 aai.crud.timeoutenabled=true
diff --git a/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties b/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties
index b095c4cebb..735609b424 100644
--- a/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties
+++ b/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties
@@ -103,7 +103,7 @@ aai.transaction.logging=true
 aai.transaction.logging.get=false
 aai.transaction.logging.post=false
 
-aai.realtime.clients=SDNC,MSO,SO
+aai.realtime.clients=SDNC,MSO,SO,robot-ete
 
 #timeout for traversal enabled flag
 aai.traversal.timeoutenabled=true
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
index 007d0e15fe..d59d20d736 100644
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
@@ -81,6 +81,7 @@ appc.LCM.client.name=APPC-EVENT-LISTENER-TEST
 appc.LCM.provider.user=admin
 appc.LCM.provider.pass=admin
 appc.LCM.provider.url=http://localhost:8181/restconf/operations/appc-provider-lcm
+appc.LCM.scopeOverlap.endpoint=http://localhost:8181/restconf/operations/interfaces-service:execute-service
 
 # properties from appc-netconf-adapter-bundle, appc-dg-common, appc-dmaap-adapter-bundle
 poolMembers=message-router.{{.Release.Namespace}}:3904
diff --git a/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml b/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
index d631f44f34..ab3ec43eba 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
+++ b/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
@@ -86,7 +86,6 @@ network.host: 0.0.0.0
 # By default, Elasticsearch will bind to the available loopback addresses and will scan ports 9300 to 9305 to try
 # to connect to other nodes running on the same server.
 #
-#discovery.zen.ping.unicast.hosts: ["elasticsearch.{{.Values.nsPrefix}}"
 #$discovery.zen.ping.unicast.hosts
 #
 # This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes
diff --git a/kubernetes/clamp/charts/clamp-dash-es/values.yaml b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
index 83fb73e9a8..7a8becf66f 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
@@ -32,8 +32,8 @@ busyboxImage: library/busybox:latest
 
 # application image
 loggingRepository: docker.elastic.co
-image: elasticsearch/elasticsearch:5.6.8
-pullPolicy: IfNotPresent
+image: elasticsearch/elasticsearch:5.6.9
+pullPolicy: Always
 
 # flag to enable debugging - application support required
 debugEnabled: false
@@ -81,6 +81,7 @@ persistence:
   size: 4Gi
   mountPath: /dockerdata-nfs
   mountSubPath: clamp/dashboard-elasticsearch/data
+  mountSubPathLogs: clamp
 
 service:
   type: ClusterIP
diff --git a/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml b/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
index 64b515c1d5..60a504d515 100644
--- a/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
@@ -39,8 +39,8 @@ busyboxImage: library/busybox:latest
 
 # application image
 loggingRepository: docker.elastic.co
-image: kibana/kibana:5.6.8
-pullPolicy: IfNotPresent
+image: kibana/kibana:5.6.9
+pullPolicy: Always
 
 # flag to enable debugging - application support required
 debugEnabled: false
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml b/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
index e3463e91ab..bb12210398 100644
--- a/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
@@ -30,8 +30,8 @@ global:
 
 # application image
 loggingRepository: docker.elastic.co
-image: logstash/logstash:5.6.8
-pullPolicy: IfNotPresent
+image: logstash/logstash:5.6.9
+pullPolicy: Always
 
 # flag to enable debugging - application support required
 debugEnabled: false
diff --git a/kubernetes/clamp/resources/config/sdc-controllers-config.json b/kubernetes/clamp/resources/config/sdc-controllers-config.json
new file mode 100644
index 0000000000..3adda95c11
--- /dev/null
+++ b/kubernetes/clamp/resources/config/sdc-controllers-config.json
@@ -0,0 +1,18 @@
+{
+  "sdc-connections":{
+    "sdc-controller":{
+      "user": "clamp",
+      "consumerGroup": "clamp",
+      "consumerId": "clamp",
+      "environmentName": "AUTO",
+      "sdcAddress": "sdc-be.{{ include "common.namespace" . }}:8443",
+      "password": "b7acccda32b98c5bb7acccda32b98c5b05D511BD6D93626E90D18E9D24D9B78CD34C7EE8012F0A189A28763E82271E50A5D4EC10C7D93E06E0A2D27CAE66B981",
+      "pollingInterval":30,
+      "pollingTimeout":30,
+      "activateServerTLSAuth":"false",
+      "keyStorePassword":"",
+      "keyStorePath":"",
+      "messageBusAddresses":["message-router.{{ include "common.namespace" . }}"]
+    }
+  }
+}
diff --git a/kubernetes/clamp/templates/configmap.yaml b/kubernetes/clamp/templates/configmap.yaml
index bee8f132ea..7a66c64755 100644
--- a/kubernetes/clamp/templates/configmap.yaml
+++ b/kubernetes/clamp/templates/configmap.yaml
@@ -23,4 +23,5 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 data:
-  spring_application_json: {{ tpl .Values.config.springApplicationJson . | quote }}
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
+  spring_application_json: {{ tpl .Values.config.springApplicationJson . | quote }}
diff --git a/kubernetes/clamp/templates/deployment.yaml b/kubernetes/clamp/templates/deployment.yaml
index 4a3a0f9e4f..38eabeb968 100644
--- a/kubernetes/clamp/templates/deployment.yaml
+++ b/kubernetes/clamp/templates/deployment.yaml
@@ -65,6 +65,10 @@ spec:
               port: {{ .Values.service.internalPort }}
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
+          volumeMounts:
+          - mountPath: /opt/clamp/sdc-controllers-config.json
+            name: {{ include "common.fullname" . }}-config
+            subPath: sdc-controllers-config.json
           env:
             - name: SPRING_APPLICATION_JSON
               valueFrom:
@@ -81,5 +85,12 @@ spec:
         affinity:
{{ toYaml .Values.affinity | indent 10 }}
         {{- end }}
+      volumes:
+        - name: {{ include "common.fullname" . }}-config
+          configMap:
+            name: {{ include "common.fullname" . }}
+            items:
+            - key: sdc-controllers-config.json
+              path: sdc-controllers-config.json
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
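Taken together with the new sdc-controllers-config.json above, a rough sketch of the resulting wiring (illustrative only; the "onap" namespace and the abbreviated values are assumptions, not part of this change): the globbed file ends up in the clamp ConfigMap and is mounted at /opt/clamp/sdc-controllers-config.json, the same path the clamp.config.files.sdcController property references in the values change that follows.

data:
  sdc-controllers-config.json: |
    { "sdc-connections": { "sdc-controller": { "sdcAddress": "sdc-be.onap:8443" } } }   # abbreviated sketch
  spring_application_json: '{ "clamp.config.files.sdcController": "file:/opt/clamp/sdc-controllers-config.json" }'   # abbreviated sketch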
diff --git a/kubernetes/clamp/values.yaml b/kubernetes/clamp/values.yaml
index 06fe3d9d18..71752fcfc6 100644
--- a/kubernetes/clamp/values.yaml
+++ b/kubernetes/clamp/values.yaml
@@ -41,17 +41,18 @@ config:
   dataRootDir: /dockerdata-nfs
   springApplicationJson: >
     {
-      "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clampdb:3306/cldsdb4?autoReconnect=true&connectTimeout=10000&socketTimeout=10000&retriesAllDown=3",
-      "clamp.config.sdc.catalog.url": "https://sdc-be:8443/sdc/v1/catalog/",
-      "clamp.config.sdc.hostUrl": "https://sdc-be:8443/",
-      "clamp.config.sdc.serviceUrl": "https://sdc-be:8443/sdc/v1/catalog/services",
+      "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clampdb.{{ include "common.namespace" . }}:3306/cldsdb4?autoReconnect=true&connectTimeout=10000&socketTimeout=10000&retriesAllDown=3",
+      "clamp.config.sdc.catalog.url": "http://sdc-be.{{ include "common.namespace" . }}:8080/sdc/v1/catalog/",
+      "clamp.config.sdc.hostUrl": "http://sdc-be.{{ include "common.namespace" . }}:8080/",
+      "clamp.config.sdc.serviceUrl": "http://sdc-be.{{ include "common.namespace" . }}:8080/sdc/v1/catalog/services",
       "clamp.config.sdc.serviceUsername": "clamp",
       "clamp.config.sdc.servicePassword": "b7acccda32b98c5bb7acccda32b98c5b05D511BD6D93626E90D18E9D24D9B78CD34C7EE8012F0A189A28763E82271E50A5D4EC10C7D93E06E0A2D27CAE66B981",
-      "clamp.config.dcae.inventory.url": "http://dcaegen2:8080",
-      "clamp.config.dcae.dispatcher.url": "http://dcaegen2:8080",
-      "clamp.config.policy.pdpUrl1": "https://policy-pdp:9091/pdp/ , testpdp, alpha123",
-      "clamp.config.policy.pdpUrl2": "https://policy-pdp:9091/pdp/ , testpdp, alpha123",
-      "clamp.config.policy.papUrl": "https://policy-pap:8443/pap/ , testpap, alpha123",
+      "clamp.config.files.sdcController": "file:/opt/clamp/sdc-controllers-config.json",
+      "clamp.config.dcae.inventory.url": "http://dcaegen2.{{ include "common.namespace" . }}:8080",
+      "clamp.config.dcae.dispatcher.url": "http://dcaegen2.{{ include "common.namespace" . }}:8080",
+      "clamp.config.policy.pdpUrl1": "https://pdp.{{ include "common.namespace" . }}:9091/pdp/ , testpdp, alpha123",
+      "clamp.config.policy.pdpUrl2": "https://pdp.{{ include "common.namespace" . }}:9091/pdp/ , testpdp, alpha123",
+      "clamp.config.policy.papUrl": "https://pap.{{ include "common.namespace" . }}:8443/pap/ , testpap, alpha123",
       "clamp.config.policy.clientKey": "5CE79532B3A2CB4D132FC0C04BF916A7"
     }
diff --git a/kubernetes/consul/resources/config/consul-agent-config/policy-health.json b/kubernetes/consul/resources/config/consul-agent-config/policy-health.json
new file mode 100644
index 0000000000..22d135b6dd
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/policy-health.json
@@ -0,0 +1,111 @@
+{
+  "service": {
+    "name": "Health Check: Policy",
+    "checks": [
+      {
+        "id": "Policy-mariadb-healthcheck",
+        "name": "Policy Mariadb Health Check",
+        "script": "/consul/scripts/policy-mariadb-script.sh",
+        "interval": "10s",
+        "timeout": "1s"
+      },
+      {
+        "id": "policy-nexus-local-status",
+        "name": "Policy Nexus Local Status",
+        "http": "http://nexus:8081/nexus/service/local/status?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic YWRtaW46YWRtaW4xMjM="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "policy-nexus-internal-metrics",
+        "name": "Policy Nexus Internal Metrics",
+        "http": "http://nexus:8081/nexus/internal/metrics?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic YWRtaW46YWRtaW4xMjM="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "policy-nexus-internal-healthcheck",
+        "name": "Policy Nexus Internal Healthcheck",
+        "http": "http://nexus:8081/nexus/internal/healthcheck?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic YWRtaW46YWRtaW4xMjM="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "brmsgw-tcp",
+        "name": "BRMSGW Health Check",
+        "tcp": "brmsgw:9989",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "drools",
+        "name": "Drools Health Check",
+        "http": "http://drools:6969/healthcheck?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic aGVhbHRoY2hlY2s6emIhWHp0RzM0"],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "pap",
+        "name": "PAP Health Check",
+        "http": "http://pap:9091/pap/test?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic dGVzdHBhcDphbHBoYTEyMw=="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "pdp",
+        "name": "PDP Health Check",
+        "http": "http://pdp:8081/pdp/test?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic dGVzdHBkcDphbHBoYTEyMw=="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/policy-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/policy-mariadb-script.sh
new file mode 100644
index 0000000000..29dbe3f864
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/policy-mariadb-script.sh
@@ -0,0 +1,14 @@
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "[^[:space:]]*-policydb[^[:space:]]*")
+
+  if [ -n "$NAME" ]; then
+    if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+      echo Success. mariadb process is running. 2>&1
+      exit 0
+    else
+      echo Failed. mariadb process is not running. 2>&1
+      exit 1
+    fi
+  else
+    echo Failed. mariadb container is offline. 2>&1
+    exit 1
+  fi
diff --git a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
index 404e059bd7..85f429c23d 100644
--- a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
@@ -75,7 +75,7 @@ componentImages:
   inventory: onap/org.onap.dcaegen2.platform.inventory-api:latest
   policy_handler: onap/org.onap.dcaegen2.platform.policy-handler:latest
   service_change_handler: onap/org.onap.dcaegen2.platform.servicechange-handler:latest
-  tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container.tca-cdap-container:latest
+  tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container:latest
   ves: onap/org.onap.dcaegen2.collectors.ves.vescollector:latest
 
 # Kubernetes namespace for components deployed via Cloudify manager
diff --git a/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml b/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
index 212307cae0..b3bd31726b 100644
--- a/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
+++ b/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
@@ -25,7 +25,6 @@ global:
 #################################################################
 # Application configuration defaults.
 #################################################################
-#nsPrefix: onap
 pullPolicy: Always
 
 # application images
diff --git a/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml b/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml
index abdab8beb5..8bbb01997e 100644
--- a/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml
+++ b/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml
@@ -86,7 +86,6 @@ network.host: 0.0.0.0
 # By default, Elasticsearch will bind to the available loopback addresses and will scan ports 9300 to 9305 to try
 # to connect to other nodes running on the same server.
 #
-#discovery.zen.ping.unicast.hosts: ["elasticsearch.{{.Values.nsPrefix}}"
 #$discovery.zen.ping.unicast.hosts
 #
 # This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes
diff --git a/kubernetes/oof/charts/oof-has/values.yaml b/kubernetes/oof/charts/oof-has/values.yaml
index 30ef637873..25a076b6cc 100755
--- a/kubernetes/oof/charts/oof-has/values.yaml
+++ b/kubernetes/oof/charts/oof-has/values.yaml
@@ -27,7 +27,6 @@ global:
   optf_has: onap/optf-has:latest
   filebeat: docker.elastic.co/beats/filebeat:5.5.0
 
-nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
 dataRootDir: /dockerdata-nfs
diff --git a/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh b/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh
index 65ef7053cd..54700078c5 100644
--- a/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh
+++ b/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh
@@ -16,3 +16,7 @@
 
 ${POLICY_HOME}/bin/features enable healthcheck
+${POLICY_HOME}/bin/features enable pooling-dmaap
+${POLICY_HOME}/bin/features enable distributed-locking
+
+${POLICY_HOME}/bin/db-migrator -s pooling -o upgrade
diff --git a/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/feature-pooling-dmaap.conf b/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/feature-pooling-dmaap.conf
new file mode 100644
index 0000000000..82384075b5
--- /dev/null
+++ b/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/feature-pooling-dmaap.conf
@@ -0,0 +1,15 @@
+# Copyright 2018 AT&T Intellectual Property. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+POOLING_TOPIC=POOLING
diff --git a/kubernetes/policy/charts/drools/templates/statefulset.yaml b/kubernetes/policy/charts/drools/templates/statefulset.yaml
index 9041478c8c..68b415e989 100644
--- a/kubernetes/policy/charts/drools/templates/statefulset.yaml
+++ b/kubernetes/policy/charts/drools/templates/statefulset.yaml
@@ -88,6 +88,9 @@ spec:
         - mountPath: /tmp/policy-install/config/feature-healthcheck.conf
           name: drools-secret
           subPath: feature-healthcheck.conf
+        - mountPath: /tmp/policy-install/config/feature-pooling-dmaap.conf
+          name: drools-config
+          subPath: feature-pooling-dmaap.conf
         - mountPath: /tmp/policy-install/config/base.conf
           name: drools-config
           subPath: base.conf
@@ -160,6 +163,9 @@ spec:
           - key: base.conf
             path: base.conf
            mode: 0755
+          - key: feature-pooling-dmaap.conf
+            path: feature-pooling-dmaap.conf
+            mode: 0755
           - key: policy-management.conf
             path: policy-management.conf
             mode: 0755
diff --git a/kubernetes/policy/charts/drools/values.yaml b/kubernetes/policy/charts/drools/values.yaml
index e42e6ce049..ff0d06e160 100644
--- a/kubernetes/policy/charts/drools/values.yaml
+++ b/kubernetes/policy/charts/drools/values.yaml
@@ -51,7 +51,7 @@ liveness:
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
-  enabled: false
+  enabled: true
 
 readiness:
   initialDelaySeconds: 10
diff --git a/kubernetes/policy/charts/mariadb/resources/config/db.sh b/kubernetes/policy/charts/mariadb/resources/config/db.sh
index ead656ef0e..067b3c312f 100644
--- a/kubernetes/policy/charts/mariadb/resources/config/db.sh
+++ b/kubernetes/policy/charts/mariadb/resources/config/db.sh
@@ -14,7 +14,7 @@
 #!/bin/bash -xv
 
-for db in support onap_sdk log
+for db in support onap_sdk log migration operationshistory10 pooling
 do
     mysql -uroot -p"${MYSQL_ROOT_PASSWORD}" --execute "CREATE DATABASE IF NOT EXISTS ${db};"
     mysql -uroot -p"${MYSQL_ROOT_PASSWORD}" --execute "GRANT ALL PRIVILEGES ON \`${db}\`.* TO '${MYSQL_USER}'@'%' ;"
diff --git a/kubernetes/policy/resources/config/pe/pap.conf b/kubernetes/policy/resources/config/pe/pap.conf
index ae9d47c491..3fe9bd0cf9 100644
--- a/kubernetes/policy/resources/config/pe/pap.conf
+++ b/kubernetes/policy/resources/config/pe/pap.conf
@@ -39,7 +39,7 @@ REST_ADMIN_REPOSITORY=repository
 REST_ADMIN_WORKSPACE=workspace
 
 # PDP related properties
-PAP_PDP_URL=http://{{ .Release.Name }}-{{ .Values.global.pdp.nameOverride }}-0.{{ .Values.global.pdp.nameOverride }}.{{ include "common.namespace" . }}.svc.cluster.local:{{ .Values.config.pdpPort }}/pdp/,testpdp,alpha123;http://{{ .Release.Name }}-{{ .Values.global.pdp.nameOverride }}-1.{{ .Values.global.pdp.nameOverride }}.{{ include "common.namespace" . }}.svc.cluster.local:{{ .Values.config.pdpPort }}/pdp/
+PAP_PDP_URL=http://{{ .Release.Name }}-{{ .Values.global.pdp.nameOverride }}-0.{{ .Values.global.pdp.nameOverride }}.{{ include "common.namespace" . }}.svc.cluster.local:{{ .Values.config.pdpPort }}/pdp/,testpdp,alpha123;http://{{ .Release.Name }}-{{ .Values.global.pdp.nameOverride }}-1.{{ .Values.global.pdp.nameOverride }}.{{ include "common.namespace" . }}.svc.cluster.local:{{ .Values.config.pdpPort }}/pdp/,testpdp,alpha123;http://{{ .Release.Name }}-{{ .Values.global.pdp.nameOverride }}-3.{{ .Values.global.pdp.nameOverride }}.{{ include "common.namespace" . }}.svc.cluster.local:{{ .Values.config.pdpPort }}/pdp/,testpdp,alpha123;http://{{ .Release.Name }}-{{ .Values.global.pdp.nameOverride }}-4.{{ .Values.global.pdp.nameOverride }}.{{ include "common.namespace" . }}.svc.cluster.local:{{ .Values.config.pdpPort }}/pdp/
 PAP_PDP_HTTP_USER_ID=testpdp
 PAP_PDP_HTTP_PASSWORD=alpha123
diff --git a/kubernetes/portal/charts/portal-mariadb/values.yaml b/kubernetes/portal/charts/portal-mariadb/values.yaml
index ab469848f2..ae5849eb27 100644
--- a/kubernetes/portal/charts/portal-mariadb/values.yaml
+++ b/kubernetes/portal/charts/portal-mariadb/values.yaml
@@ -77,14 +77,14 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 10
+  initialDelaySeconds: 300
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 10
+  initialDelaySeconds: 300
   periodSeconds: 10
 
 ## Persist data to a persitent volume
diff --git a/kubernetes/robot/demo-k8s.sh b/kubernetes/robot/demo-k8s.sh
index de3362740e..f5e4398940 100755
--- a/kubernetes/robot/demo-k8s.sh
+++ b/kubernetes/robot/demo-k8s.sh
@@ -69,7 +69,7 @@ do
 			VARIABLES="$VARIABLES -v WEB_PASSWORD:$WEB_PASSWORD"
 			shift
 			if [ $# -eq 2 ];then
-				VARIABLES="$VARIABLES -v HOSTS_PREFIX:$2"
+				VARIABLES="$VARIABLES -v HOSTS_PREFIX:$1"
 			fi
 			shift
 			;;
@@ -85,7 +85,7 @@ do
 			TAG="InitDistribution"
 			shift
 			if [ $# -eq 1 ];then
-				VARIABLES="$VARIABLES -v DEMO_PREFIX:$2"
+				VARIABLES="$VARIABLES -v DEMO_PREFIX:$1"
 			fi
 			shift
 			;;
@@ -93,24 +93,24 @@ do
 			TAG="PreloadDemo"
 			shift
 			if [ $# -ne 2 ];then
-				echo "Usage: demo.sh preload <vnf_name> <module_name>"
+				echo "Usage: demo.sh <namespace> preload <vnf_name> <module_name>"
 				exit
 			fi
-			VARIABLES="$VARIABLES -v VNF_NAME:$2"
+			VARIABLES="$VARIABLES -v VNF_NAME:$1"
 			shift
-			VARIABLES="$VARIABLES -v MODULE_NAME:$2"
+			VARIABLES="$VARIABLES -v MODULE_NAME:$1"
 			shift
 			;;
 	appc)
-			TAG="APPCMountPointDemo"
-			shift
-			if [ $# -ne 1 ];then
-				echo "Usage: demo.sh appc <module_name>"
-				exit
-			fi
-			VARIABLES="$VARIABLES -v MODULE_NAME:$2"
-			shift
-			;;
+			TAG="APPCMountPointDemo"
+			shift
+			if [ $# -ne 1 ];then
+				echo "Usage: demo.sh <namespace> appc <module_name>"
+				exit
+			fi
+			VARIABLES="$VARIABLES -v MODULE_NAME:$1"
+			shift
+			;;
 	instantiateVFW)
 			TAG="instantiateVFW"
 			VARIABLES="$VARIABLES -v GLOBAL_BUILD_NUMBER:$$"
@@ -120,10 +120,10 @@ do
 			TAG="deleteVNF"
 			shift
 			if [ $# -ne 1 ];then
-				echo "Usage: demo.sh deleteVNF <module_name from instantiateVFW>"
+				echo "Usage: demo.sh <namespace> deleteVNF <module_name from instantiateVFW>"
 				exit
 			fi
-			VARFILE=$2.py
+			VARFILE=$1.py
 			if [ -e /opt/eteshare/${VARFILE} ]; then
 				VARIABLES="$VARIABLES -V /share/${VARFILE}"
 			else
@@ -136,14 +136,14 @@ do
 			TAG="heatbridge"
 			shift
 			if [ $# -ne 3 ];then
-				echo "Usage: demo.sh heatbridge <stack_name> <service_instance_id> <service>"
+				echo "Usage: demo.sh <namespace> heatbridge <stack_name> <service_instance_id> <service>"
 				exit
 			fi
-			VARIABLES="$VARIABLES -v HB_STACK:$2"
+			VARIABLES="$VARIABLES -v HB_STACK:$1"
 			shift
-			VARIABLES="$VARIABLES -v HB_SERVICE_INSTANCE_ID:$2"
+			VARIABLES="$VARIABLES -v HB_SERVICE_INSTANCE_ID:$1"
 			shift
-			VARIABLES="$VARIABLES -v HB_SERVICE:$2"
+			VARIABLES="$VARIABLES -v HB_SERVICE:$1"
 			shift
 			;;
 	*)
diff --git a/kubernetes/sdc/resources/config/environments/AUTO.json b/kubernetes/sdc/resources/config/environments/AUTO.json
index 85ca2f49d4..ba6fe9705c 100755
--- a/kubernetes/sdc/resources/config/environments/AUTO.json
+++ b/kubernetes/sdc/resources/config/environments/AUTO.json
@@ -62,7 +62,7 @@
     },
     "VnfRepo": {
       "vnfRepoPort": "{{.Values.config.environment.vnfRepoPort}}",
-      "vnfRepoHost": "{{.Values.config.environment.vnfRepoHost}}"
+      "vnfRepoHost": "refrepo.{{include "common.namespace" .}}"
     }
   },
   "override_attributes": {
diff --git a/kubernetes/sdc/values.yaml b/kubernetes/sdc/values.yaml
index 75e9d3e36f..6882692f04 100644
--- a/kubernetes/sdc/values.yaml
+++ b/kubernetes/sdc/values.yaml
@@ -31,7 +31,6 @@ config:
   dcaeUrl: 10.0.2.15
   workflowUrl: 10.0.2.15
   vnfRepoPort: 8702
-  vnfRepoHost: 192.168.50.5
 
 sdc-es:
   service:
diff --git a/kubernetes/sdnc/templates/statefulset.yaml b/kubernetes/sdnc/templates/statefulset.yaml
index e821406d45..69816dffb4 100644
--- a/kubernetes/sdnc/templates/statefulset.yaml
+++ b/kubernetes/sdnc/templates/statefulset.yaml
@@ -89,7 +89,7 @@ spec:
           - name: SDNC_REPLICAS
             value: "{{ .Values.replicaCount }}"
           - name: MYSQL_HOST
-            value: "{{.Values.mysql.service.name}}.{{.Release.Namespace}}"
+            value: "{{.Release.Name}}-{{.Values.mysql.nameOverride}}-0.{{.Values.mysql.service.name}}.{{.Release.Namespace}}"
           volumeMounts:
           - mountPath: /etc/localtime
             name: localtime
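For illustration only, the new MYSQL_HOST template resolves to a pod-specific host of the mysql StatefulSet along these lines; the release name, nameOverride, service name, and namespace shown are assumed example values, not taken from this change:

env:
  - name: MYSQL_HOST
    value: "onap-sdnc-db-0.sdnc-dbhost.onap"   # {{.Release.Name}}-{{nameOverride}}-0.{{service.name}}.{{namespace}}, all values assumed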