Diffstat (limited to 'kubernetes')
19 files changed, 176 insertions, 48 deletions
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/resources/config/config.json b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/resources/config/config.json
new file mode 100644
index 0000000000..3a43f00019
--- /dev/null
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/resources/config/config.json
@@ -0,0 +1,7 @@
+{
+  "url":"https://aaf-sms.{{ include "common.namespace" . }}:10443",
+  "cafile": "/quorumclient/certs/aaf_root_ca.cer",
+  "clientcert":"client.cert",
+  "clientkey":"client.key",
+  "timeout":"10s"
+}
\ No newline at end of file
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
index cacc368df1..9905a3cbee 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/configmap.yaml
@@ -23,5 +23,4 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 data:
-  config.json: |
-    {{ .Values.config | toJson }}
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
\ No newline at end of file
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
index 483d6c5f17..281229f95c 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/templates/statefulset.yaml
@@ -63,6 +63,10 @@ spec:
       - name : {{ include "common.name" . }}
         configMap:
           name: {{ include "common.fullname" . }}
+          items:
+          - key: config.json
+            path: config.json
+            mode: 0755
       - name: {{ include "common.fullname" . }}-auth
         persistentVolumeClaim:
          claimName: {{ include "common.fullname" . }}
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
index b528270eed..768f89fb7e 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
@@ -36,14 +36,6 @@ debugEnabled: false
 
 # application configuration
 # Example:
-config:
-  url: "http://aaf-sms:10443"
-  cafile: "selfsignedca.pem"
-  clientcert: "server.cert"
-  clientkey: "server.key"
-  timeout: "60s"
-  disable_tls: true
-
 # default number of instances
 replicaCount: 3
diff --git a/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
index 4235ad01af..4bdb84fa30 100644
--- a/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/templates/deployment.yaml
@@ -40,14 +40,18 @@ spec:
           - containerPort: {{ .Values.service.internalPort }}
           {{- if eq .Values.liveness.enabled true }}
           livenessProbe:
-            tcpSocket:
+            httpGet:
               port: {{ .Values.service.internalPort }}
+              scheme: HTTPS
+              path: /v1/sms/quorum/status
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
           {{ end -}}
           readinessProbe:
-            tcpSocket:
+            httpGet:
               port: {{ .Values.service.internalPort }}
+              scheme: HTTPS
+              path: /v1/sms/quorum/status
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
           volumeMounts:
diff --git a/kubernetes/aaf/charts/aaf-sms/values.yaml b/kubernetes/aaf/charts/aaf-sms/values.yaml
index fa01b38834..df2b6ab640 100644
--- a/kubernetes/aaf/charts/aaf-sms/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/values.yaml
@@ -38,10 +38,10 @@ debugEnabled: false
 # Example:
 config:
   smsdbaddress: "http://aaf-sms-db:8200"
-  cafile: "/sms/auth/selfsignedca.pem"
-  servercert: "/sms/auth/server.cert"
-  serverkey: "/sms/auth/server.key"
-  disable_tls: true
+  cafile: "/sms/certs/aaf_root_ca.cer"
+  servercert: "/sms/certs/aaf-sms.pub"
+  serverkey: "/sms/certs/aaf-sms.pr"
+  password: "c2VjcmV0bWFuYWdlbWVudHNlcnZpY2VzZWNyZXRwYXNzd29yZA=="
 
 # subchart configuration
 vault:
@@ -57,14 +57,14 @@ affinity: {}
 # probe configuration parameters
 liveness:
   initialDelaySeconds: 10
-  periodSeconds: 20
+  periodSeconds: 30
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
   initialDelaySeconds: 10
-  periodSeconds: 20
+  periodSeconds: 30
 
 service:
   type: NodePort
diff --git a/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties b/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties
index 41676cf869..1763a8eda9 100644
--- a/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties
+++ b/kubernetes/aai/charts/aai-resources/resources/config/aaiconfig.properties
@@ -103,7 +103,7 @@ aai.transaction.logging=true
 aai.transaction.logging.get=false
 aai.transaction.logging.post=false
 
-aai.realtime.clients=SDNC,MSO,SO
+aai.realtime.clients=SDNC,MSO,SO,robot-ete
 
 #timeout for crud enabled flag
 aai.crud.timeoutenabled=true
diff --git a/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties b/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties
index b095c4cebb..735609b424 100644
--- a/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties
+++ b/kubernetes/aai/charts/aai-traversal/resources/config/aaiconfig.properties
@@ -103,7 +103,7 @@ aai.transaction.logging=true
 aai.transaction.logging.get=false
 aai.transaction.logging.post=false
 
-aai.realtime.clients=SDNC,MSO,SO
+aai.realtime.clients=SDNC,MSO,SO,robot-ete
 
 #timeout for traversal enabled flag
 aai.traversal.timeoutenabled=true
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
index 007d0e15fe..d59d20d736 100644
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/appc.properties
@@ -81,6 +81,7 @@ appc.LCM.client.name=APPC-EVENT-LISTENER-TEST
 appc.LCM.provider.user=admin
 appc.LCM.provider.pass=admin
 appc.LCM.provider.url=http://localhost:8181/restconf/operations/appc-provider-lcm
+appc.LCM.scopeOverlap.endpoint=http://localhost:8181/restconf/operations/interfaces-service:execute-service
 
 # properties from appc-netconf-adapter-bundle, appc-dg-common, appc-dmaap-adapter-bundle
 poolMembers=message-router.{{.Release.Namespace}}:3904
diff --git a/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml b/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
index d631f44f34..ab3ec43eba 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
+++ b/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
@@ -86,7 +86,6 @@ network.host: 0.0.0.0
 # By default, Elasticsearch will bind to the available loopback addresses and will scan ports 9300 to 9305 to try
 # to connect to other nodes running on the same server.
 #
-#discovery.zen.ping.unicast.hosts: ["elasticsearch.{{.Values.nsPrefix}}" #$discovery.zen.ping.unicast.hosts
 #
 # This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes
diff --git a/kubernetes/consul/resources/config/consul-agent-config/policy-health.json b/kubernetes/consul/resources/config/consul-agent-config/policy-health.json
new file mode 100644
index 0000000000..22d135b6dd
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/policy-health.json
@@ -0,0 +1,111 @@
+{
+  "service": {
+    "name": "Health Check: Policy",
+    "checks": [
+      {
+        "id": "Policy-mariadb-healthcheck",
+        "name": "Policy Mariadb Health Check",
+        "script": "/consul/scripts/policy-mariadb-script.sh",
+        "interval": "10s",
+        "timeout": "1s"
+      },
+      {
+        "id": "policy-nexus-local-status",
+        "name": "Policy Nexus Local Status",
+        "http": "http://nexus:8081/nexus/service/local/status?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic YWRtaW46YWRtaW4xMjM="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "policy-nexus-internal-metrics",
+        "name": "Policy Nexus Internal Metrics",
+        "http": "http://nexus:8081/nexus/internal/metrics?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic YWRtaW46YWRtaW4xMjM="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "policy-nexus-internal-healthcheck",
+        "name": "Policy Nexus Internal Healthcheck",
+        "http": "http://nexus:8081/nexus/internal/healthcheck?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic YWRtaW46YWRtaW4xMjM="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "brmsgw-tcp",
+        "name": "BRMSGW Health Check",
+        "tcp": "brmsgw:9989",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "drools",
+        "name": "Drools Health Check",
+        "http": "http://drools:6969/healthcheck?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic aGVhbHRoY2hlY2s6emIhWHp0RzM0"],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "pap",
+        "name": "PAP Health Check",
+        "http": "http://pap:9091/pap/test?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic dGVzdHBhcDphbHBoYTEyMw=="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "pdp",
+        "name": "PDP Health Check",
+        "http": "http://pdp:8081/pdp/test?pretty",
+        "method": "GET",
+        "header": {
+          "Authorization": ["Basic dGVzdHBkcDphbHBoYTEyMw=="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/policy-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/policy-mariadb-script.sh
new file mode 100644
index 0000000000..29dbe3f864
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/policy-mariadb-script.sh
@@ -0,0 +1,14 @@
+NAME=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "[^[:space:]]*-policydb[^[:space:]]*")
+
+  if [ -n "$NAME" ]; then
+    if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+      echo Success. mariadb process is running. 2>&1
+      exit 0
+    else
+      echo Failed. mariadb process is not running. 2>&1
+      exit 1
+    fi
+  else
+    echo Failed. mariadb container is offline. 2>&1
+    exit 1
+  fi
diff --git a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
index 404e059bd7..85f429c23d 100644
--- a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
@@ -75,7 +75,7 @@ componentImages:
   inventory: onap/org.onap.dcaegen2.platform.inventory-api:latest
   policy_handler: onap/org.onap.dcaegen2.platform.policy-handler:latest
   service_change_handler: onap/org.onap.dcaegen2.platform.servicechange-handler:latest
-  tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container.tca-cdap-container:latest
+  tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container:latest
   ves: onap/org.onap.dcaegen2.collectors.ves.vescollector:latest
 
 # Kubernetes namespace for components deployed via Cloudify manager
diff --git a/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml b/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
index 212307cae0..b3bd31726b 100644
--- a/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
+++ b/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
@@ -25,7 +25,6 @@ global:
 #################################################################
 # Application configuration defaults.
 #################################################################
-#nsPrefix: onap
 pullPolicy: Always
 
 # application images
diff --git a/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml b/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml
index abdab8beb5..8bbb01997e 100644
--- a/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml
+++ b/kubernetes/log/charts/log-elasticsearch/resources/config/elasticsearch.yml
@@ -86,7 +86,6 @@ network.host: 0.0.0.0
 # By default, Elasticsearch will bind to the available loopback addresses and will scan ports 9300 to 9305 to try
 # to connect to other nodes running on the same server.
 #
-#discovery.zen.ping.unicast.hosts: ["elasticsearch.{{.Values.nsPrefix}}" #$discovery.zen.ping.unicast.hosts
 #
 # This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes
diff --git a/kubernetes/oof/charts/oof-has/values.yaml b/kubernetes/oof/charts/oof-has/values.yaml
index 30ef637873..25a076b6cc 100755
--- a/kubernetes/oof/charts/oof-has/values.yaml
+++ b/kubernetes/oof/charts/oof-has/values.yaml
@@ -27,7 +27,6 @@ global:
   optf_has: onap/optf-has:latest
   filebeat: docker.elastic.co/beats/filebeat:5.5.0
 
-nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
 dataRootDir: /dockerdata-nfs
diff --git a/kubernetes/portal/charts/portal-mariadb/values.yaml b/kubernetes/portal/charts/portal-mariadb/values.yaml
index ab469848f2..ae5849eb27 100644
--- a/kubernetes/portal/charts/portal-mariadb/values.yaml
+++ b/kubernetes/portal/charts/portal-mariadb/values.yaml
@@ -77,14 +77,14 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 10
+  initialDelaySeconds: 300
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 10
+  initialDelaySeconds: 300
   periodSeconds: 10
 
 ## Persist data to a persitent volume
diff --git a/kubernetes/robot/demo-k8s.sh b/kubernetes/robot/demo-k8s.sh
index de3362740e..f5e4398940 100755
--- a/kubernetes/robot/demo-k8s.sh
+++ b/kubernetes/robot/demo-k8s.sh
@@ -69,7 +69,7 @@ do
 			VARIABLES="$VARIABLES -v WEB_PASSWORD:$WEB_PASSWORD"
 			shift
 			if [ $# -eq 2 ];then
-				VARIABLES="$VARIABLES -v HOSTS_PREFIX:$2"
+				VARIABLES="$VARIABLES -v HOSTS_PREFIX:$1"
 			fi
 			shift
 			;;
@@ -85,7 +85,7 @@ do
 		TAG="InitDistribution"
 		shift
 		if [ $# -eq 1 ];then
-			VARIABLES="$VARIABLES -v DEMO_PREFIX:$2"
+			VARIABLES="$VARIABLES -v DEMO_PREFIX:$1"
 		fi
 		shift
 		;;
@@ -93,24 +93,24 @@ do
 		TAG="PreloadDemo"
 		shift
 		if [ $# -ne 2 ];then
-			echo "Usage: demo.sh preload <vnf_name> <module_name>"
+			echo "Usage: demo.sh <namespace> preload <vnf_name> <module_name>"
 			exit
 		fi
-		VARIABLES="$VARIABLES -v VNF_NAME:$2"
+		VARIABLES="$VARIABLES -v VNF_NAME:$1"
 		shift
-		VARIABLES="$VARIABLES -v MODULE_NAME:$2"
+		VARIABLES="$VARIABLES -v MODULE_NAME:$1"
 		shift
 		;;
 	appc)
-	TAG="APPCMountPointDemo"
-	shift
-	if [ $# -ne 1 ];then
-		echo "Usage: demo.sh appc <module_name>"
-		exit
-	fi
-	VARIABLES="$VARIABLES -v MODULE_NAME:$2"
-	shift
-	;;
+		TAG="APPCMountPointDemo"
+		shift
+		if [ $# -ne 1 ];then
+			echo "Usage: demo.sh <namespace> appc <module_name>"
+			exit
+		fi
+		VARIABLES="$VARIABLES -v MODULE_NAME:$1"
+		shift
+		;;
 	instantiateVFW)
 		TAG="instantiateVFW"
 		VARIABLES="$VARIABLES -v GLOBAL_BUILD_NUMBER:$$"
@@ -120,10 +120,10 @@ do
 		TAG="deleteVNF"
 		shift
 		if [ $# -ne 1 ];then
-			echo "Usage: demo.sh deleteVNF <module_name from instantiateVFW>"
+			echo "Usage: demo.sh <namespace> deleteVNF <module_name from instantiateVFW>"
 			exit
 		fi
-		VARFILE=$2.py
+		VARFILE=$1.py
 		if [ -e /opt/eteshare/${VARFILE} ]; then
 			VARIABLES="$VARIABLES -V /share/${VARFILE}"
 		else
@@ -136,14 +136,14 @@ do
 		TAG="heatbridge"
 		shift
 		if [ $# -ne 3 ];then
-			echo "Usage: demo.sh heatbridge <stack_name> <service_instance_id> <service>"
+			echo "Usage: demo.sh <namespace> heatbridge <stack_name> <service_instance_id> <service>"
 			exit
 		fi
-		VARIABLES="$VARIABLES -v HB_STACK:$2"
+		VARIABLES="$VARIABLES -v HB_STACK:$1"
 		shift
-		VARIABLES="$VARIABLES -v HB_SERVICE_INSTANCE_ID:$2"
+		VARIABLES="$VARIABLES -v HB_SERVICE_INSTANCE_ID:$1"
 		shift
-		VARIABLES="$VARIABLES -v HB_SERVICE:$2"
+		VARIABLES="$VARIABLES -v HB_SERVICE:$1"
 		shift
 		;;
 	*)
diff --git a/kubernetes/sdnc/templates/statefulset.yaml b/kubernetes/sdnc/templates/statefulset.yaml
index e821406d45..69816dffb4 100644
--- a/kubernetes/sdnc/templates/statefulset.yaml
+++ b/kubernetes/sdnc/templates/statefulset.yaml
@@ -89,7 +89,7 @@ spec:
         - name: SDNC_REPLICAS
           value: "{{ .Values.replicaCount }}"
         - name: MYSQL_HOST
-          value: "{{.Values.mysql.service.name}}.{{.Release.Namespace}}"
+          value: "{{.Release.Name}}-{{.Values.mysql.nameOverride}}-0.{{.Values.mysql.service.name}}.{{.Release.Namespace}}"
       volumeMounts:
       - mountPath: /etc/localtime
         name: localtime
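
Note on the aaf-sms-quorumclient configmap change above: the inline "config.json: | {{ .Values.config | toJson }}" entry is replaced by "{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}", which reads every file under resources/config/, runs it through the template engine, and emits it under data:. A minimal sketch of the rendered result, assuming only the config.json added in this patch exists under resources/config/ and the release namespace is "onap" (the ConfigMap name below is illustrative, not taken from the patch):

    # hypothetical rendered ConfigMap (illustration only)
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: my-release-aaf-sms-quorumclient   # actual name comes from common.fullname
    data:
      config.json: |
        {
          "url":"https://aaf-sms.onap:10443",
          "cafile": "/quorumclient/certs/aaf_root_ca.cer",
          "clientcert":"client.cert",
          "clientkey":"client.key",
          "timeout":"10s"
        }

Compared with the previous values-driven toJson approach, the file-glob pattern keeps the quorum client configuration in resources/config/config.json while still allowing Helm templating (here, the namespace) inside the file.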