diff options
-rw-r--r-- | kubernetes/aai/templates/aai-resources-deployment.yaml | 381 | ||||
-rw-r--r-- | kubernetes/aai/templates/aai-traversal-deployment.yaml | 211 | ||||
-rw-r--r-- | kubernetes/aai/templates/all-services.yaml | 136 | ||||
-rw-r--r-- | kubernetes/clamp/Chart.yaml | 4 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/Chart.yaml | 4 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/NOTES.txt | 19 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf (renamed from kubernetes/clamp/resources/config/mariadb/conf.d/conf1/my.cnf) | 0 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql (renamed from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql) | 0 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql (renamed from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql) | 0 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql (renamed from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql) | 0 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh (renamed from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh) | 0 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/templates/configmap.yaml (renamed from kubernetes/clamp/templates/clamp-mariadb-configmap.yaml) | 8 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/templates/deployment.yaml | 94 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/templates/pv.yaml | 21 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/templates/pvc.yaml | 32 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/templates/secrets.yaml | 13 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/templates/service.yaml | 24 | ||||
-rw-r--r-- | kubernetes/clamp/charts/mariadb/values.yaml | 91 | ||||
-rw-r--r-- | kubernetes/clamp/requirements.yaml | 7 | ||||
-rw-r--r-- | kubernetes/clamp/templates/NOTES.txt | 19 | ||||
-rw-r--r-- | kubernetes/clamp/templates/all-services.yaml | 43 | ||||
-rw-r--r-- | kubernetes/clamp/templates/clamp-deployment.yaml | 52 | ||||
-rw-r--r-- | kubernetes/clamp/templates/clamp-mariadb-deployment.yaml | 62 | ||||
-rw-r--r-- | kubernetes/clamp/templates/clamp-pv-pvc.yaml | 32 | ||||
-rw-r--r-- | kubernetes/clamp/templates/configmap.yaml | 12 | ||||
-rw-r--r-- | kubernetes/clamp/templates/deployment.yaml | 71 | ||||
-rw-r--r-- | kubernetes/clamp/templates/service.yaml | 25 | ||||
-rw-r--r-- | kubernetes/clamp/values.yaml | 96 | ||||
-rw-r--r-- | kubernetes/common/values.yaml | 4 | ||||
-rwxr-xr-x | kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl | bin | 72337373 -> 0 bytes | |||
-rwxr-xr-x | kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh | 17 | ||||
-rwxr-xr-x | kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh | 13 | ||||
-rwxr-xr-x | kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh | 16 | ||||
-rwxr-xr-x | kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh | 13 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-data-router-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-model-loader-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-search-data-service-health.json) | 8 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-services-health.json) | 13 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-sparky-be-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-tabular-backend-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-dbbuilder.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/appc-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json) | 4 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json) | 4 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.crt.pem) | 0 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.key.pem) | 0 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json) | 4 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/log-kibana.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/log-logstash.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json) | 20 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/model-loader.properties (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/model-loader.properties) | 0 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-dmaap-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-kafka-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-zookeeper-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/msb-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json) | 16 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/mso-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-health.json) | 6 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json) | 8 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-doc.txt) | 0 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh | 17 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh | 13 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/data-router-script.sh) | 4 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/model-loader-script.sh) | 4 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-kafka-health.sh) | 4 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-zookeeper-health.sh) | 4 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-api-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-camunda-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-jra-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh) | 4 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh) | 2 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh | 13 | ||||
-rw-r--r--[-rwxr-xr-x] | kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/search-data-service-availability.sh) | 16 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sparky-be-script.sh) | 4 | ||||
-rwxr-xr-x | kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/tabular-db-availability.sh) | 2 | ||||
-rwxr-xr-x[-rw-r--r--] | kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/vid-mariadb-script.sh) | 4 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdc-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json) | 10 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dgbuilder.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json) | 4 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json) | 4 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json) | 2 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/vfc-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json) | 60 | ||||
-rw-r--r-- | kubernetes/consul/resources/config/consul-agent-config/vid-health.json (renamed from kubernetes/config/docker/init/src/config/consul/consul-agent-config/vid-health.json) | 4 | ||||
-rw-r--r-- | kubernetes/consul/templates/consul-agent-configmap.yaml | 25 | ||||
-rw-r--r-- | kubernetes/consul/templates/consul-agent-deployment.yaml | 17 | ||||
-rw-r--r-- | kubernetes/consul/templates/consul-server-deployment.yaml | 9 | ||||
-rw-r--r-- | kubernetes/consul/values.yaml | 5 | ||||
-rw-r--r-- | kubernetes/dcaegen2/templates/pod.yaml | 2 | ||||
-rw-r--r-- | kubernetes/dcaegen2/values.yaml | 5 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/.helmignore | 21 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/Chart.yaml | 4 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/README.md | 14 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/requirements.yaml | 4 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/resources/config/README.txt | 10 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/resources/config/application.properties | 1 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/templates/NOTES.txt | 19 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/templates/configmap.yaml | 7 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/templates/deployment.yaml | 90 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/templates/service.yaml | 39 | ||||
-rw-r--r-- | kubernetes/helm/starters/onap-app/values.yaml | 78 | ||||
-rw-r--r-- | kubernetes/msb/values.yaml | 4 | ||||
-rw-r--r-- | kubernetes/onap/requirements.yaml | 2 | ||||
-rw-r--r-- | kubernetes/onap/values.yaml | 2 | ||||
-rw-r--r-- | kubernetes/robot/.helmignore | 22 | ||||
-rw-r--r-- | kubernetes/robot/Chart.yaml | 4 | ||||
-rw-r--r-- | kubernetes/robot/all-services.yaml | 13 | ||||
-rw-r--r-- | kubernetes/robot/requirements.yaml | 7 | ||||
-rw-r--r-- | kubernetes/robot/templates/NOTES.txt | 19 | ||||
-rw-r--r-- | kubernetes/robot/templates/all-services.yaml | 16 | ||||
-rw-r--r-- | kubernetes/robot/templates/configmap.yaml (renamed from kubernetes/robot/templates/robot-dep-configmap.yaml) | 14 | ||||
-rw-r--r-- | kubernetes/robot/templates/deployment.yaml | 82 | ||||
-rw-r--r-- | kubernetes/robot/templates/robot-deployment.yaml | 63 | ||||
-rw-r--r-- | kubernetes/robot/templates/service.yaml | 25 | ||||
-rw-r--r-- | kubernetes/robot/values.yaml | 82 | ||||
-rw-r--r-- | kubernetes/so/values.yaml | 2 |
121 files changed, 1858 insertions, 673 deletions
diff --git a/kubernetes/aai/templates/aai-resources-deployment.yaml b/kubernetes/aai/templates/aai-resources-deployment.yaml index c07214d958..2e9269ecbe 100644 --- a/kubernetes/aai/templates/aai-resources-deployment.yaml +++ b/kubernetes/aai/templates/aai-resources-deployment.yaml @@ -14,6 +14,387 @@ spec: labels: app: aai-resources name: aai-resources + annotations: + msb.onap.org/service-info: '[ + { + "serviceName": "_aai-cloudInfrastructure", + "version": "v11", + "url": "/aai/v11/cloud-infrastructure", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/cloud-infrastructure" + }, + { + "serviceName": "_aai-cloudInfrastructure", + "version": "v12", + "url": "/aai/v12/cloud-infrastructure", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/cloud-infrastructure" + }, + { + "serviceName": "_aai-cloudInfrastructure", + "version": "v13", + "url": "/aai/v13/cloud-infrastructure", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/cloud-infrastructure" + }, + { + "serviceName": "_aai-business", + "version": "v11", + "url": "/aai/v11/business", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/business" + }, + { + "serviceName": "_aai-business", + "version": "v12", + "url": "/aai/v12/business", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/business" + }, + { + "serviceName": "_aai-business", + "version": "v13", + "url": "/aai/v13/business", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/business" + }, + { + "serviceName": "_aai-actions", + "version": "v11", + "url": "/aai/v11/actions", + "protocol": 
"REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/actions" + }, + { + "serviceName": "_aai-actions", + "version": "v12", + "url": "/aai/v12/actions", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/actions" + }, + { + "serviceName": "_aai-actions", + "version": "v13", + "url": "/aai/v13/actions", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/actions" + }, + { + "serviceName": "_aai-service-design-and-creation", + "version": "v11", + "url": "/aai/v11/service-design-and-creation", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/service-design-and-creation" + }, + { + "serviceName": "_aai-service-design-and-creation", + "version": "v12", + "url": "/aai/v12/service-design-and-creation", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/service-design-and-creation" + }, + { + "serviceName": "_aai-service-design-and-creation", + "version": "v13", + "url": "/aai/v13/service-design-and-creation", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/service-design-and-creation" + }, + { + "serviceName": "_aai-network", + "version": "v11", + "url": "/aai/v11/network", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/network" + }, + { + "serviceName": "_aai-network", + "version": "v12", + "url": "/aai/v12/network", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/network" + }, + { + "serviceName": "_aai-network", + "version": "v13", + "url": "/aai/v13/network", + 
"protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/network" + }, + { + "serviceName": "_aai-externalSystem", + "version": "v11", + "url": "/aai/v11/external-system", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/external-system" + }, + { + "serviceName": "_aai-externalSystem", + "version": "v12", + "url": "/aai/v12/external-system", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/external-system" + }, + { + "serviceName": "_aai-externalSystem", + "version": "v13", + "url": "/aai/v13/external-system", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/external-system" + }, + { + "serviceName": "aai-cloudInfrastructure", + "version": "v11", + "url": "/aai/v11/cloud-infrastructure", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-cloudInfrastructure", + "version": "v12", + "url": "/aai/v12/cloud-infrastructure", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-cloudInfrastructure", + "version": "v13", + "url": "/aai/v13/cloud-infrastructure", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-business", + "version": "v11", + "url": "/aai/v11/business", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-business", + "version": "v12", + "url": "/aai/v12/business", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": 
"aai-business", + "version": "v13", + "url": "/aai/v13/business", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-actions", + "version": "v11", + "url": "/aai/v11/actions", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-actions", + "version": "v12", + "url": "/aai/v12/actions", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-actions", + "version": "v13", + "url": "/aai/v13/actions", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-service-design-and-creation", + "version": "v11", + "url": "/aai/v11/service-design-and-creation", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-service-design-and-creation", + "version": "v12", + "url": "/aai/v12/service-design-and-creation", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-service-design-and-creation", + "version": "v13", + "url": "/aai/v13/service-design-and-creation", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-network", + "version": "v11", + "url": "/aai/v11/network", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-network", + "version": "v12", + "url": "/aai/v12/network", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-network", + "version": "v13", + "url": "/aai/v13/network", + "protocol": "REST", + "port": 
"8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-externalSystem", + "version": "v11", + "url": "/aai/v11/external-system", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-externalSystem", + "version": "v12", + "url": "/aai/v12/external-system", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-externalSystem", + "version": "v13", + "url": "/aai/v13/external-system", + "protocol": "REST", + "port": "8447", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + } + ]' spec: initContainers: - command: diff --git a/kubernetes/aai/templates/aai-traversal-deployment.yaml b/kubernetes/aai/templates/aai-traversal-deployment.yaml index 84a0d0f289..4dbcd7479a 100644 --- a/kubernetes/aai/templates/aai-traversal-deployment.yaml +++ b/kubernetes/aai/templates/aai-traversal-deployment.yaml @@ -14,6 +14,217 @@ spec: labels: app: aai-traversal name: aai-traversal + annotations: + msb.onap.org/service-info: '[ + { + "serviceName": "_aai-generic-query", + "version": "v11", + "url": "/aai/v11/search/generic-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/search/generic-query" + }, + { + "serviceName": "_aai-generic-query", + "version": "v12", + "url": "/aai/v12/search/generic-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/search/generic-query" + }, + { + "serviceName": "_aai-generic-query", + "version": "v13", + "url": "/aai/v13/search/generic-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/search/generic-query" + }, + { + "serviceName": "_aai-nodes-query", + "version": 
"v11", + "url": "/aai/v11/search/nodes-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/search/nodes-query" + }, + { + "serviceName": "_aai-nodes-query", + "version": "v12", + "url": "/aai/v12/search/nodes-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/search/nodes-query" + }, + { + "serviceName": "_aai-nodes-query", + "version": "v13", + "url": "/aai/v13/search/nodes-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/search/nodes-query" + }, + { + "serviceName": "_aai-query", + "version": "v11", + "url": "/aai/v11/query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v11/query" + }, + { + "serviceName": "_aai-query", + "version": "v12", + "url": "/aai/v12/query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v12/query" + }, + { + "serviceName": "_aai-query", + "version": "v13", + "url": "/aai/v13/query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/v13/query" + }, + { + "serviceName": "_aai-named-query", + "url": "/aai/search", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1", + "path": "/aai/search" + }, + { + "serviceName": "aai-generic-query", + "version": "v11", + "url": "/aai/v11/search/generic-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-generic-query", + "version": "v12", + "url": "/aai/v12/search/generic-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + 
"visualRange": "1" + }, + { + "serviceName": "aai-generic-query", + "version": "v13", + "url": "/aai/v13/search/generic-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-nodes-query", + "version": "v11", + "url": "/aai/v11/search/nodes-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-nodes-query", + "version": "v12", + "url": "/aai/v12/search/nodes-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-nodes-query", + "version": "v13", + "url": "/aai/v13/search/nodes-query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-query", + "version": "v11", + "url": "/aai/v11/query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-query", + "version": "v12", + "url": "/aai/v12/query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-query", + "version": "v13", + "url": "/aai/v13/query", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + }, + { + "serviceName": "aai-named-query", + "url": "/aai/search", + "protocol": "REST", + "port": "8446", + "enable_ssl": true, + "lb_policy":"ip_hash", + "visualRange": "1" + } + ]' spec: initContainers: - command: diff --git a/kubernetes/aai/templates/all-services.yaml b/kubernetes/aai/templates/all-services.yaml index 01f24a7dd2..b1ee989b4d 100644 --- a/kubernetes/aai/templates/all-services.yaml +++ b/kubernetes/aai/templates/all-services.yaml @@ -35,142 +35,6 @@ metadata: namespace: "{{ .Values.nsPrefix }}" labels: app: 
aai-service - annotations: - msb.onap.org/service-info: '[ - { - "serviceName": "aai-cloudInfrastructure", - "version": "v11", - "url": "/aai/v11/cloud-infrastructure", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-cloudInfrastructure-deprecated", - "version": "v11", - "url": "/aai/v11/cloud-infrastructure", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/cloud-infrastructure" - }, - { - "serviceName": "aai-business", - "version": "v11", - "url": "/aai/v11/business", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-business-deprecated", - "version": "v11", - "url": "/aai/v11/business", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/business" - }, - { - "serviceName": "aai-search", - "version": "v11", - "url": "/aai/v11/search", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-search-deprecated", - "version": "v11", - "url": "/aai/v11/search", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/search" - }, - { - "serviceName": "aai-actions", - "version": "v11", - "url": "/aai/v11/actions", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-actions-deprecated", - "version": "v11", - "url": "/aai/v11/actions", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/actions" - }, - { - "serviceName": "aai-service-design-and-creation", - "version": "v11", - "url": "/aai/v11/service-design-and-creation", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-service-design-and-creation-deprecated", - "version": "v11", - "url": 
"/aai/v11/service-design-and-creation", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/service-design-and-creation" - }, - { - "serviceName": "aai-network", - "version": "v11", - "url": "/aai/v11/network", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-network-deprecated", - "version": "v11", - "url": "/aai/v11/network", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/network" - }, - { - "serviceName": "aai-externalSystem", - "version": "v11", - "url": "/aai/v11/external-system", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1" - }, - { - "serviceName": "aai-externalSystem-deprecated", - "version": "v11", - "url": "/aai/v11/external-system", - "protocol": "REST", - "port": "8443", - "enable_ssl": true, - "visualRange": "1", - "path": "/aai/v11/external-system" - } - ]' spec: ports: - name: "aai-service-port-8443" diff --git a/kubernetes/clamp/Chart.yaml b/kubernetes/clamp/Chart.yaml index a162ca7dd0..2f68750919 100644 --- a/kubernetes/clamp/Chart.yaml +++ b/kubernetes/clamp/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 -description: A Helm chart for Kubernetes +description: ONAP Clamp name: clamp -version: 1.1.0 +version: 2.0.0 diff --git a/kubernetes/clamp/charts/mariadb/Chart.yaml b/kubernetes/clamp/charts/mariadb/Chart.yaml new file mode 100644 index 0000000000..da9cab3175 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +description: MariaDB Service +name: mariadb +version: 2.0.0 diff --git a/kubernetes/clamp/charts/mariadb/NOTES.txt b/kubernetes/clamp/charts/mariadb/NOTES.txt new file mode 100644 index 0000000000..91d8ed42f1 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/NOTES.txt @@ -0,0 +1,19 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.externalPort }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ template "common.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }} +{{- end }} diff --git a/kubernetes/clamp/resources/config/mariadb/conf.d/conf1/my.cnf b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf index 0be1bd7aa3..0be1bd7aa3 100644 --- a/kubernetes/clamp/resources/config/mariadb/conf.d/conf1/my.cnf +++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf diff --git a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql index 3312daf56f..3312daf56f 100644 --- a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql +++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql diff --git a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql index 112cb2b55e..112cb2b55e 100644 --- a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql +++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql diff --git a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql index 478eaf0e09..478eaf0e09 100644 --- a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql +++ 
b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql diff --git a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh index 4924922c91..4924922c91 100644 --- a/kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh +++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh diff --git a/kubernetes/clamp/templates/clamp-mariadb-configmap.yaml b/kubernetes/clamp/charts/mariadb/templates/configmap.yaml index 9a5fd8f284..8b58aedcb0 100644 --- a/kubernetes/clamp/templates/clamp-mariadb-configmap.yaml +++ b/kubernetes/clamp/charts/mariadb/templates/configmap.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: clamp-entrypoint-initdb-configmap - namespace: {{ .Values.nsPrefix }} + namespace: {{ include "common.namespace" . }} data: {{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/*").AsConfig . | indent 2 }} --- @@ -11,7 +11,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: clamp-entrypoint-drop-configmap - namespace: {{ .Values.nsPrefix }} + namespace: {{ include "common.namespace" . }} data: {{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/drop/*").AsConfig . | indent 2 }} --- @@ -19,7 +19,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: clamp-entrypoint-bulkload-configmap - namespace: {{ .Values.nsPrefix }} + namespace: {{ include "common.namespace" . }} data: {{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/*").AsConfig . | indent 2 }} --- @@ -27,7 +27,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: clamp-mariadb-conf-configmap - namespace: {{ .Values.nsPrefix }} + namespace: {{ include "common.namespace" . 
}} data: {{ tpl (.Files.Glob "resources/config/mariadb/conf.d/conf1/*").AsConfig . | indent 2 }} #{{ end }} diff --git a/kubernetes/clamp/charts/mariadb/templates/deployment.yaml b/kubernetes/clamp/charts/mariadb/templates/deployment.yaml new file mode 100644 index 0000000000..1e17b9b139 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/templates/deployment.yaml @@ -0,0 +1,94 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} + spec: + containers: + - name: {{ include "common.name" . }} + image: {{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }} + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.internalPort }} + # disable liveness probe when breakpoints set in debugger + # so K8s doesn't restart unresponsive container + {{- if eq .Values.liveness.enabled true }} + livenessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.liveness.periodSeconds }} + {{ end -}} + readinessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.readiness.periodSeconds }} + env: + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "common.fullname" . 
}} + key: db-root-password + volumeMounts: + - mountPath: /docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh + name: docker-entrypoint-initdb + subPath: load-sql-files-tests-automation.sh + - mountPath: /etc/localtime + name: localtime + readOnly: true + - mountPath: /docker-entrypoint-initdb.d/drop/ + name: docker-entrypoint-clds-drop + - mountPath: /docker-entrypoint-initdb.d/bulkload/ + name: docker-entrypoint-bulkload + - mountPath: /etc/mysql/conf.d/conf1/ + name: clamp-mariadb-conf + - mountPath: /var/lib/mysql + name: clamp-mariadb-data + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 10 }} + {{- end -}} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 10 }} + {{- end }} + volumes: + - name: clamp-mariadb-data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "common.fullname" . }} + {{- else }} + emptyDir: {} + {{- end }} + - name: docker-entrypoint-initdb + configMap: + name: clamp-entrypoint-initdb-configmap + - name: docker-entrypoint-clds-drop + configMap: + name: clamp-entrypoint-drop-configmap + - name: docker-entrypoint-bulkload + configMap: + name: clamp-entrypoint-bulkload-configmap + - name: clamp-mariadb-conf + configMap: + name: clamp-mariadb-conf-configmap + - name: localtime + hostPath: + path: /etc/localtime + imagePullSecrets: + - name: "{{ include "common.namespace" . }}-docker-registry-key" diff --git a/kubernetes/clamp/charts/mariadb/templates/pv.yaml b/kubernetes/clamp/charts/mariadb/templates/pv.yaml new file mode 100644 index 0000000000..31230a9ed7 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/templates/pv.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}} +kind: PersistentVolume +apiVersion: v1 +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . 
}} + labels: + app: {{ include "common.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + name: {{ include "common.fullname" . }} +spec: + capacity: + storage: {{ .Values.persistence.size}} + accessModes: + - {{ .Values.persistence.accessMode }} + persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }} + hostPath: + path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }} +{{- end -}} diff --git a/kubernetes/clamp/charts/mariadb/templates/pvc.yaml b/kubernetes/clamp/charts/mariadb/templates/pvc.yaml new file mode 100644 index 0000000000..b0cd3bf238 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/templates/pvc.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if .Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + name: {{ include "common.fullname" . 
}} + accessModes: + - {{ .Values.persistence.accessMode }} + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end -}} diff --git a/kubernetes/clamp/charts/mariadb/templates/secrets.yaml b/kubernetes/clamp/charts/mariadb/templates/secrets.yaml new file mode 100644 index 0000000000..4734d1f242 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/templates/secrets.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + db-root-password: {{ .Values.config.mysqlPassword | b64enc | quote }} diff --git a/kubernetes/clamp/charts/mariadb/templates/service.yaml b/kubernetes/clamp/charts/mariadb/templates/service.yaml new file mode 100644 index 0000000000..94ec4694a3 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + ports: + {{if eq .Values.service.type "NodePort" -}} + - port: {{ .Values.service.internalPort }} + nodePort: {{ .Values.global.nodePortPrefix | default "302" }}{{ .Values.service.nodePort }} + {{- else -}} + - port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.service.internalPort }} + {{- end}} + name: {{ .Values.service.name }} + selector: + app: {{ include "common.name" . }} + release: {{ .Release.Name }}
\ No newline at end of file diff --git a/kubernetes/clamp/charts/mariadb/values.yaml b/kubernetes/clamp/charts/mariadb/values.yaml new file mode 100644 index 0000000000..c525fec597 --- /dev/null +++ b/kubernetes/clamp/charts/mariadb/values.yaml @@ -0,0 +1,91 @@ +################################################################# +# Global configuration defaults. +################################################################# +global: # global defaults + nodePortPrefix: 302 + repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== + + persistence: {} + +# application image +repository: nexus3.onap.org:10001 +image: mariadb:10.1.11 +pullPolicy: Always + +################################################################# +# Application configuration defaults. +################################################################# +config: + mysqlPassword: strong_pitchou + +# default number of instances +replicaCount: 1 + +nodeSelector: {} + +affinity: {} + +# probe configuration parameters +liveness: + initialDelaySeconds: 10 + periodSeconds: 10 + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + +readiness: + initialDelaySeconds: 10 + periodSeconds: 10 + +## Persist data to a persistent volume +persistence: + enabled: true + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + volumeReclaimPolicy: Retain + + ## database data Persistent Volume Storage Class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteMany + size: 2Gi + mountPath: /dockerdata-nfs + mountSubPath: clamp/mariadb/data + +service: + type: ClusterIP + name: mariadb + internalPort: 3306 + externalPort: 3306 + + +ingress: + enabled: false + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # + # Example: + # Configure resource requests and limits + # ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # Minimum memory for development is 2 CPU cores and 4GB memory + # Minimum memory for production is 4 CPU cores and 8GB memory +#resources: +# limits: +# cpu: 2 +# memory: 4Gi +# requests: +# cpu: 2 +# memory: 4Gi diff --git a/kubernetes/clamp/requirements.yaml b/kubernetes/clamp/requirements.yaml new file mode 100644 index 0000000000..56029ab047 --- /dev/null +++ b/kubernetes/clamp/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: common + version: ~2.0.0 + # local reference to common chart, as it is + # a part of this chart's package and will not + # be published independently to a repo (at this point) + repository: '@local'
\ No newline at end of file diff --git a/kubernetes/clamp/templates/NOTES.txt b/kubernetes/clamp/templates/NOTES.txt new file mode 100644 index 0000000000..91d8ed42f1 --- /dev/null +++ b/kubernetes/clamp/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.externalPort }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ template "common.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }} +{{- end }} diff --git a/kubernetes/clamp/templates/all-services.yaml b/kubernetes/clamp/templates/all-services.yaml deleted file mode 100644 index c9f4f5ed5f..0000000000 --- a/kubernetes/clamp/templates/all-services.yaml +++ /dev/null @@ -1,43 +0,0 @@ -#{{ if not .Values.disableClampClampMariadb }} -apiVersion: v1 -kind: Service -metadata: - name: clamp-mariadb - namespace: "{{ .Values.nsPrefix }}" - labels: - app: clamp-mariadb -spec: - ports: - - name: clamp-mariadb - port: 3306 - selector: - app: clamp-mariadb - clusterIP: None -#{{ end }} -#{{ if not .Values.disableClampClamp }} ---- -apiVersion: v1 -kind: Service -metadata: - name: clamp - namespace: "{{ .Values.nsPrefix }}" - annotations: - msb.onap.org/service-info: '[ - { - "serviceName": "clamp", - "version": "v1", - "url": "/restservices/clds/v1", - "protocol": "REST", - "port": "8080", - "visualRange":"1" - } - ]' -spec: - ports: - - name: clamp - port: 8080 - nodePort: {{ .Values.nodePortPrefix }}95 - selector: - app: clamp - type: NodePort -#{{ end }}
\ No newline at end of file diff --git a/kubernetes/clamp/templates/clamp-deployment.yaml b/kubernetes/clamp/templates/clamp-deployment.yaml deleted file mode 100644 index 2c2e13f994..0000000000 --- a/kubernetes/clamp/templates/clamp-deployment.yaml +++ /dev/null @@ -1,52 +0,0 @@ -#{{ if not .Values.disableClampClamp }} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - labels: - app: clamp - name: clamp - namespace: "{{ .Values.nsPrefix }}" -spec: - replicas: {{ .Values.clampReplicas }} - selector: - matchLabels: - app: clamp - template: - metadata: - labels: - app: clamp - name: clamp - spec: - initContainers: - - command: - - /root/ready.py - args: - - --container-name - - clamp-mariadb - env: - - name: NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - image: {{ .Values.image.readiness }} - imagePullPolicy: {{ .Values.pullPolicy }} - name: clamp-readiness - containers: - - env: - - name: SPRING_APPLICATION_JSON - value: '{ - "spring.datasource.camunda.url": "jdbc:mariadb:sequential://clamp-mariadb.{{ .Values.nsPrefix }}:3306/camundabpm?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647", - "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clamp-mariadb.{{ .Values.nsPrefix }}:3306/cldsdb4?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647" - }' - image: {{ .Values.image.clampImage }}:{{ .Values.image.clampVersion }} - imagePullPolicy: {{ .Values.pullPolicy }} - name: clamp - readinessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 10 - imagePullSecrets: - - name: {{ .Values.nsPrefix }}-docker-registry-key -#{{ end }} diff --git a/kubernetes/clamp/templates/clamp-mariadb-deployment.yaml b/kubernetes/clamp/templates/clamp-mariadb-deployment.yaml deleted file mode 100644 index a12489ef63..0000000000 --- 
a/kubernetes/clamp/templates/clamp-mariadb-deployment.yaml +++ /dev/null @@ -1,62 +0,0 @@ -#{{ if not .Values.disableClampClampMariadb }} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: clamp-mariadb - namespace: "{{ .Values.nsPrefix }}" -spec: - replicas: {{ .Values.clampMariaDbReplicas }} - selector: - matchLabels: - app: clamp-mariadb - template: - metadata: - labels: - app: clamp-mariadb - name: clamp-mariadb - spec: - hostname: clamp-mariadb - containers: - - args: - image: {{ .Values.image.mariadbImage }}:{{ .Values.image.mariadbVersion }} - imagePullPolicy: {{ .Values.pullPolicy }} - name: "clamp-mariadb" - env: - - name: MYSQL_ROOT_PASSWORD - value: {{ .Values.mysqlPassword }} - volumeMounts: - - mountPath: /docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh - name: docker-entrypoint-initdb - subPath: load-sql-files-tests-automation.sh - - mountPath: /docker-entrypoint-initdb.d/drop/ - name: docker-entrypoint-clds-drop - - mountPath: /docker-entrypoint-initdb.d/bulkload/ - name: docker-entrypoint-bulkload - - mountPath: /etc/mysql/conf.d/conf1/ - name: clamp-mariadb-conf - - mountPath: /var/lib/mysql - name: clamp-mariadb-data - readinessProbe: - tcpSocket: - port: 3306 - initialDelaySeconds: 5 - periodSeconds: 10 - volumes: - - name: docker-entrypoint-initdb - configMap: - name: clamp-entrypoint-initdb-configmap - - name: docker-entrypoint-clds-drop - configMap: - name: clamp-entrypoint-drop-configmap - - name: docker-entrypoint-bulkload - configMap: - name: clamp-entrypoint-bulkload-configmap - - name: clamp-mariadb-conf - configMap: - name: clamp-mariadb-conf-configmap - - name: clamp-mariadb-data - persistentVolumeClaim: - claimName: clamp-db - imagePullSecrets: - - name: {{ .Values.nsPrefix }}-docker-registry-key -#{{ end }} diff --git a/kubernetes/clamp/templates/clamp-pv-pvc.yaml b/kubernetes/clamp/templates/clamp-pv-pvc.yaml deleted file mode 100644 index c542de6026..0000000000 --- 
a/kubernetes/clamp/templates/clamp-pv-pvc.yaml +++ /dev/null @@ -1,32 +0,0 @@ -#{{ if not .Values.disableClampClampMariadb }} -apiVersion: v1 -kind: PersistentVolume -metadata: - name: "{{ .Values.nsPrefix }}-clamp" - namespace: "{{ .Values.nsPrefix }}" - labels: - name: "{{ .Values.nsPrefix }}-clamp" -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteMany - persistentVolumeReclaimPolicy: Retain - hostPath: - path: {{ .Values.dataRootDir }}/{{ .Values.nsPrefix }}/clamp/mariadb/data ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: clamp-db - namespace: "{{ .Values.nsPrefix }}" -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi - selector: - matchLabels: - name: "{{ .Values.nsPrefix }}-clamp" -#{{ end }} diff --git a/kubernetes/clamp/templates/configmap.yaml b/kubernetes/clamp/templates/configmap.yaml new file mode 100644 index 0000000000..1de3b2f652 --- /dev/null +++ b/kubernetes/clamp/templates/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + spring_application_json: {{ .Values.config.springApplicationJson | quote }} diff --git a/kubernetes/clamp/templates/deployment.yaml b/kubernetes/clamp/templates/deployment.yaml new file mode 100644 index 0000000000..d701e25d3a --- /dev/null +++ b/kubernetes/clamp/templates/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: + - command: + - /root/ready.py + args: + - --container-name + - {{ .Values.mariadb.nameOverride }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}" + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + name: {{ include "common.name" . }}-readiness + containers: + - name: {{ include "common.name" . }} + image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}" + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.internalPort }} + # disable liveness probe when breakpoints set in debugger + # so K8s doesn't restart unresponsive container + {{- if eq .Values.liveness.enabled true }} + livenessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.liveness.periodSeconds }} + {{ end -}} + readinessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.readiness.periodSeconds }} + env: + - name: SPRING_APPLICATION_JSON + valueFrom: + configMapKeyRef: + name: {{ template "common.fullname" . 
}} + key: spring_application_json + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 10 }} + {{- end -}} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 10 }} + {{- end }} + imagePullSecrets: + - name: "{{ include "common.namespace" . }}-docker-registry-key" diff --git a/kubernetes/clamp/templates/service.yaml b/kubernetes/clamp/templates/service.yaml new file mode 100644 index 0000000000..91485a1eec --- /dev/null +++ b/kubernetes/clamp/templates/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + ports: + {{if eq .Values.service.type "NodePort" -}} + - port: {{ .Values.service.internalPort }} + nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }} + name: {{ .Values.service.name }} + {{- else -}} + - port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.service.internalPort }} + name: {{ .Values.service.name }} + {{- end}} + selector: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} diff --git a/kubernetes/clamp/values.yaml b/kubernetes/clamp/values.yaml index 103472afd9..e077531015 100644 --- a/kubernetes/clamp/values.yaml +++ b/kubernetes/clamp/values.yaml @@ -1,13 +1,85 @@ -nsPrefix: onap +################################################################# +# Global configuration defaults. 
+################################################################# +global: # global defaults + nodePortPrefix: 302 + repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== + readinessRepository: oomk8s + readinessImage: readiness-check:1.0.0 + loggingRepository: docker.elastic.co + loggingImage: beats/filebeat:5.5.0 + +subChartsOnly: + enabled: true + +# application image +repository: nexus3.onap.org:10001 +image: onap/clamp pullPolicy: Always -nodePortPrefix: 302 -mysqlPassword: strong_pitchou -dataRootDir: /dockerdata-nfs -clampReplicas: 1 -clampMariaDbReplicas: 1 -image: - readiness: oomk8s/readiness-check:1.1.0 - clampImage: nexus3.onap.org:10001/onap/clamp - clampVersion: v1.1.0 - mariadbImage: nexus3.onap.org:10001/mariadb - mariadbVersion: 10.1.11 + +# flag to enable debugging - application support required +debugEnabled: false + +################################################################# +# Application configuration defaults. 
+################################################################# +config: + mysqlPassword: strong_pitchou + dataRootDir: /dockerdata-nfs + springApplicationJson: '{ + "spring.datasource.camunda.url": "jdbc:mariadb:sequential://clamp-mariadb.{{include "common.namespace" .}}:3306/camundabpm?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647", + "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clamp-mariadb.{{include "common.namespace" .}}:3306/cldsdb4?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647" + }' + +# subchart configuration +mariadb: + nameOverride: clampdb + + +# default number of instances +replicaCount: 1 + +nodeSelector: {} + +affinity: {} + +# probe configuration parameters +liveness: + initialDelaySeconds: 10 + periodSeconds: 10 + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + +readiness: + initialDelaySeconds: 10 + periodSeconds: 10 + + +service: + type: NodePort + name: clamp + internalPort: 8080 + nodePort: 95 + +ingress: + enabled: false + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # + # Example: + # Configure resource requests and limits + # ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # Minimum memory for development is 2 CPU cores and 4GB memory + # Minimum memory for production is 4 CPU cores and 8GB memory +#resources: +# limits: +# cpu: 2 +# memory: 4Gi +# requests: +# cpu: 2 +# memory: 4Gi diff --git a/kubernetes/common/values.yaml b/kubernetes/common/values.yaml index 112875753d..af36d66b33 100644 --- a/kubernetes/common/values.yaml +++ b/kubernetes/common/values.yaml @@ -13,7 +13,7 @@ global: # readiness check readinessRepository: oomk8s - readinessImage: readiness-check:1.0.0 + readinessImage: readiness-check:1.1.0 # logging agent loggingRepository: docker.elastic.co @@ -28,4 +28,4 @@ global: mountPath: /dockerdata-nfs # flag to enable debugging - application support required - debugEnabled: true
\ No newline at end of file + debugEnabled: true diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl Binary files differdeleted file mode 100755 index d53ce5f7f2..0000000000 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl +++ /dev/null diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh deleted file mode 100755 index fce0a63015..0000000000 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh +++ /dev/null @@ -1,17 +0,0 @@ -if curl -s -X PUT http://elasticsearch.namespace-placeholder:9200/searchhealth/stats/testwrite -d @/consul/config/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then - if curl -s -X DELETE http://elasticsearch.namespace-placeholder:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then - if curl -s -X GET http://elasticsearch.namespace-placeholder:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then - echo Successful PUT, DELETE, GET from Search Document Storage 2>&1 - exit 0 - else - echo Failed GET from Search Document Storage 2>&1 - exit 1 - fi - else - echo Failed DELETE from Search Document Storage 2>&1 - exit 1 - fi -else - echo Failed PUT from Search Document Storage 2>&1 - exit 1 -fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh deleted file mode 100755 index 9abfd49ca7..0000000000 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh +++ /dev/null @@ -1,13 +0,0 @@ 
-APPC_DBHOST_POD=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "appc-dbhost-[^[:space:]]*") -if [ -n "$APPC_DBHOST_POD" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then - echo Success. APPC DBHost is running. 2>&1 - exit 0 - else - echo Failed. APPC DBHost is not running. 2>&1 - exit 1 - fi -else - echo Failed. APPC DBHost is offline. 2>&1 - exit 1 -fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh deleted file mode 100755 index e55c90fdc1..0000000000 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh +++ /dev/null @@ -1,16 +0,0 @@ - -NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "gremlin[^[:space:]]*") - -if [ -n "$NAME" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'gremlin-server' > /dev/null; then - - echo Success. Gremlin Server process is running. 2>&1 - exit 0 - else - echo Failed. Gremlin Server process is not running. 2>&1 - exit 1 - fi -else - echo Failed. Gremlin Server container is offline. 
2>&1 - exit 1 -fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh deleted file mode 100755 index 8a523ce0b5..0000000000 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh +++ /dev/null @@ -1,13 +0,0 @@ -SDNC_DBHOST_POD=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "sdnc-dbhost-[^[:space:]]*") -if [ -n "$SDNC_DBHOST_POD" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then - echo Success. SDNC DBHost is running. 2>&1 - exit 0 - else - echo Failed. SDNC DBHost is not running. 2>&1 - exit 1 - fi -else - echo Failed. SDNC DBHost is offline. 2>&1 - exit 1 -fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-data-router-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json index a60203694a..be41934e77 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-data-router-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json @@ -5,7 +5,7 @@ { "id": "data-router-process", "name": "Synapse Presence", - "script": "/consul/config/scripts/data-router-script.sh", + "script": "/consul/scripts/data-router-script.sh", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json index 1d23b88e0f..6b42e0cc4a 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json @@ -5,7 +5,7 @@ { "id": "hbase-aai", "name": "HBase Health 
Check", - "http": "http://hbase.namespace-placeholder:8080/status/cluster", + "http": "http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster", "method": "GET", "header": { "Cache-Control": ["no-cache"], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-model-loader-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json index 4e2e305afd..044a844e35 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-model-loader-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json @@ -5,7 +5,7 @@ { "id": "model-loader-process", "name": "Model Loader Presence", - "script": "/consul/config/scripts/model-loader-script.sh", + "script": "/consul/scripts/model-loader-script.sh", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-search-data-service-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json index bf8830f0df..2a111d66eb 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-search-data-service-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json @@ -5,26 +5,26 @@ { "id": "elasticsearch", "name": "Search Data Service Document Store", - "http": "http://aai-elasticsearch.namespace-placeholder:9200/_cat/indices?v", + "http": "http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/_cat/indices?v", "interval": "15s", "timeout": "1s" }, { "id": "elasticsearch-write-health", "name": "Search Data Service Document Store Write Test", - "script": "/consul/config/scripts/aai-search-storage-write-script.sh", + "script": "/consul/scripts/aai-search-storage-write-script.sh", "interval": "60s" }, { "id": "search-data-service-availability", "name": "Search Data Service Availability", - "script": "curl -k --cert 
/consul/config/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/config/bin/client-cert-onap.key.pem --key-type PEM https://search-data-service.namespace-placeholder:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'", + "script": "curl -k --cert /consul/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/certs/client-cert-onap.key.pem --key-type PEM https://search-data-service.{{ .Values.nsPrefix }}:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'", "interval": "15s" }, { "id": "search-data-service-api", "name": "Search Data Service Operational Test", - "script": "/consul/config/scripts/search-data-service-availability.sh", + "script": "/consul/scripts/search-data-service-availability.sh", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-services-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json index 99d964e7d8..0274cd53c7 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-services-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json @@ -5,7 +5,7 @@ { "id": "aai-service", "name": "Core A&AI", - "http": "https://aai-service.namespace-placeholder:8443/aai/util/echo", + "http": "https://aai-service.{{ .Values.nsPrefix }}:8443/aai/util/echo", "header": { "Authorization": ["Basic QUFJOkFBSQ=="], "X-TransactionId": ["ConsulHealthCheck"], @@ -18,7 +18,7 @@ { "id": "aai-resources", "name": "Resources Microservice", - "http": "https://aai-resources.namespace-placeholder:8447/aai/util/echo", + "http": "https://aai-resources.{{ .Values.nsPrefix }}:8447/aai/util/echo", "header": { "Authorization": ["Basic QUFJOkFBSQ=="], "X-TransactionId": ["ConsulHealthCheck"], @@ -31,7 +31,7 @@ { "id": "aai-traversal", "name": "Traversal Microservice", - "http": 
"https://aai-traversal.namespace-placeholder:8446/aai/util/echo", + "http": "https://aai-traversal.{{ .Values.nsPrefix }}:8446/aai/util/echo", "header": { "Authorization": ["Basic QUFJOkFBSQ=="], "X-TransactionId": ["ConsulHealthCheck"], @@ -40,13 +40,6 @@ "tls_skip_verify": true, "interval": "15s", "timeout": "1s" - }, - { - "id": "gremlin-server", - "name": "Graph Data Store", - "script": "/consul/config/scripts/gremlin-script.sh", - "interval": "15s", - "timeout": "1s" } ] } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-sparky-be-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json index 6af58dbf4f..bf6305c1d0 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-sparky-be-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json @@ -5,7 +5,7 @@ { "id": "sparky-be-process", "name": "UI Backend Presence", - "script": "/consul/config/scripts/sparky-be-script.sh", + "script": "/consul/scripts/sparky-be-script.sh", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-tabular-backend-health.json b/kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json index f76b33b3d8..c7fc19b2ee 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-tabular-backend-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json @@ -5,7 +5,7 @@ { "id": "tabular-backend", "name": "Tabular Data Store Operational Test", - "script": "/consul/config/scripts/tabular-db-availability.sh", + "script": "/consul/scripts/tabular-db-availability.sh", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-dbbuilder.json b/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json index 
a6c3467855..9505246c25 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-dbbuilder.json +++ b/kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json @@ -5,7 +5,7 @@ { "id": "appc-dgbuilder", "name": "APPC-Dgbuilder Server Health Check", - "http": "http://appc-dgbuilder.namespace-placeholder:3000/", + "http": "http://appc-dgbuilder.{{ .Values.nsPrefix }}:3000/", "method": "HEAD", "header": { "Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-health.json b/kubernetes/consul/resources/config/consul-agent-config/appc-health.json index 9d09a59982..86f2ce8da0 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/appc-health.json @@ -5,7 +5,7 @@ { "id": "appc-dbhost-healthcheck", "name": "APPC DBHost Health Check", - "script": "/consul/config/scripts/appc-dbhost-script.sh", + "script": "/consul/scripts/appc-dbhost-script.sh", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json index 31f1b253f6..f36251a78f 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json +++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json @@ -3,9 +3,9 @@ "name": "Health Check: APPC-SDN-CTL-DB-01", "checks": [ { - "id": "appc-sdnctldb01.namespace-placeholder", + "id": "appc-sdnctldb01.{{ .Values.nsPrefix }}", "name": "APPC SDNCTLDB01 Health Check", - "tcp": "appc-sdnctldb01.namespace-placeholder:3306", + "tcp": "appc-sdnctldb01.{{ .Values.nsPrefix }}:3306", "interval": "10s", "timeout": "1s" } diff --git 
a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json index 559e5a8cfe..8c8171fe5f 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json +++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json @@ -3,9 +3,9 @@ "name": "Health Check: APPC-SDN-CTL-DB-02", "checks": [ { - "id": "appc-sdnctldb02.namespace-placeholder", + "id": "appc-sdnctldb02.{{ .Values.nsPrefix }}", "name": "APPC SDNCTLDB02 Health Check", - "tcp": "appc-sdnctldb02.namespace-placeholder:3306", + "tcp": "appc-sdnctldb02.{{ .Values.nsPrefix }}:3306", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json index 09b50077d7..ec6db9d0cb 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json +++ b/kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json @@ -5,7 +5,7 @@ { "id": "appc-sdnhost", "name": "APPC SDN Host Health Check", - "http": "http://appc-sdnhost.namespace-placeholder:8282/apidoc/explorer/index.html", + "http": "http://appc-sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html", "method": "HEAD", "header": { "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.crt.pem b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem index 5696aa3570..5696aa3570 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.crt.pem +++ b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem diff --git 
a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.key.pem b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem index c7e386e55f..c7e386e55f 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.key.pem +++ b/kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json b/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json index 3586f58932..d14b16487f 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json +++ b/kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json @@ -5,7 +5,7 @@ { "id": "log-elasticsearch-server", "name": "Log Elastic Search Health Check", - "http": "http://elasticsearch.namespace-placeholder:9200/_cluster/health?pretty", + "http": "http://elasticsearch.{{ .Values.nsPrefix }}:9200/_cluster/health?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -14,7 +14,7 @@ { "id": "log-elasticsearch-tcp", "name": "Log Elastic Search TCP Health Check", - "tcp": "elasticsearchtcp.namespace-placeholder:9300", + "tcp": "elasticsearchtcp.{{ .Values.nsPrefix }}:9300", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json b/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json index 7fda31f8c0..cc72bbf3e2 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json +++ b/kubernetes/consul/resources/config/consul-agent-config/log-kibana.json @@ -5,7 +5,7 @@ { "id": "log-kibana-server", "name": "Log kibana Health Check", - "http": "http://kibana.namespace-placeholder:5601/status", + "http": "http://kibana.{{ .Values.nsPrefix }}:5601/status", "method": "HEAD", 
"tls_skip_verify": true, "interval": "15s", diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json b/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json index e8e6236359..9eb60fdf8a 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json +++ b/kubernetes/consul/resources/config/consul-agent-config/log-logstash.json @@ -5,7 +5,7 @@ { "id": "log-logstash-internal-server-gi", "name": "Log Stash Health Check - General Information", - "http": "http://logstashinternal.namespace-placeholder:9600/?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -14,7 +14,7 @@ { "id": "log-logstash-internal-server-node-info", "name": "Log Stash Health Check - Node Information", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -23,7 +23,7 @@ { "id": "log-logstash-internal-server-os-info", "name": "Log Stash Health Check - OS Information", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/os?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/os?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -32,7 +32,7 @@ { "id": "log-logstash-internal-server-jvm-info", "name": "Log Stash Health Check - JVM Information", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/jvm?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/jvm?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -41,7 +41,7 @@ { "id": "log-logstash-internal-server-plugin-info", "name": "Log Stash Health Check - Plugin Information", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/plugins?pretty", + "http": 
"http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/plugins?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -50,7 +50,7 @@ { "id": "log-logstash-internal-server-node-stat", "name": "Log Stash Health Check - Node Stats", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -59,7 +59,7 @@ { "id": "log-logstash-internal-server-jvm-stat", "name": "Log Stash Health Check - JVM Stats", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats/jvm?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/jvm?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -68,7 +68,7 @@ { "id": "log-logstash-internal-server-process-stat", "name": "Log Stash Health Check - Process Stats", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats/process?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/process?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -77,7 +77,7 @@ { "id": "log-logstash-internal-server-os-stat", "name": "Log Stash Health Check - OS Stats", - "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats/os?pretty", + "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/os?pretty", "method": "GET", "tls_skip_verify": true, "interval": "15s", @@ -86,7 +86,7 @@ { "id": "log-logstash-tcp", "name": "Log Stash File Beat TCP Health Check", - "tcp": "logstash.namespace-placeholder:5044", + "tcp": "logstash.{{ .Values.nsPrefix }}:5044", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/model-loader.properties b/kubernetes/consul/resources/config/consul-agent-config/model-loader.properties index b2db044417..b2db044417 100644 --- 
a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/model-loader.properties +++ b/kubernetes/consul/resources/config/consul-agent-config/model-loader.properties diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-dmaap-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json index dddd9692b9..ee0d90e30a 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-dmaap-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json @@ -2,7 +2,7 @@ "service": { "name": "Health Check: Message Router - DMaaP", "check": { - "http": "http://dmaap.namespace-placeholder:3904/topics", + "http": "http://dmaap.{{ .Values.nsPrefix }}:3904/topics", "interval": "30s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-kafka-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json index 128cf94460..df3b190726 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-kafka-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json @@ -2,7 +2,7 @@ "service": { "name": "Health Check: Message Router - Kafka", "check": { - "script": "/consul/config/scripts/mr-kafka-health.sh", + "script": "/consul/scripts/mr-kafka-health.sh", "interval": "30s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-zookeeper-health.json b/kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json index 5268747349..36d295c1ef 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-zookeeper-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json @@ -2,7 +2,7 @@ "service": { "name": "Health Check: Message Router - ZooKeeper", "check": { - "script": 
"/consul/config/scripts/mr-zookeeper-health.sh", + "script": "/consul/scripts/mr-zookeeper-health.sh", "interval": "30s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json b/kubernetes/consul/resources/config/consul-agent-config/msb-health.json index 02e7dc47f4..d15c2ef829 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/msb-health.json @@ -3,34 +3,34 @@ "name": "Health Check: MSB", "checks": [ { - "id": "msb-eag.namespace-placeholder", + "id": "msb-eag.{{ .Values.nsPrefix }}", "name": "MSB eag Health Check", - "http": "http://msb-eag.namespace-placeholder:80/iui/microservices/default.html", + "http": "http://msb-eag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html", "method": "HEAD", "tls_skip_verify": true, "interval": "15s", "timeout": "1s" }, { - "id": "msb-iag.namespace-placeholder", + "id": "msb-iag.{{ .Values.nsPrefix }}", "name": "MSB iag Health Check", - "http": "http://msb-iag.namespace-placeholder:80/iui/microservices/default.html", + "http": "http://msb-iag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html", "method": "HEAD", "tls_skip_verify": true, "interval": "15s", "timeout": "1s" }, { - "id": "msb-consul.namespace-placeholder", + "id": "msb-consul.{{ .Values.nsPrefix }}", "name": "MSB consul Health Check", - "tcp": "msb-consul.namespace-placeholder:8500", + "tcp": "msb-consul.{{ .Values.nsPrefix }}:8500", "interval": "15s", "timeout": "1s" }, { - "id": "msb-discovery.namespace-placeholder", + "id": "msb-discovery.{{ .Values.nsPrefix }}", "name": "MSB discovery Health Check", - "tcp": "msb-discovery.namespace-placeholder:10081", + "tcp": "msb-discovery.{{ .Values.nsPrefix }}:10081", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-health.json 
b/kubernetes/consul/resources/config/consul-agent-config/mso-health.json index 354cd9ebab..1df7714a06 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/mso-health.json @@ -5,21 +5,21 @@ { "id": "mso-api-healthcheck", "name": "MSO API Health Check", - "script": "/consul/config/scripts/mso-api-script.sh", + "script": "/consul/scripts/mso-api-script.sh", "interval": "10s", "timeout": "1s" }, { "id": "mso-camunda-healthcheck", "name": "MSO Camunda Health Check", - "script": "/consul/config/scripts/mso-camunda-script.sh", + "script": "/consul/scripts/mso-camunda-script.sh", "interval": "10s", "timeout": "1s" }, { "id": "mso-jra-healthcheck", "name": "MSO JRA Health Check", - "script": "/consul/config/scripts/mso-jra-script.sh", + "script": "/consul/scripts/mso-jra-script.sh", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json b/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json index 710f4a8499..54bd2ef1fe 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json +++ b/kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json @@ -5,7 +5,7 @@ { "id": "mso-mariadb", "name": "MSO Mariadb Health Check", - "script": "/consul/config/scripts/mso-mariadb-script.sh", + "script": "/consul/scripts/mso-mariadb-script.sh", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json b/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json index 760077c543..dba7c77307 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json +++ b/kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json @@ -5,7 +5,7 @@ { "id": "framework", 
"name": "Framework Health Check", - "http": "http://framework.namespace-placeholder:9001/api/multicloud/v0/swagger.json", + "http": "http://framework.{{ .Values.nsPrefix }}:9001/api/multicloud/v0/swagger.json", "method": "HEAD", "header": { "Cache-Control": ["no-cache"], @@ -19,7 +19,7 @@ { "id": "multicloud-ocata", "name": "Multicloud Ocata Health Check", - "http": "http://multicloud-ocata.namespace-placeholder:9006/api/multicloud-ocata/v0/swagger.json", + "http": "http://multicloud-ocata.{{ .Values.nsPrefix }}:9006/api/multicloud-ocata/v0/swagger.json", "method": "HEAD", "header": { "Cache-Control": ["no-cache"], @@ -33,7 +33,7 @@ { "id": "multicloud-vio", "name": "Multicloud Vio Health Check", - "http": "http://multicloud-vio.namespace-placeholder:9004/api/multicloud-vio/v0/swagger.json", + "http": "http://multicloud-vio.{{ .Values.nsPrefix }}:9004/api/multicloud-vio/v0/swagger.json", "method": "HEAD", "header": { "Cache-Control": ["no-cache"], @@ -47,7 +47,7 @@ { "id": "multicloud-windriver", "name": "Multicloud Windriver Health Check", - "http": "http://multicloud-windriver.namespace-placeholder:9005/api/multicloud-titanium_cloud/v0/swagger.json", + "http": "http://multicloud-windriver.{{ .Values.nsPrefix }}:9005/api/multicloud-titanium_cloud/v0/swagger.json", "method": "HEAD", "header": { "Cache-Control": ["no-cache"], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-doc.txt b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt index a6e084cfea..a6e084cfea 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-doc.txt +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh 
b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh new file mode 100755 index 0000000000..3d26f6e71e --- /dev/null +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh @@ -0,0 +1,17 @@ +if curl -s -X PUT http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite -d @/consul/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then + if curl -s -X DELETE http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then + if curl -s -X GET http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then + echo Successful PUT, DELETE, GET from Search Document Storage 2>&1 + exit 0 + else + echo Failed GET from Search Document Storage 2>&1 + exit 1 + fi + else + echo Failed DELETE from Search Document Storage 2>&1 + exit 1 + fi +else + echo Failed PUT from Search Document Storage 2>&1 + exit 1 +fi diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh new file mode 100755 index 0000000000..5f91c5e89c --- /dev/null +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh @@ -0,0 +1,13 @@ +APPC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "appc-dbhost-[^[:space:]]*") +if [ -n "$APPC_DBHOST_POD" ]; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then + echo Success. APPC DBHost is running. 2>&1 + exit 0 + else + echo Failed. APPC DBHost is not running. 2>&1 + exit 1 + fi +else + echo Failed. APPC DBHost is offline. 
2>&1 + exit 1 +fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/data-router-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh index 2c7a949987..035e7c8a38 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/data-router-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh @@ -1,8 +1,8 @@ -NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "data-router[^[:space:]]*") +NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-data-router[^[:space:]]*") if [ -n "$NAME" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then echo Success. Synapse process is running. 2>&1 exit 0 diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/model-loader-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh index 9f4f4a843d..9a4b4df28f 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/model-loader-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh @@ -1,8 +1,8 @@ -NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "model-loader[^[:space:]]*") +NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-model-loader[^[:space:]]*") if [ -n "$NAME" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then echo Success. 
Model Loader process is running. 2>&1 exit 0 diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-kafka-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh index cde6e8f335..a109032d3b 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-kafka-health.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh @@ -1,6 +1,6 @@ -kafkapod=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "global-kafka-[^[:space:]]*") +kafkapod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-global-kafka-[^[:space:]]*") if [ -n "$kafkapod" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $kafkapod -- ps ef | grep -i kafka; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $kafkapod -- ps ef | grep -i kafka; then echo Success. Kafka process is running. 2>&1 exit 0 else diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-zookeeper-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh index 2534fcba9e..47c42d54ef 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-zookeeper-health.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh @@ -1,6 +1,6 @@ -zkpod=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "zookeeper-[^[:space:]]*") +zkpod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*") if [ -n "$zkpod" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $zkpod -- ps ef | grep -i zookeeper; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $zkpod -- ps ef | grep -i zookeeper; then echo Success. Zookeeper process is running. 
2>&1 exit 0 else diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-api-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh index 459d2e2b34..8f3f85ce5e 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-api-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. -HEALTH_CHECK_ENDPOINT="http://mso.namespace-placeholder:8080/ecomp/mso/infra/healthcheck" +HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/ecomp/mso/infra/healthcheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready") diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-camunda-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh index 8342843dd8..341ff193e9 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-camunda-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. -HEALTH_CHECK_ENDPOINT="http://mso.namespace-placeholder:8080/mso/healthcheck" +HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/mso/healthcheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready") diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-jra-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh index c88ebe5a49..beeb289517 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-jra-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. 
-HEALTH_CHECK_ENDPOINT="http://mso.namespace-placeholder:8080/networks/rest/healthcheck" +HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/networks/rest/healthcheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready") diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh index cabaa4bc29..aa73a73050 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh @@ -1,7 +1,7 @@ -NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "mso-mariadb[^[:space:]]*") +NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "mso-mariadb[^[:space:]]*") if [ -n "$NAME" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then echo Success. mariadb process is running. 2>&1 exit 0 else diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh index 66f35d9437..00a05648d3 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. 
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck" +HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) ## Strip out the ON_BOARDING section from the response XML (otherwise we will diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh index cd50120581..9950cc9fdf 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. -HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck" +HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) ## Strip out the ON_BOARDING section from the response XML (otherwise we will diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh index 9799c31201..27f3b224c6 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. 
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck" +HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) ## Strip out the ON_BOARDING section from the response XML (otherwise we will diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh index bfa2068f29..c5955f3be3 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh @@ -1,5 +1,5 @@ ## Query the health check API. -HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck" +HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck" HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT) ## Strip out the ON_BOARDING section from the response XML (otherwise we will diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh new file mode 100755 index 0000000000..27b9b9f608 --- /dev/null +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh @@ -0,0 +1,13 @@ +SDNC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "sdnc-dbhost-[^[:space:]]*") +if [ -n "$SDNC_DBHOST_POD" ]; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then + echo Success. SDNC DBHost is running. 2>&1 + exit 0 + else + echo Failed. SDNC DBHost is not running. 2>&1 + exit 1 + fi +else + echo Failed. SDNC DBHost is offline. 
2>&1 + exit 1 +fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/search-data-service-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh index ef96cca6a3..d5118736cd 100755..100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/search-data-service-availability.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh @@ -1,6 +1,6 @@ #!/bin/sh -SEARCH_SERVICE_NAME="search-data-service.namespace-placeholder" +SEARCH_SERVICE_NAME="search-data-service.{{ .Values.nsPrefix }}" SEARCH_SERVICE_PORT=9509 HEALTH_CHECK_INDEX="healthcheck" @@ -8,13 +8,11 @@ HEALTH_CHECK_INDEX="healthcheck" INDEX_URL="https://$SEARCH_SERVICE_NAME:$SEARCH_SERVICE_PORT/services/search-data-service/v1/search/indexes/$HEALTH_CHECK_INDEX" INDEX_SCHEMA="{\"fields\":[{\"name\": \"field1\", \"data-type\": \"string\"}]}" - -SEARCH_CERT_FILE="/consul/config/certs/client-cert-onap.crt.pem" -SEARCH_KEY_FILE="/consul/config/certs/client-cert-onap.key.pem" - +SEARCH_CERT_FILE="/consul/certs/client-cert-onap.crt.pem" +SEARCH_KEY_FILE="/consul/certs/client-cert-onap.key.pem" ## Try to create an index via the Search Data Service API. -CREATE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "$INDEX_SCHEMA" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X PUT $INDEX_URL) +CREATE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "$INDEX_SCHEMA" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X PUT $INDEX_URL) RESULT_STRING=" " @@ -33,13 +31,13 @@ else fi ## Now, clean up after ourselves. 
-DELETE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "{ }" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X DELETE $INDEX_URL) +DELETE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "{ }" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X DELETE $INDEX_URL) if [ $DELETE_INDEX_RESP -eq 200 ]; then RESULT_STRING="Service Is Able To Communicate With Back End" -else +else RESULT_STRING="Service API Failure - $DELETE_INDEX_RESP" - echo $RESULT_STRING + echo $RESULT_STRING exit 1 fi diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sparky-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh index 0791c5811d..7796681902 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sparky-be-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh @@ -1,8 +1,8 @@ -NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "aai-sparky-be[^[:space:]]*") +NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-sparky-be[^[:space:]]*") if [ -n "$NAME" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then echo Success. UI Backend Service process is running. 
2>&1 exit 0 diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/tabular-db-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh index ebec49769c..dc7768fc6e 100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/tabular-db-availability.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh @@ -1,6 +1,6 @@ # Query the Hbase service for the cluster status. -GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://aai-hbase.namespace-placeholder:8080/status/cluster) +GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster) if [ -z "$GET_CLUSTER_STATUS_RESPONSE" ]; then echo "Tabular store is unreachable." diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/vid-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh index 96c7969fac..bbb080f44d 100644..100755 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/vid-mariadb-script.sh +++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh @@ -1,7 +1,7 @@ -NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "vid-mariadb[^[:space:]]*") +NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "vid-mariadb[^[:space:]]*") if [ -n "$NAME" ]; then - if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then + if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then echo Success. mariadb process is running. 
2>&1 exit 0 else diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json index 374dcdfd62..ec8ec86c25 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdc-health.json @@ -5,35 +5,35 @@ { "id": "sdc-fe-healthcheck", "name": "SDC Front End Health Check", - "script": "/consul/config/scripts/sdc-fe-script.sh", + "script": "/consul/scripts/sdc-fe-script.sh", "interval": "10s", "timeout": "1s" }, { "id": "sdc-be-healthcheck", "name": "SDC Back End Health Check", - "script": "/consul/config/scripts/sdc-be-script.sh", + "script": "/consul/scripts/sdc-be-script.sh", "interval": "10s", "timeout": "1s" }, { "id": "sdc-titan-healthcheck", "name": "SDC Titan Health Check", - "script": "/consul/config/scripts/sdc-titan-script.sh", + "script": "/consul/scripts/sdc-titan-script.sh", "interval": "10s", "timeout": "1s" }, { "id": "sdc-cs-healthcheck", "name": "SDC Cassandra Health Check", - "script": "/consul/config/scripts/sdc-cs-script.sh", + "script": "/consul/scripts/sdc-cs-script.sh", "interval": "10s", "timeout": "1s" }, { "id": "sdc-catalog-healthcheck", "name": "SDC Catalog Health Check", - "http": "https://sdc-be.namespace-placeholder:8443/asdc/v1/catalog/services", + "http": "https://sdc-be.{{ .Values.nsPrefix }}:8443/asdc/v1/catalog/services", "header": { "Authorization": ["Basic dmlkOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU="], "X-ECOMP-InstanceID": ["VID"], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json index 28d711b72f..ea0ae562e9 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json @@ 
-5,7 +5,7 @@ { "id": "sdnc-dbhost-healthcheck", "name": "SDNC DBHOST Health Check", - "script": "/consul/config/scripts/sdnc-dbhost-script.sh", + "script": "/consul/scripts/sdnc-dbhost-script.sh", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dgbuilder.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json index 6c293995eb..6ae14afac6 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dgbuilder.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json @@ -5,7 +5,7 @@ { "id": "sdnc-dgbuilder", "name": "SDNC-DGbuilder Health Check", - "http": "http://sdnc-dgbuilder.namespace-placeholder:3000/", + "http": "http://sdnc-dgbuilder.{{ .Values.nsPrefix }}:3000/", "method": "HEAD", "header": { "Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json index a127aa1d27..0ee5e8951d 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json @@ -5,7 +5,7 @@ { "id": "odl-api-healthcheck", "name": "SDNC API Health Check", - "http": "http://sdnhost.namespace-placeholder:8282/restconf/operations/SLI-API:healthcheck", + "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/restconf/operations/SLI-API:healthcheck", "method": "POST", "header": { "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json index 07f2bb7b95..092df058e3 100644 --- 
a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json @@ -5,7 +5,7 @@ { "id": "sdnc-portal", "name": "SDNC Portal Health Check", - "http": "http://sdnc-portal.namespace-placeholder:8843/login", + "http": "http://sdnc-portal.{{ .Values.nsPrefix }}:8843/login", "method": "HEAD", "header": { "Cache-Control": ["no-cache"], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json index 6b859c0894..ed4a29d7f6 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json @@ -3,9 +3,9 @@ "name": "Health Check: SDNC-SDN-CTL-DB-01", "checks": [ { - "id": "sdnctldb01.namespace-placeholder", + "id": "sdnctldb01.{{ .Values.nsPrefix }}", "name": "SDNC SDNCTLDB01 Health Check", - "tcp": "sdnctldb01.namespace-placeholder:3306", + "tcp": "sdnctldb01.{{ .Values.nsPrefix }}:3306", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json index c6ad3beb93..8c4700b6f0 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json @@ -3,9 +3,9 @@ "name": "Health Check: SDNC-SDN-CTL-DB-02", "checks": [ { - "id": "sdnctldb02.namespace-placeholder", + "id": "sdnctldb02.{{ .Values.nsPrefix }}", "name": "SDNC SDNCTLDB02 Health Check", - "tcp": "sdnctldb02.namespace-placeholder:3306", + "tcp": "sdnctldb02.{{ .Values.nsPrefix }}:3306", "interval": "10s", 
"timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json index 9494810081..585b9c0b69 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json +++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json @@ -5,7 +5,7 @@ { "id": "sdnc-sdnhost", "name": "SDNC SDN Host Health Check", - "http": "http://sdnhost.namespace-placeholder:8282/apidoc/explorer/index.html", + "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html", "method": "HEAD", "header": { "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="], diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json b/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json index abc9e7f6b4..5ace3e4d9b 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/vfc-health.json @@ -3,107 +3,107 @@ "name": "Health Check: VFC", "checks": [ { - "id": "vfc-catalog.namespace-placeholder", + "id": "vfc-catalog.{{ .Values.nsPrefix }}", "name": "VFC catalog Health Check", - "tcp": "vfc-catalog.namespace-placeholder:8806", + "tcp": "vfc-catalog.{{ .Values.nsPrefix }}:8806", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-emsdriver.namespace-placeholder", + "id": "vfc-emsdriver.{{ .Values.nsPrefix }}", "name": "VFC emsdriver Health Check", - "tcp": "vfc-emsdriver.namespace-placeholder:8206", + "tcp": "vfc-emsdriver.{{ .Values.nsPrefix }}:8206", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-gvnfmdriver.namespace-placeholder", + "id": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}", "name": "VFC gvnfmdriver Health Check", - "tcp": "vfc-gvnfmdriver.namespace-placeholder:8484", + "tcp": "vfc-gvnfmdriver.{{ .Values.nsPrefix 
}}:8484", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-hwvnfmdriver.namespace-placeholder", + "id": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}", "name": "VFC hwvnfmdriver Health Check", - "tcp": "vfc-hwvnfmdriver.namespace-placeholder:8482", + "tcp": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}:8482", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-jujudriver.namespace-placeholder", + "id": "vfc-jujudriver.{{ .Values.nsPrefix }}", "name": "VFC jujudriver Health Check", - "tcp": "vfc-jujudriver.namespace-placeholder:8483", + "tcp": "vfc-jujudriver.{{ .Values.nsPrefix }}:8483", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-nokiavnfmdriver.namespace-placeholder", + "id": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}", "name": "VFC nokiavnfmdriver Health Check", - "tcp": "vfc-nokiavnfmdriver.namespace-placeholder:8486", + "tcp": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}:8486", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-nslcm.namespace-placeholder", + "id": "vfc-nslcm.{{ .Values.nsPrefix }}", "name": "VFC nslcm Health Check", - "tcp": "vfc-nslcm.namespace-placeholder:8403", + "tcp": "vfc-nslcm.{{ .Values.nsPrefix }}:8403", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-resmgr.namespace-placeholder", + "id": "vfc-resmgr.{{ .Values.nsPrefix }}", "name": "VFC resmgr Health Check", - "tcp": "vfc-resmgr.namespace-placeholder:8480", + "tcp": "vfc-resmgr.{{ .Values.nsPrefix }}:8480", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-vnflcm.namespace-placeholder", + "id": "vfc-vnflcm.{{ .Values.nsPrefix }}", "name": "VFC vnflcm Health Check", - "tcp": "vfc-vnflcm.namespace-placeholder:8801", + "tcp": "vfc-vnflcm.{{ .Values.nsPrefix }}:8801", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-vnfmgr.namespace-placeholder", + "id": "vfc-vnfmgr.{{ .Values.nsPrefix }}", "name": "VFC vnfmgr Health Check", - "tcp": "vfc-vnfmgr.namespace-placeholder:8803", + "tcp": "vfc-vnfmgr.{{ .Values.nsPrefix }}:8803", "interval": "15s", "timeout": "1s" }, { - 
"id": "vfc-vnfres.namespace-placeholder", + "id": "vfc-vnfres.{{ .Values.nsPrefix }}", "name": "VFC vnfres Health Check", - "tcp": "vfc-vnfres.namespace-placeholder:8802", + "tcp": "vfc-vnfres.{{ .Values.nsPrefix }}:8802", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-workflow.namespace-placeholder", + "id": "vfc-workflow.{{ .Values.nsPrefix }}", "name": "VFC workflow Health Check", - "tcp": "vfc-workflow.namespace-placeholder:10550", + "tcp": "vfc-workflow.{{ .Values.nsPrefix }}:10550", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-workflowengineactiviti.namespace-placeholder", + "id": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}", "name": "VFC workflow-engine Health Check", - "tcp": "vfc-workflowengineactiviti.namespace-placeholder:8080", + "tcp": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}:8080", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-ztesdncdriver.namespace-placeholder", + "id": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}", "name": "VFC ztesdncdriver Health Check", - "tcp": "vfc-ztesdncdriver.namespace-placeholder:8411", + "tcp": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}:8411", "interval": "15s", "timeout": "1s" }, { - "id": "vfc-ztevnfmdriver.namespace-placeholder", + "id": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}", "name": "VFC ztevnfmdriver Health Check", - "tcp": "vfc-ztevnfmdriver.namespace-placeholder:8410", + "tcp": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}:8410", "interval": "15s", "timeout": "1s" } diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vid-health.json b/kubernetes/consul/resources/config/consul-agent-config/vid-health.json index 39d71c4e5e..2dc6f0a9dc 100644 --- a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vid-health.json +++ b/kubernetes/consul/resources/config/consul-agent-config/vid-health.json @@ -5,7 +5,7 @@ { "id": "vid-server", "name": "VID Server Health Check", - "http": "http://vid-server.namespace-placeholder:8080/vid/healthCheck", + 
"http": "http://vid-server.{{ .Values.nsPrefix }}:8080/vid/healthCheck", "method": "GET", "header": { "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="], @@ -20,7 +20,7 @@ { "id": "vid-mariadb", "name": "Vid Mariadb Health Check", - "script": "/consul/config/scripts/vid-mariadb-script.sh", + "script": "/consul/scripts/vid-mariadb-script.sh", "interval": "10s", "timeout": "1s" } diff --git a/kubernetes/consul/templates/consul-agent-configmap.yaml b/kubernetes/consul/templates/consul-agent-configmap.yaml new file mode 100644 index 0000000000..0f37f50eea --- /dev/null +++ b/kubernetes/consul/templates/consul-agent-configmap.yaml @@ -0,0 +1,25 @@ +#{{ if not .Values.disableConsulConsulAgent }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: consul-agent-configmap + namespace: {{ .Values.nsPrefix }} +data: +{{ tpl (.Files.Glob "resources/config/consul-agent-config/*").AsConfig . | indent 2 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: consul-agent-scripts-configmap + namespace: {{ .Values.nsPrefix }} +data: +{{ tpl (.Files.Glob "resources/config/consul-agent-config/scripts/*").AsConfig . | indent 2 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: consul-agent-certs-secret + namespace: {{ .Values.nsPrefix }} +data: +{{ tpl (.Files.Glob "resources/config/consul-agent-config/certs/*").AsSecrets . 
| indent 2 }} +#{{ end }} diff --git a/kubernetes/consul/templates/consul-agent-deployment.yaml b/kubernetes/consul/templates/consul-agent-deployment.yaml index a2bbe6636f..8d780f0e58 100644 --- a/kubernetes/consul/templates/consul-agent-deployment.yaml +++ b/kubernetes/consul/templates/consul-agent-deployment.yaml @@ -18,15 +18,26 @@ spec: name: consul-agent spec: containers: - - image: "{{ .Values.consulimageRegistry }}:{{ .Values.consuldockerTag }}" + - image: "{{ .Values.consulagentRegistry }}" command: ["/usr/local/bin/docker-entrypoint.sh"] args: ["agent","-client","0.0.0.0","-enable-script-checks","-join","consul-server.{{ .Values.nsPrefix }}"] name: consul-server volumeMounts: - mountPath: /consul/config name: consul-agent-config + - mountPath: /consul/scripts + name: consul-agent-scripts-config + - mountPath: /consul/certs + name: consul-agent-certs-config volumes: - - hostPath: - path: {{ .Values.rootHostPath }}/{{ .Values.nsPrefix }}/consul/consul-agent-config + - configMap: + name: consul-agent-configmap name: consul-agent-config + - configMap: + name: consul-agent-scripts-configmap + defaultMode: 0755 + name: consul-agent-scripts-config + - secret: + secretName: consul-agent-certs-secret + name: consul-agent-certs-config #{{ end }} diff --git a/kubernetes/consul/templates/consul-server-deployment.yaml b/kubernetes/consul/templates/consul-server-deployment.yaml index 6aafee4cb2..706aa0eb9a 100644 --- a/kubernetes/consul/templates/consul-server-deployment.yaml +++ b/kubernetes/consul/templates/consul-server-deployment.yaml @@ -18,15 +18,8 @@ spec: name: consul-server spec: containers: - - image: "{{ .Values.consulimageRegistry }}:{{ .Values.consuldockerTag }}" + - image: "{{ .Values.consulserverRegistry }}" command: ["/usr/local/bin/docker-entrypoint.sh"] args: ["agent","-server","-client","0.0.0.0","-enable-script-checks","-bootstrap-expect=3","-ui","-join","consul-server.{{ .Values.nsPrefix }}"] name: consul-server - volumeMounts: - - mountPath: 
/consul/config - name: consul-server-config - volumes: - - hostPath: - path: {{ .Values.rootHostPath }}/{{ .Values.nsPrefix }}/consul/consul-server-config - name: consul-server-config #{{ end }} diff --git a/kubernetes/consul/values.yaml b/kubernetes/consul/values.yaml index a6909d7d21..3886727717 100644 --- a/kubernetes/consul/values.yaml +++ b/kubernetes/consul/values.yaml @@ -1,8 +1,7 @@ nsPrefix: "onap" nodePortPrefix: 302 -consuldockerTag: "latest" -rootHostPath: "/dockerdata-nfs" -consulimageRegistry: "docker.io/consul" +consulserverRegistry: "docker.io/consul:1.0.6" +consulagentRegistry: "oomk8s/consul:1.0.0" consulAgentReplicas: 1 consulServerReplicas: 3 service: diff --git a/kubernetes/dcaegen2/templates/pod.yaml b/kubernetes/dcaegen2/templates/pod.yaml index 1cf5d9b08d..1d0d421061 100644 --- a/kubernetes/dcaegen2/templates/pod.yaml +++ b/kubernetes/dcaegen2/templates/pod.yaml @@ -7,7 +7,7 @@ metadata: spec: containers: - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + image: "{{ .Values.image.repository }}" imagePullPolicy: {{ .Values.image.pullPolicy }} volumeMounts: - name: installer-config-mount diff --git a/kubernetes/dcaegen2/values.yaml b/kubernetes/dcaegen2/values.yaml index c892f69e68..a2c87f6c6b 100644 --- a/kubernetes/dcaegen2/values.yaml +++ b/kubernetes/dcaegen2/values.yaml @@ -1,6 +1,5 @@ nsPrefix: onap location: dg2 image: - repository: nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.bootstrap - tag: v1.1.0 - pullPolicy: Always
\ No newline at end of file + repository: nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.bootstrap:v1.1.0 + pullPolicy: Always diff --git a/kubernetes/helm/starters/onap-app/.helmignore b/kubernetes/helm/starters/onap-app/.helmignore new file mode 100644 index 0000000000..f0c1319444 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/kubernetes/helm/starters/onap-app/Chart.yaml b/kubernetes/helm/starters/onap-app/Chart.yaml new file mode 100644 index 0000000000..46f2377703 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +description: <Short application description - this is visible via 'helm search'> +name: <onap-app> +version: 2.0.0
\ No newline at end of file diff --git a/kubernetes/helm/starters/onap-app/README.md b/kubernetes/helm/starters/onap-app/README.md new file mode 100644 index 0000000000..897a07392a --- /dev/null +++ b/kubernetes/helm/starters/onap-app/README.md @@ -0,0 +1,14 @@ +# Starter Helm Chart for ONAP Applications + +Clone the onap-app directory and rename it to the name for your new Helm Chart. + +Helm Charts for specific applications should be moved into the oom/kubernetes +directory. If the application is a common reusable Helm Chart (eg. mariadb), a +more appropriate location might be the oom/kubernetes/common directory. + +Edit each yaml file in the new Helm Chart directory, substituting real values +for those inside brackets (eg. `<onap-app>`). Some comments have been provided in +the file to help guide changes that need to be made. This starter Helm Chart is +in no way complete. It can serve as the basis for creating a new Helm Chart that +attempts to apply Helm best practices to ONAP applications being configured, +deployed and managed in Kubernetes. diff --git a/kubernetes/helm/starters/onap-app/requirements.yaml b/kubernetes/helm/starters/onap-app/requirements.yaml new file mode 100644 index 0000000000..acca8ef7e2 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: ~2.0.0 + repository: '@local'
\ No newline at end of file diff --git a/kubernetes/helm/starters/onap-app/resources/config/README.txt b/kubernetes/helm/starters/onap-app/resources/config/README.txt new file mode 100644 index 0000000000..5cc01497f5 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/resources/config/README.txt @@ -0,0 +1,10 @@ +This directory contains all external configuration files that +need to be mounted into an application container. + +See the configmap.yaml in the templates directory for an example +of how to load (ie map) config files from this directory, into +Kubernetes, for distribution within the k8s cluster. + +See deployment.yaml in the templates directory for an example +of how the 'config mapped' files are then mounted into the +containers. diff --git a/kubernetes/helm/starters/onap-app/resources/config/application.properties b/kubernetes/helm/starters/onap-app/resources/config/application.properties new file mode 100644 index 0000000000..496a15a531 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/resources/config/application.properties @@ -0,0 +1 @@ +sampleConfigKey=sampleConfigValue
\ No newline at end of file diff --git a/kubernetes/helm/starters/onap-app/templates/NOTES.txt b/kubernetes/helm/starters/onap-app/templates/NOTES.txt new file mode 100644 index 0000000000..2465e03634 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ include "common.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.externalPort }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }} +{{- end }} diff --git a/kubernetes/helm/starters/onap-app/templates/configmap.yaml b/kubernetes/helm/starters/onap-app/templates/configmap.yaml new file mode 100644 index 0000000000..5b8dde2bfa --- /dev/null +++ b/kubernetes/helm/starters/onap-app/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-configmap + namespace: {{ include "common.namespace" . }} +data: +{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
\ No newline at end of file diff --git a/kubernetes/helm/starters/onap-app/templates/deployment.yaml b/kubernetes/helm/starters/onap-app/templates/deployment.yaml new file mode 100644 index 0000000000..2006b885f1 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/templates/deployment.yaml @@ -0,0 +1,90 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: +#Example init container for dependency checking +# - command: +# - /root/ready.py +# args: +# - --container-name +# - mariadb +# env: +# - name: NAMESPACE +# valueFrom: +# fieldRef: +# apiVersion: v1 +# fieldPath: metadata.namespace +# image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}" +# imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} +# name: {{ include "common.name" . }}-readiness + containers: + - name: {{ include "common.name" . 
}} + image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}" + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.internalPort }} + # disable liveness probe when breakpoints set in debugger + # so K8s doesn't restart unresponsive container + {{- if eq .Values.liveness.enabled true }} + livenessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.liveness.periodSeconds }} + {{ end -}} + readinessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.readiness.periodSeconds }} + env: +#Example environment variable passed to container +# - name: DEBUG_FLAG +# value: {{ .Values.global.debugEnabled | default .Values.debugEnabled | quote }} + volumeMounts: + - mountPath: /etc/localtime + name: localtime + readOnly: true +#Example config file mount into container +# - mountPath: /opt/app/application.properties +# name: {{ include "common.name" . }}-config +# subPath: application.properties + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 10 }} + {{- end -}} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 10 }} + {{- end }} + + volumes: + - name: localtime + hostPath: + path: /etc/localtime +#Example config file mount into container +# - name: {{ include "common.fullname" . }}-config +# configMap: +# name: {{ include "common.fullname" . }}-configmap +# items: +# - key: application.properties +# path: application.properties + imagePullSecrets: + - name: "{{ include "common.namespace" . }}-docker-registry-key"
\ No newline at end of file diff --git a/kubernetes/helm/starters/onap-app/templates/service.yaml b/kubernetes/helm/starters/onap-app/templates/service.yaml new file mode 100644 index 0000000000..afa2e3b600 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/templates/service.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +# Example MSB registration annotation +# msb.onap.org/service-info: '[ +# { +# "serviceName": "so", +# "version": "v1", +# "url": "/ecomp/mso/infra", +# "protocol": "REST", +# "port": "8080", +# "visualRange":"1" +# } +# ]' +spec: + type: {{ .Values.service.type }} + ports: + {{if eq .Values.service.type "NodePort" -}} + - port: {{ .Values.service.externalPort }} + #Example internal target port if required + #targetPort: {{ .Values.service.internalPort }} + nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }} + name: {{ .Values.service.name }} + {{- else -}} + - port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.service.internalPort }} + name: {{ .Values.service.name }} + {{- end}} + selector: + app: {{ include "common.name" . }} + release: {{ .Release.Name }}
\ No newline at end of file diff --git a/kubernetes/helm/starters/onap-app/values.yaml b/kubernetes/helm/starters/onap-app/values.yaml new file mode 100644 index 0000000000..4d1f44fa36 --- /dev/null +++ b/kubernetes/helm/starters/onap-app/values.yaml @@ -0,0 +1,78 @@ +################################################################# +# Global configuration defaults. +################################################################# +global: + nodePortPrefix: 302 + repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== + readinessRepository: oomk8s + readinessImage: readiness-check:1.0.0 + loggingRepository: docker.elastic.co + loggingImage: beats/filebeat:5.5.0 + +################################################################# +# Application configuration defaults. +################################################################# +# application image +repository: nexus3.onap.org:10001 +image: <onap-app>:<1.2-STAGING-latest> +pullPolicy: Always + +# flag to enable debugging - application support required +debugEnabled: false + +# application configuration +# Example: +config: +# username: myusername +# password: mypassword + +# default number of instances +replicaCount: 1 + +nodeSelector: {} + +affinity: {} + +# probe configuration parameters +liveness: + initialDelaySeconds: 10 + periodSeconds: 10 + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + +readiness: + initialDelaySeconds: 10 + periodSeconds: 10 + +service: + #Example service definition with external, internal and node ports. + #Services may use any combination of ports depending on the 'type' of + #service being defined. + type: NodePort + name: <onap-app> + externalPort: <8080> + internalPort: <80> + nodePort: <replace with unused node port suffix eg. 
23> + +ingress: + enabled: false + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # + # Example: + # Configure resource requests and limits + # ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # Minimum memory for development is 2 CPU cores and 4GB memory + # Minimum memory for production is 4 CPU cores and 8GB memory +#resources: +# limits: +# cpu: 2 +# memory: 4Gi +# requests: +# cpu: 2 +# memory: 4Gi diff --git a/kubernetes/msb/values.yaml b/kubernetes/msb/values.yaml index 10e3be4230..d6c12eaaf6 100644 --- a/kubernetes/msb/values.yaml +++ b/kubernetes/msb/values.yaml @@ -19,13 +19,13 @@ iagPort: 80 iagPort_https: 443 iagNodePort: 30080 iagNodePort_https: 30443 -iagReplicas: 1 +iagReplicas: 2 eagPort: 80 eagPort_https: 443 eagNodePort: 30082 eagNodePort_https: 30446 -eagReplicas: 1 +eagReplicas: 2 kubeMasterUrl: https://kubernetes.default.svc.cluster.local:443 discoveryUrl: http://msb-discovery.{{ .Values.nsPrefix }}:10081 diff --git a/kubernetes/onap/requirements.yaml b/kubernetes/onap/requirements.yaml index a8fd3eb785..29331fbc0c 100644 --- a/kubernetes/onap/requirements.yaml +++ b/kubernetes/onap/requirements.yaml @@ -67,7 +67,7 @@ dependencies: repository: '@local' condition: portal.enabled - name: robot - version: ~0.1.0 + version: ~2.0.0 repository: '@local' condition: robot.enabled - name: sdc diff --git a/kubernetes/onap/values.yaml b/kubernetes/onap/values.yaml index 96455a0156..eb1eec4aac 100644 --- a/kubernetes/onap/values.yaml +++ b/kubernetes/onap/values.yaml @@ -64,7 +64,7 @@ policy: portal: enabled: false robot: # Robot Health Check - enabled: false + enabled: true sdc: enabled: false sdnc: diff --git 
a/kubernetes/robot/.helmignore b/kubernetes/robot/.helmignore index 586046af66..f0c1319444 100644 --- a/kubernetes/robot/.helmignore +++ b/kubernetes/robot/.helmignore @@ -1,7 +1,21 @@ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. - -# k8s scripts -ete-k8s.sh -demo-k8s.sh +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/kubernetes/robot/Chart.yaml b/kubernetes/robot/Chart.yaml index 1f59785ede..d37773dd61 100644 --- a/kubernetes/robot/Chart.yaml +++ b/kubernetes/robot/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 -description: A Helm chart for Kubernetes +description: A helm Chart for kubernetes-ONAP Robot name: robot -version: 0.1.0 +version: 2.0.0 diff --git a/kubernetes/robot/all-services.yaml b/kubernetes/robot/all-services.yaml deleted file mode 100644 index 1fbabe2df6..0000000000 --- a/kubernetes/robot/all-services.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: robot - name: robot -spec: - ports: - - port: 88 - nodePort: {{ .Values.nodePortPrefix }}09 - selector: - app: robot - type: NodePort diff --git a/kubernetes/robot/requirements.yaml b/kubernetes/robot/requirements.yaml new file mode 100644 index 0000000000..56029ab047 --- /dev/null +++ b/kubernetes/robot/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + - name: common + version: ~2.0.0 + # local reference to common chart, as it is + # a part of this chart's package and will not + # be published independently to a repo (at this point) + repository: '@local'
\ No newline at end of file diff --git a/kubernetes/robot/templates/NOTES.txt b/kubernetes/robot/templates/NOTES.txt new file mode 100644 index 0000000000..91d8ed42f1 --- /dev/null +++ b/kubernetes/robot/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.externalPort }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }} +{{- end }} diff --git a/kubernetes/robot/templates/all-services.yaml b/kubernetes/robot/templates/all-services.yaml deleted file mode 100644 index 604982b1e4..0000000000 --- a/kubernetes/robot/templates/all-services.yaml +++ /dev/null @@ -1,16 +0,0 @@ -#{{ if not .Values.disableRobotRobot }} -apiVersion: v1 -kind: Service -metadata: - labels: - app: robot - name: robot - namespace: "{{ .Values.nsPrefix }}" -spec: - ports: - - port: 88 - nodePort: {{ .Values.nodePortPrefix }}09 - selector: - app: robot - type: NodePort -#{{ end }}
\ No newline at end of file diff --git a/kubernetes/robot/templates/robot-dep-configmap.yaml b/kubernetes/robot/templates/configmap.yaml index 94e359dce7..fd26992273 100644 --- a/kubernetes/robot/templates/robot-dep-configmap.yaml +++ b/kubernetes/robot/templates/configmap.yaml @@ -1,25 +1,23 @@ -#{{ if not .Values.disableRobot }} apiVersion: v1 kind: ConfigMap metadata: - name: robot-eteshare-configmap - namespace: {{ .Values.nsPrefix }} + name: {{ include "common.name" . }}-eteshare-configmap + namespace: {{ include "common.namespace" . }} data: {{ tpl (.Files.Glob "resources/config/eteshare/config/*").AsConfig . | indent 2 }} --- apiVersion: v1 kind: ConfigMap metadata: - name: robot-resources-configmap - namespace: {{ .Values.nsPrefix }} + name: {{ include "common.name" . }}-resources-configmap + namespace: {{ include "common.namespace" . }} data: {{ tpl (.Files.Glob "resources/config/robot/resources/*").AsConfig . | indent 2 }} --- apiVersion: v1 kind: ConfigMap metadata: - name: lighttpd-authorization-configmap - namespace: {{ .Values.nsPrefix }} + name: {{ include "common.name" . }}-lighttpd-authorization-configmap + namespace: {{ include "common.namespace" . }} data: {{ tpl (.Files.Glob "resources/config/authorization").AsConfig . | indent 2 }} -#{{ end }} diff --git a/kubernetes/robot/templates/deployment.yaml b/kubernetes/robot/templates/deployment.yaml new file mode 100644 index 0000000000..596d622a4b --- /dev/null +++ b/kubernetes/robot/templates/deployment.yaml @@ -0,0 +1,82 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ include "common.name" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.robotReplicas }} + template: + metadata: + labels: + app: {{ include "common.name" . 
}} + release: {{ .Release.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}" + imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }} + ports: + - containerPort: {{ .Values.service.internalPort }} + # disable liveness probe when breakpoints set in debugger + # so K8s doesn't restart unresponsive container + {{- if eq .Values.liveness.enabled true }} + livenessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }} + periodSeconds: {{ .Values.liveness.periodSeconds }} + {{ end -}} + readinessProbe: + tcpSocket: + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }} + periodSeconds: {{ .Values.readiness.periodSeconds }} + volumeMounts: + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: robot-eteshare + mountPath: /share/config + - name: robot-resources + mountPath: /var/opt/OpenECOMP_ETE/robot/resources/asdc_interface.robot + subPath: asdc_interface.robot + - name: robot-resources + mountPath: /var/opt/OpenECOMP_ETE/robot/resources/policy_interface.robot + subPath: policy_interface.robot + - name: robot-resources + mountPath: /var/opt/OpenECOMP_ETE/robot/resources/sdngc_interface.robot + subPath: sdngc_interface.robot + - name: robot-lighttpd-authorization + mountPath: /etc/lighttpd/authorization + subPath: authorization + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 10 }} + {{- end -}} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 10 }} + {{- end }} + volumes: + - name: localtime + hostPath: + path: /etc/localtime + - name: robot-eteshare + configMap: + name: {{ include "common.name" . 
}}-eteshare-configmap + defaultMode: 0755 + - name: robot-resources + configMap: + name: {{ include "common.name" . }}-resources-configmap + - name: robot-lighttpd-authorization + configMap: + name: {{ include "common.name" . }}-lighttpd-authorization-configmap + imagePullSecrets: + - name: "{{ include "common.namespace" . }}-docker-registry-key" diff --git a/kubernetes/robot/templates/robot-deployment.yaml b/kubernetes/robot/templates/robot-deployment.yaml deleted file mode 100644 index 9f936e8275..0000000000 --- a/kubernetes/robot/templates/robot-deployment.yaml +++ /dev/null @@ -1,63 +0,0 @@ -#{{ if not .Values.disableRobotRobot }} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: robot - namespace: "{{ .Values.nsPrefix }}" -spec: - replicas: {{ .Values.robotReplicas }} - selector: - matchLabels: - app: robot - template: - metadata: - labels: - app: robot - name: robot - spec: - containers: - - image: {{ .Values.image.testsuite }} - imagePullPolicy: {{ .Values.pullPolicy }} - name: robot - volumeMounts: - - name: localtime - mountPath: /etc/localtime - readOnly: true - - name: robot-eteshare - mountPath: /share/config - - name: robot-resources - mountPath: /var/opt/OpenECOMP_ETE/robot/resources/asdc_interface.robot - subPath: asdc_interface.robot - - name: robot-resources - mountPath: /var/opt/OpenECOMP_ETE/robot/resources/policy_interface.robot - subPath: policy_interface.robot - - name: robot-resources - mountPath: /var/opt/OpenECOMP_ETE/robot/resources/sdngc_interface.robot - subPath: sdngc_interface.robot - - name: lighttpd-authorization - mountPath: /etc/lighttpd/authorization - subPath: authorization - ports: - - containerPort: 88 - readinessProbe: - tcpSocket: - port: 88 - initialDelaySeconds: 5 - periodSeconds: 10 - volumes: - - name: localtime - hostPath: - path: /etc/localtime - - name: robot-eteshare - configMap: - name: robot-eteshare-configmap - defaultMode: 0755 - - name: robot-resources - configMap: - name: 
robot-resources-configmap - - name: lighttpd-authorization - configMap: - name: lighttpd-authorization-configmap - imagePullSecrets: - - name: "{{ .Values.nsPrefix }}-docker-registry-key" -#{{ end }} diff --git a/kubernetes/robot/templates/service.yaml b/kubernetes/robot/templates/service.yaml new file mode 100644 index 0000000000..90d0ab8d18 --- /dev/null +++ b/kubernetes/robot/templates/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.name" . }} + namespace: {{ include "common.namespace" . }} + labels: + app: {{ include "common.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + ports: + {{if eq .Values.service.type "NodePort" -}} + - port: {{ .Values.service.internalPort }} + nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.externalPort }} + name: {{ .Values.service.name }} + {{- else -}} + - port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.service.internalPort }} + name: {{ .Values.service.name }} + {{- end}} + selector: + app: {{ include "common.name" . }} + release: {{ .Release.Name }} diff --git a/kubernetes/robot/values.yaml b/kubernetes/robot/values.yaml index ea5d2ae856..a2da2b71a8 100644 --- a/kubernetes/robot/values.yaml +++ b/kubernetes/robot/values.yaml @@ -1,7 +1,31 @@ -nsPrefix: onap +################################################################# +# Global configuration defaults. 
+################################################################# +global: # global defaults + nodePortPrefix: 302 + repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== + readinessRepository: oomk8s + readinessImage: readiness-check:1.0.0 + loggingRepository: docker.elastic.co + loggingImage: beats/filebeat:5.5.0 + +subChartsOnly: + enabled: true + +# application image +repository: nexus3.onap.org:10001 +image: openecomp/testsuite:1.1-STAGING-latest pullPolicy: Always -nodePortPrefix: 302 -robotReplicas: 1 + +# flag to enable debugging - application support required +debugEnabled: false + +################################################################# +# Application configuration defaults. +################################################################# + +# openstack configuration + demoArtifactsVersion: "1.1.0-SNAPSHOT" openStackPrivateNetCidr: "192.168.30.0" openStackFlavourMedium: "m1.medium" @@ -15,5 +39,53 @@ openStackPrivateSubnetId: "e8f51956-00dd-4425-af36-045716781ffc" openStackTenantId: "47899782ed714295b1151681fdfd51f5" openStackUserName: "vnf_user" ubuntuImage: "Ubuntu_14.04.5_LTS" -image: - testsuite: nexus3.onap.org:10001/openecomp/testsuite:1.1-STAGING-latest + +# default number of instances +robotReplicas: 1 + +nodeSelector: {} + +affinity: {} + +# probe configuration parameters +liveness: + initialDelaySeconds: 10 + periodSeconds: 10 + # necessary to disable liveness probe when setting breakpoints + # in debugger so K8s doesn't restart unresponsive container + enabled: true + +readiness: + initialDelaySeconds: 10 + periodSeconds: 10 + + +service: + type: NodePort + name: robot + externalPort: "09" + internalPort: 88 + + +ingress: + enabled: false + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # + # Example: + # Configure resource requests and limits + # ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # Minimum memory for development is 2 CPU cores and 4GB memory + # Minimum memory for production is 4 CPU cores and 8GB memory +#resources: +# limits: +# cpu: 2 +# memory: 4Gi +# requests: +# cpu: 2 +# memory: 4Gi diff --git a/kubernetes/so/values.yaml b/kubernetes/so/values.yaml index 4700bb64e5..b96a97ee7f 100644 --- a/kubernetes/so/values.yaml +++ b/kubernetes/so/values.yaml @@ -5,7 +5,7 @@ global: # global defaults nodePortPrefix: 302 repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ== readinessRepository: oomk8s - readinessImage: readiness-check:1.0.0 + readinessImage: readiness-check:1.1.0 loggingRepository: docker.elastic.co loggingImage: beats/filebeat:5.5.0 |