-rw-r--r-- cloudify-onap/blueprint.yaml | 188
-rw-r--r-- cloudify-onap/docker-custom-readiness/Dockerfile | 21
-rw-r--r-- cloudify-onap/docker-custom-readiness/ready.py | 85
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py | 14
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py | 47
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py | 14
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py | 20
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py | 27
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py | 62
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py | 63
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py (renamed from cloudify/scripts/onap/create_namespace.py) | 106
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py | 230
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py | 67
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py | 58
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt | 1
-rwxr-xr-x cloudify-onap/plugins/onap-installation-plugin/plugin.yaml | 87
-rw-r--r-- cloudify-onap/plugins/onap-installation-plugin/requirements.txt | 1
-rwxr-xr-x cloudify-onap/plugins/onap-installation-plugin/setup.py | 40
-rw-r--r-- cloudify/inputs/message-router-blueprint.yaml.example | 25
-rw-r--r-- cloudify/scripts/configure_node.py | 49
-rw-r--r-- cloudify/scripts/create.py | 72
-rw-r--r-- cloudify/scripts/onap/configure_docker_secret_workaround.py | 40
-rw-r--r-- cloudify/scripts/onap/create_init_pod.py | 65
-rw-r--r-- cloudify/scripts/onap/create_resources_services.py | 131
-rw-r--r-- cloudify/scripts/onap/delete_init_pod.py | 64
-rw-r--r-- cloudify/scripts/onap/delete_namespace.py | 101
-rw-r--r-- cloudify/scripts/onap/delete_resources_services.py | 132
-rw-r--r-- cloudify/scripts/tasks.py | 24
-rw-r--r-- cloudify/types/kubernetes.yaml | 91
-rw-r--r-- cloudify/types/onap.yaml | 89
-rw-r--r-- kubernetes/aai/resources/model-loader/conf/logback.xml (renamed from kubernetes/config/docker/init/src/config/log/aai/model-loader/logback.xml) | 324
-rw-r--r-- kubernetes/aai/resources/resources/conf/logback.xml | 345
-rw-r--r-- kubernetes/aai/resources/search-data-service/conf/logback.xml | 188
-rw-r--r-- kubernetes/aai/resources/sparky-be/conf/logback.xml | 200
-rw-r--r-- kubernetes/aai/resources/traversal/conf/logback.xml (renamed from kubernetes/config/docker/init/src/config/log/aai/ajsc-aai/logback.xml) | 640
-rw-r--r-- kubernetes/aai/templates/aai-resources-deployment.yaml | 43
-rw-r--r-- kubernetes/aai/templates/aai-traversal-deployment.yaml | 41
-rw-r--r-- kubernetes/aai/templates/modelloader-deployment.yaml | 43
-rw-r--r-- kubernetes/aai/templates/search-data-service-deployment.yaml | 35
-rw-r--r-- kubernetes/aai/templates/sparky-be-deployment.yaml | 35
-rw-r--r-- kubernetes/aai/values.yaml | 1
-rw-r--r-- kubernetes/appc/templates/appc-deployment.yaml | 7
-rw-r--r-- kubernetes/appc/templates/dgbuilder-deployment.yaml | 2
-rw-r--r-- kubernetes/appc/values.yaml | 2
-rw-r--r-- kubernetes/config/docker/init/src/config/appc/conf/aaiclient.properties | 202
-rw-r--r-- kubernetes/config/docker/init/src/config/appc/conf/appc.properties | 42
-rw-r--r-- kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql | 151
-rw-r--r-- kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql | 134
-rw-r--r-- kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql | 104
-rw-r--r-- kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql | 37
-rw-r--r-- kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql | 58
-rw-r--r-- kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql | 2
-rwxr-xr-x kubernetes/config/docker/init/src/config/mso/mso/mso-docker.json | 4
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/dbcapp.properties | 8
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/fusion.properties | 34
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/portal.properties | 25
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/system.properties | 63
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/fusion.properties | 6
-rw-r--r-- kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/logback.xml | 166
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/portal.properties | 5
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/system.properties | 13
-rw-r--r-- kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/fusion.properties | 2
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/portal.properties | 5
-rwxr-xr-x kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/system.properties | 9
-rw-r--r-- kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPWIDGETMS/application.properties | 2
-rwxr-xr-x kubernetes/config/docker/init/src/config/robot/eteshare/config/vm_properties.py | 2
-rwxr-xr-x kubernetes/config/docker/init/src/config/sdnc/conf/aaiclient.properties | 231
-rwxr-xr-x kubernetes/config/docker/init/src/config/sdnc/conf/admportal.json | 2
-rw-r--r-- kubernetes/oneclick/aaf.sh | 7
-rwxr-xr-x kubernetes/oneclick/aai.sh | 7
-rwxr-xr-x kubernetes/oneclick/appc.sh | 8
-rw-r--r-- kubernetes/oneclick/clamp.sh | 7
-rwxr-xr-x kubernetes/oneclick/createAll.bash | 8
-rwxr-xr-x kubernetes/oneclick/dcae.sh | 18
-rwxr-xr-x kubernetes/oneclick/deleteAll.bash | 6
-rwxr-xr-x kubernetes/oneclick/message-router.sh | 7
-rwxr-xr-x kubernetes/oneclick/mso.sh | 7
-rwxr-xr-x kubernetes/oneclick/policy.sh | 12
-rwxr-xr-x kubernetes/oneclick/portal.sh | 8
-rwxr-xr-x kubernetes/oneclick/robot.sh | 5
-rwxr-xr-x kubernetes/oneclick/sdc.sh | 9
-rwxr-xr-x kubernetes/oneclick/sdnc.sh | 8
-rwxr-xr-x kubernetes/oneclick/vfc.sh | 17
-rwxr-xr-x kubernetes/oneclick/vid.sh | 6
-rw-r--r-- kubernetes/oneclick/vnfsdk.sh | 7
-rwxr-xr-x kubernetes/portal/templates/portal-apps-deployment.yaml | 10
-rwxr-xr-x kubernetes/portal/templates/portal-mariadb-deployment.yaml | 4
-rw-r--r-- kubernetes/portal/values.yaml | 6
-rw-r--r-- kubernetes/sdc/templates/sdc-be.yaml | 6
-rw-r--r-- kubernetes/sdnc/templates/all-services.yaml | 5
-rw-r--r-- kubernetes/sdnc/templates/dgbuilder-deployment.yaml | 4
-rw-r--r-- kubernetes/sdnc/templates/sdnc-deployment.yaml | 9
-rw-r--r-- kubernetes/sdnc/templates/web-deployment.yaml | 14
-rw-r--r-- kubernetes/sdnc/values.yaml | 6
-rwxr-xr-x kubernetes/vfc/templates/all-services.yaml | 143
-rw-r--r-- onap-blueprint.yaml | 223
96 files changed, 3356 insertions, 2448 deletions
diff --git a/cloudify-onap/blueprint.yaml b/cloudify-onap/blueprint.yaml
new file mode 100644
index 0000000000..96d297dbd3
--- /dev/null
+++ b/cloudify-onap/blueprint.yaml
@@ -0,0 +1,188 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+  Blueprint deploys all ONAP kubernetes resources defined in YAML files on an existing kubernetes cluster.
+ The following pre-setup steps are assumed, but not required:
+ - Create Cloudify Example Environment: https://github.com/cloudify-examples/cloudify-environment-setup.
+ - Create Kubernetes Cluster: https://github.com/cloudify-examples/simple-kubernetes-blueprint.
+
+imports:
+ - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
+ # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.1rc1/cloudify_kubernetes_plugin-1.2.1rc1-py27-none-linux_x86_64-centos-Core.wgn
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
+# - https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip
+ - plugins/onap-installation-plugin/plugin.yaml
+
+inputs:
+ kubernetes_configuration_file_content:
+ description: >
+ File content of kubernetes master YAML configuration
+ default:
+ apiVersion: v1
+ clusters:
+ - cluster:
+ certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01UQXdNakV6TVRNeU4xb1hEVEkzTURrek1ERXpNVE15TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTDN4CnRqRllVd25GVmxnNFZhd3BNaFB1N2hDOXVNSEJmUU9JclIrU1B4TElaMUVjTERXdTBod1pWWloxSUF4K0NrQlcKU1BmVDdXSE5zNld4RHAyRUpoVlp2TEFBVW9nem5aSlJnU0Z2RW1NZWg4cXhTaSsrQmtNNmxvTytTTVE0eFJlVApFbTZpd3JrdFZNUXVocDhkVG01MGpIUjMyelBTQklZRGpQejYwWEpzNXp2MEJzL0VlSHRDS0RxV2ZoZWpYZjBECmhQNi9DSU45UjBwNk9UZTdSYUV6dUtlblRtTml0dzNJTkg4b3BhYlY2TW8ycEFvbVRRUG5pZTVZZ2EzNGNndXEKTXpYSmVQUE1CQSs4UDNmS3BFcDJQU3UzWUF3SGg4VWo0MlRDTjMxQ1BBM3F4dUxGejBwOGw5ckJ5WVh6amRybQpwNFZIV1FGOTh3eXBFUW0xNFdVQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFBS3Q2a2E2R04rWksveEx5Nk1tcWpSQTZpa2QKMjYwS3g0ak9FWUtrNWtTemlPOE5Iazhza3JNNzZSVksxdUwxZkFzU0N5SnYyOUJNb3NGQkhISDltOGNBbnFFcgowRGVvdDhHY0d6OWh6ZTlOcGhGNkFNNEhCZTFDbDhkME9vdm91K3dsYk1VTDN4M1hNUHZmMDRKajZsMUJMdEx2Cjh2Nk5Eb0lWdkppS1FLM0tuK3BHUVBZd2x1UEFoSEZQZUdlZ1gvTnBlNXJjMVZGRUtLUkRQRVFRS0xrcE5TZ2IKNzgwZ2ZMTkxJUTJsQnczZHRjazcyelkyK0dlWlp3MEM2ckk5QUhBekg3ZXpZY0pvT2VmVjArZE9zV2lJNVB4UgowZnNYSlNEOVR4SDVVMlRSQ25KZ214M1ZLTGl5OTZyOGsrOTdkWlAvN3h2Q3dGSU9wc2I2UGcxdjNxZz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+ server: https://192.168.120.5:6443
+ name: kubernetes
+ contexts:
+ - context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ name: kubernetes-admin@kubernetes
+ current-context: kubernetes-admin@kubernetes
+ kind: Config
+ preferences: {}
+ users:
+ - name: kubernetes-admin
+ user:
+ client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRUFMaVFKV0d2ZmN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4TnpFd01ESXhNekV6TWpkYUZ3MHhPREV3TURJeE16RXpNamxhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVOWFFJSlNzSmdtWndXMTEKcEtRZTV5TkQ1THBUdDY5dENKeEpoWXZPbElUOTRwVmdtVnBLNjhlMUNhQ2YxYkt0N2hCMUFOSVJHam8xMTYxNgpGUVRkaE5ZN0tnZFFUNGsrMC9mV0hBZzdCYnhKSDFCdEVvOGorTFk0dDRMNUVCS1l2dTN4Z21NS2E3SGMxZiszCkRQdWtndDZUZm1JazZEM0JkTHQyays2ZDhMclAzYmtkVmQwS1o2OWtnRnBuSWxtNkdrQWhrZWRleE5PQ21vRE8KU0hHMEJvaWphSjRuNUUrWGtNUEpHM3c3aUhNeE5RQ1Fob1lVOC84MGZHWUd5Y2M5TklLSlRFWmVjVGxOeCtPVApyd1F4aWRWa0FNK3I2K085ZlVMSmV1VDlhbXhzOEJNbVNwQko2TU0xelZaTDA5RGp3QytKSFRmY25IMTRBTTJ5CnN3c1hSUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCNkRNanRFQkMvSTlkK1VqYWNXM0R5K0pZd21hYWdOUHJycApyaWV0OExlMHFuOS9GUkRpcnpTZldUNXFSNzg3dEpqRWdIaHlvZzFQWE1ZY3l6ajhRS0hyTXJXZUJMck5EQ08wCkVFTGMzSnNyUkdiK0MxWk1MQ3k1VmRVd2Q4ZnYzNm5vRlZOelVneitBTG9WSmVUUEdBYUxTbjA5OWo0RXhxUXoKK3FYTDltZEpQc1FwVVR4OTN3Z1ppcjJRMzcxdXovU21UYkxsOEhjMUtkbC9MODFXM0xYY3JrbFAvbFBkUG9yVgpjUmFBYm94NjBTMmxFTE16dUZHRThaR0lPYlFseDJUdzAxcENvckQ0NHVqOHU1ZmRiWkxhRHZuSHZDODlvSjhNCml5VnU5dkxSajhzbmZvb0ZLMjVqOGI1SzNGMGlLcWRzaktiSWVPS3dKWWkwdGNPWC9GYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+ client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdU5YUUlKU3NKZ21ad1cxMXBLUWU1eU5ENUxwVHQ2OXRDSnhKaFl2T2xJVDk0cFZnCm1WcEs2OGUxQ2FDZjFiS3Q3aEIxQU5JUkdqbzExNjE2RlFUZGhOWTdLZ2RRVDRrKzAvZldIQWc3QmJ4SkgxQnQKRW84aitMWTR0NEw1RUJLWXZ1M3hnbU1LYTdIYzFmKzNEUHVrZ3Q2VGZtSWs2RDNCZEx0MmsrNmQ4THJQM2JrZApWZDBLWjY5a2dGcG5JbG02R2tBaGtlZGV4Tk9DbW9ET1NIRzBCb2lqYUo0bjVFK1hrTVBKRzN3N2lITXhOUUNRCmhvWVU4LzgwZkdZR3ljYzlOSUtKVEVaZWNUbE54K09UcndReGlkVmtBTStyNitPOWZVTEpldVQ5YW14czhCTW0KU3BCSjZNTTF6VlpMMDlEandDK0pIVGZjbkgxNEFNMnlzd3NYUlFJREFRQUJBb0lCQVFDTjVnL2ZRMlFaMjVyLwp4Vkhnc09MTW5lVThqdE5BbkZ1L2xZUllFeW5vR1dWejh5dnk3eisvWFZVZDdBTnBJc0NtbUZuSzVIcWV3UHVlCjlaNVl6cUJsbjdaU0lNVkdHbDV5aXIwRVRrbEpzUEFPSzBkaFRTVFhoc1ZxRlJHeWhpUmZ4L3AyR0NUaTR1eGcKR2UzaytMZW1oTXVFNWtIaFRhL2NOeUxrbFBjSjNJTHl5bnNOSmtwSlI4M0FBb1NxVkVObHhmQVFLT3VUVnNqOApRTXF4ekNMblU1RVFYc0pCamVNa0YrVllFdVRUZENUUm8vWXluR3g0K2ptR3IxTGNRRExjbllpTGtuS2YvTjNRCmpBV2QwUGVZS2p6V0M3aGs3a0JKdEl5MjcyVEtudS9nN3FIQktSVVNSUnBnWHFsTGJMZDVpZTdzTXJEdnMyd3AKMjhxK01XbE5Bb0dCQU5CSzVDc2JGNSsxZkx3VldPaWUxRlpTSVAzMnQvRWJvODJkS1VIZWxGVldEQXV3cXRUTQpsT2psUnRZVkxRODV1WWtxMExBRkl2TjNlNDlmcWQ3aUk3cEE4WE5uZnovWVozRGFEREZ0cVRuZU84VmlOTHkvCjd0SUh4bWVwWHJpay9GUXFNZjNmNnNiYzBTcDgvSXdXWlZ4Y2hmOVVFV3pOYmZpNDZoV3BjTjlUQW9HQkFPTXIKZzhrMHFlU01teTc5Z3pwMTVvT0VMWW9VdkhLZTJibk8wbnA1aldhbVprdGR1blRNMGg2dmdLQTF5UnNnejFjNApncmtZUkp5V1JhRElYb0YzOVdvMWJrbHZvaWN5NURqWUtBK0hKWUVUT0pmOC9odzJ1WngzTk54bm5UZXpJSVA5CnVDZHlJSDhRNFU1VkloeWVXTDV5WlR6WGs3YlhyRVllZjB3cW81UUhBb0dBZlhiZkFXZ01UdE5QRXl2NHBnWWwKK09qM25vSmlRZ1ZZSVo0dEg5UW1uVkI3YVA4OXAzaWpxYldSZVpxL3paQUR4aS9ZREc3TG9zT3gxYWJWOTc5WApyZlU0ZXo4NFV3alRKaEx4alVSMHpycWlYajdOYlhSZk1ud2tjb1IrM2RIamUvNytwZUdlMWJKays2YlZxTHhFCnllR2hoUzdxWUJOTnpxZnp1S0Nic0trQ2dZRUF6N2g4ZXNEekVJOFpYekJrakZJK2c4ZWJOSVdkZzFtSlVRT3oKSmxaN1czK1FUaDNZWHZEaXVUaFZieWwvU0pVSndvRmoxd2cveE1jTHgrY3ZzMGNUV3hpY1RmNEwwYmdSUTRFegpzRzh0ZGdjeldwYjFiS3NGc2ZLMm5Vc0pVV016dWoySDVGblJLUjh3UmNaR3VOQjU2VHNGSTBza1FLNlpVa3lVCnJmclhOSVVDZ1lFQW83NGp6NnBJbDgwOU51dERZeHVON3J4RzYrTVMwRkVRa2tTSGdtZTlvN0Y0QjNWRHJ6WE4Ka1g3dC95cm1ieHBjK0R2VmUzWm1hWHE1QXJzdjRVbW5Za1ArNCs0L2REcWdmbjd3ZmVKUkpzU1Uzd2V0YWJnRwpDQW5xMFpLR3RJWWhud1h5cWF6elBsUUFteFIxWUdDczIxL05kUVJvWDdsOFdyaUJmbkpVM3hZPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
+
+ namespace_prefix:
+ type: string
+ description: >
+      Kubernetes namespace name prefix which will be used for all ONAP apps
+ default: onap
+
+dsl_definitions:
+ options: &app_options
+ namespace:
+ concat: [{ get_input: namespace_prefix }, '-', { get_property: [SELF, name] }]
+
+node_templates:
+ kubernetes_master:
+ type: cloudify.kubernetes.nodes.Master
+ properties:
+ configuration:
+ file_content: { get_input: kubernetes_configuration_file_content }
+
+ onap_environment:
+ type: cloudify.onap.kubernetes.Environment
+ properties:
+ namespace: { get_input: namespace_prefix }
+ init_pod: kubernetes/config
+ options:
+ namespace: { get_input: namespace_prefix }
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+
+# mso_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: mso
+# path: kubernetes/mso
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+ message_router_app:
+ type: cloudify.onap.kubernetes.App
+ properties:
+ name: message-router
+ path: kubernetes/message-router
+ options: *app_options
+ relationships:
+ - type: cloudify.kubernetes.relationships.managed_by_master
+ target: kubernetes_master
+ - type: cloudify.relationships.depends_on
+ target: onap_environment
+#
+# sdc_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: sdc
+# path: kubernetes/sdc
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# aai_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: aai
+# path: kubernetes/aai
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# robot_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: robot
+# path: kubernetes/robot
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# vid_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: vid
+# path: kubernetes/vid
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# sdnc_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: sdnc
+# path: kubernetes/sdnc
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# portal_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: portal
+# path: kubernetes/portal
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# policy_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: policy
+# path: kubernetes/policy
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
+#
+# appc_app:
+# type: cloudify.onap.kubernetes.App
+# properties:
+# name: appc
+# path: kubernetes/appc
+# options: *app_options
+# relationships:
+# - type: cloudify.kubernetes.relationships.managed_by_master
+# target: kubernetes_master
+# - type: cloudify.relationships.depends_on
+# target: onap_environment
diff --git a/cloudify-onap/docker-custom-readiness/Dockerfile b/cloudify-onap/docker-custom-readiness/Dockerfile
new file mode 100644
index 0000000000..d42456d336
--- /dev/null
+++ b/cloudify-onap/docker-custom-readiness/Dockerfile
@@ -0,0 +1,21 @@
+FROM ubuntu:16.04
+
+ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
+# Setup Corporate proxy
+ENV https_proxy ${HTTPS_PROXY}
+ENV http_proxy ${HTTP_PROXY}
+
+# Additional packages
+RUN apt-get update
+RUN apt-get install -y apt-utils git wget curl dnsutils python python-pip jq net-tools coreutils vim
+
+RUN pip install requests pyyaml kubernetes==1.0.2
+
+
+ENV CERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ENV TOKEN="/var/run/secrets/kubernetes.io/serviceaccount/token"
+
+COPY ready.py /root/ready.py
+RUN chmod a+x /root/ready.py
+#ENTRYPOINT /root/ready.py
+
diff --git a/cloudify-onap/docker-custom-readiness/ready.py b/cloudify-onap/docker-custom-readiness/ready.py
new file mode 100644
index 0000000000..22b24d345d
--- /dev/null
+++ b/cloudify-onap/docker-custom-readiness/ready.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+#from kubernetes import client, config
+import kubernetes
+import time, argparse, logging, sys, os, base64
+import yaml
+
+#setup logging
+log = logging.getLogger(__name__)
+handler = logging.StreamHandler(sys.stdout)
+handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+handler.setLevel(logging.DEBUG)
+log.addHandler(handler)
+log.setLevel(logging.DEBUG)
+
+
+def is_ready(container_name):
+ log.info( "Checking if " + container_name + " is ready")
+
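+    # Load the kubeconfig passed in via the K8S_CONFIG_B64 environment variable
+    # (decoded in get_k8s_config_env) and query pod statuses in the target namespace.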
+ kubernetes.config.kube_config.KubeConfigLoader(config_dict=get_k8s_config_env()).load_and_set()
+ client = kubernetes.client
+ namespace = get_namespace_env()
+ v1 = client.CoreV1Api()
+
+ ready = False
+
+ try:
+ response = v1.list_namespaced_pod(namespace=namespace, watch=False)
+ for i in response.items:
+ for s in i.status.container_statuses:
+ if s.name == container_name:
+ log.debug ( "response %s" % response )
+ ready = s.ready
+ if not ready:
+ log.info( container_name + " is not ready.")
+ else:
+ log.info( container_name + " is ready!")
+ else:
+ continue
+ return ready
+ except Exception as e:
+ log.error("Exception when calling list_namespaced_pod: %s\n" % e)
+
+
+def get_k8s_config_env():
+ try:
+        k8s_config_env = os.environ["K8S_CONFIG_B64"]
+        decoded = base64.b64decode(k8s_config_env)
+        return yaml.load(decoded)
+    except KeyError:
+        raise Exception("K8S_CONFIG_B64 variable is not set.")
+
+
+def get_namespace_env():
+ try:
+        namespace_env = os.environ["NAMESPACE"]
+        return namespace_env
+    except KeyError:
+        raise Exception("NAMESPACE variable is not set.")
+
+
+def main(args):
+
+ # args are a list of container names
+ for container_name in args:
+        # 10 min timeout, TODO: make configurable
+ timeout = time.time() + 60 * 10
+ while True:
+ ready = is_ready(container_name)
+ if ready is True:
+ break
+ elif time.time() > timeout:
+ log.warning( "timed out waiting for '" + container_name + "' to be ready")
+ exit(1)
+ else:
+ time.sleep(5)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Process some names.')
+ parser.add_argument('--container-name', action='append', required=True, help='A container name')
+ args = parser.parse_args()
+ arg_dict = vars(args)
+
+ for arg in arg_dict.itervalues():
+ main(arg)
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py
new file mode 100644
index 0000000000..19a30ba43d
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py
@@ -0,0 +1,14 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py
new file mode 100644
index 0000000000..fc8af2eea4
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py
@@ -0,0 +1,47 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+
+from cloudify.decorators import operation
+
+from common import namespace
+from common import resources_services
+from common import workarounds
+
+
+@operation
+def configure_docker_secret(**kwargs):
+ workarounds.configure_secret()
+
+
+@operation
+def create_namespace(**kwargs):
+ namespace.do_create_namespace()
+
+
+@operation
+def create_resources_services(**kwargs):
+    resources_services.create_resources()
+
+
+@operation
+def delete_namespace(**kwargs):
+ namespace.do_delete_namespace()
+
+
+@operation
+def delete_resources_services(**kwargs):
+    resources_services.delete_resources()
+
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py
new file mode 100644
index 0000000000..19a30ba43d
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py
@@ -0,0 +1,14 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py
new file mode 100644
index 0000000000..493a44f16f
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py
@@ -0,0 +1,20 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+HELM_URL = 'https://kubernetes-helm.storage.googleapis.com/helm-canary-linux-amd64.tar.gz'
+OOM_GIT_URL = 'https://gerrit.onap.org/r/oom.git'
+
+RT_HELM_CLI_PATH = "helm_cli_path"
+RT_APPS_ROOT_PATH = "app_root_path"
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py
new file mode 100644
index 0000000000..48d49e0403
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py
@@ -0,0 +1,27 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+from cloudify import ctx
+
+
+def save_deployment_result(key):
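+    # Stash the kubernetes plugin's last result under a per-resource key and clear the
+    # shared 'kubernetes' runtime property so the next create call starts clean.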
+ result = ctx.instance.runtime_properties['kubernetes']
+ ctx.instance.runtime_properties[key] = result
+ ctx.instance.runtime_properties['kubernetes'] = {}
+
+
+def set_deployment_result(key):
+ result = ctx.instance.runtime_properties.pop(key)
+    ctx.instance.runtime_properties['kubernetes'] = result
\ No newline at end of file
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py
new file mode 100644
index 0000000000..4404f6f832
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py
@@ -0,0 +1,62 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+import urllib
+import tarfile
+import os
+import tempfile
+from git import Repo
+
+def get_helm_path(url):
+ tarball = _fetch_helm(url)
+ helm_dir = _get_tmp_file_name()
+ _untar_helm_archive(tarball, helm_dir)
+ helm_binary_path = _find_file('helm', helm_dir)
+ return helm_binary_path
+
+
+def get_apps_root_path(git_url):
+ dst_repo_path = _get_tmp_file_name()
+ Repo.clone_from(git_url, dst_repo_path)
+    return dst_repo_path
+
+def _fetch_helm(url):
+ dst_tar_path = _get_tmp_file_name()
+
+    opener = urllib.URLopener()
+    opener.retrieve(url, dst_tar_path)
+
+ return dst_tar_path
+
+def _untar_helm_archive(tar_path, helm_dir):
+ helm_tar = tarfile.open(tar_path)
+ helm_tar.extractall(helm_dir)
+ helm_tar.close()
+
+
+def _find_file(filename, base_path):
+ for root, dirs, files in os.walk(base_path):
+ for name in files:
+ if name == filename:
+ return os.path.abspath(os.path.join(root, name))
+
+ raise Exception('Cannot find helm binary')
+
+
+def _get_tmp_file_name():
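+    # Builds a random, not-yet-created path under the default temp dir using tempfile
+    # internals; tempfile.mkdtemp() would be a safer public alternative.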
+ return '{}/{}'.format(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
+
+
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py
new file mode 100644
index 0000000000..1376818b7b
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py
@@ -0,0 +1,63 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+from cloudify import ctx
+import yaml
+
+import constants
+import resources_services
+
+SERVICES_FILE_PARTS_SEPARATOR = '---'
+
+
+def do_create_init_pod():
+ ctx.logger.info('Creating init pod')
+
+ yaml_config = resources_services.render_chart(
+ ctx.node.properties["init_pod"],
+ _retrieve_root_path(),
+ _retrieve_helm_cli_path()
+ )
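+    # helm template emits multiple YAML documents separated by '---'; index 2 is assumed
+    # here to be the init pod definition within the rendered config chart.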
+ yaml_content_part = yaml_config.split(SERVICES_FILE_PARTS_SEPARATOR)[2]
+ enhanced_yaml = _add_openstack_envs(yaml_content_part)
+
+ resources_services.create_resource(enhanced_yaml)
+
+ ctx.logger.info('Init pod created successfully')
+
+
+def do_delete_init_pod():
+ ctx.logger.info('Deleting init pod')
+
+ ctx.logger.info('Init pod deleted successfully')
+
+def _add_openstack_envs(yaml_content):
+ input_dict = yaml.load(yaml_content)
+
+ container_dict = input_dict['spec']['containers'][0]
+ container_dict.pop('envFrom')
+
+ openstack_envs = ctx.node.properties["openstack_envs"]
+    for name, value in openstack_envs.items():
+        ctx.logger.debug("adding env {}={}".format(name, value))
+        container_dict['env'].append({'name': name, 'value': value})
+
+ return input_dict
+
+def _retrieve_root_path():
+ return ctx.instance.runtime_properties.get(constants.RT_APPS_ROOT_PATH, None)
+
+def _retrieve_helm_cli_path():
+    return ctx.instance.runtime_properties.get(constants.RT_HELM_CLI_PATH, None)
\ No newline at end of file
diff --git a/cloudify/scripts/onap/create_namespace.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py
index c0f1f19680..d1336768ac 100644
--- a/cloudify/scripts/onap/create_namespace.py
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py
@@ -1,37 +1,67 @@
-import pip
-
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+import cloudify_kubernetes.tasks as kubernetes_plugin
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
+import deployment_result
-def _import_or_install():
- try:
- import yaml
- except ImportError:
- pip.main(["install", "pyaml"])
- try:
- import cloudify_kubernetes.tasks as kubernetes_plugin
- except ImportError:
- pip.main([
- "install",
- "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
- ])
+def do_create_namespace():
+ namespace = _retrieve_namespace()
+ ctx.logger.info('Creating namespace: {0}'.format(namespace))
- import yaml
- import cloudify_kubernetes.tasks as kubernetes_plugin
+ namespace_resource_template = _prepare_namespace_resource_template(
+ namespace
+ )
- return yaml, kubernetes_plugin
+ ctx.logger.debug(
+ 'Kubernetes object which will be deployed: {0}'
+ .format(namespace_resource_template)
+ )
+ kubernetes_plugin.custom_resource_create(**namespace_resource_template)
+ deployment_result.save_deployment_result('namespace')
+ ctx.logger.info('Namespace created successfully')
-def _retrieve_namespace():
- namespace = ctx.node.properties.get(
- 'namespace',
- ctx.node.properties
- .get('options', {})
- .get('namespace', None)
+
+def do_delete_namespace():
+ namespace = _retrieve_namespace()
+ ctx.logger.info('Deleting namespace: {0}'.format(namespace))
+
+ namespace_resource_template = _prepare_namespace_resource_template(
+ namespace
+ )
+
+ ctx.logger.debug(
+ 'Kubernetes object which will be deleted: {0}'
+ .format(namespace_resource_template)
)
+ deployment_result.set_deployment_result('namespace')
+ kubernetes_plugin.custom_resource_delete(**namespace_resource_template)
+ ctx.logger.info('Namespace deleted successfully')
+
+
+
+def _retrieve_namespace():
+
+ default_namespace = ctx.node.properties.get('options', {}).get('namespace')
+ namespace = ctx.node.properties.get('namespace', default_namespace)
+
if not namespace:
raise NonRecoverableError(
'Namespace is not defined (node={})'.format(ctx.node.name)
@@ -69,33 +99,3 @@ def _prepare_namespace_resource_template(name):
}
}
}
-
-
-def _save_deployment_result(key):
- result = ctx.instance.runtime_properties['kubernetes']
- ctx.instance.runtime_properties[key] = result
- ctx.instance.runtime_properties['kubernetes'] = {}
-
-
-def _do_create_namespace(kubernetes_plugin):
- namespace = _retrieve_namespace()
- ctx.logger.info('Creating namespace: {0}'.format(namespace))
-
- namespace_resource_template = _prepare_namespace_resource_template(
- namespace
- )
-
- ctx.logger.debug(
- 'Kubernetes object which will be deployed: {0}'
- .format(namespace_resource_template)
- )
-
- kubernetes_plugin.custom_resource_create(**namespace_resource_template)
- _save_deployment_result('namespace')
- ctx.logger.info('Namespace created successfully')
-
-
-if __name__ == '__main__':
- _, kubernetes_plugin = _import_or_install()
-
- _do_create_namespace(kubernetes_plugin)
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py
new file mode 100644
index 0000000000..268068f00c
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py
@@ -0,0 +1,230 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+import subprocess
+
+import cloudify_kubernetes.tasks as kubernetes_plugin
+import yaml
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+import constants
+import deployment_result
+import time
+import ast
+import json
+import base64
+
+SERVICES_FILE_PARTS_SEPARATOR = '---'
+
+
+def create_resources():
+ ctx.logger.info('Creating resources')
+ apps_path = _retrieve_root_path()
+
+ if not apps_path:
+ ctx.logger.warn(
+ 'Apps dir is not defined. Skipping!'
+ )
+
+ return
+
+ helm_app = ctx.node.properties.get('path', None)
+
+ yaml_file = prepare_content(helm_app)
+
+ yaml_content_parts = yaml_file.split(SERVICES_FILE_PARTS_SEPARATOR)
+
+ for yaml_content_part in yaml_content_parts:
+ if yaml_content_part:
+ yaml_content = _apply_readiness_workaround(yaml_content_part)
+ if yaml_content:
+ create_resource(yaml_content)
+
+    ctx.logger.info('Resources created successfully')
+
+def delete_resources():
+
+ ctx.logger.info('Deleting resources')
+ apps_path = _retrieve_root_path()
+
+ if not apps_path:
+ ctx.logger.warn(
+ 'Apps dir is not defined. Skipping!'
+ )
+ return
+
+ helm_app = ctx.node.properties.get('path', None)
+
+ yaml_file = prepare_content(helm_app)
+
+ yaml_content_parts = yaml_file.split(SERVICES_FILE_PARTS_SEPARATOR)
+
+ for yaml_content_part in yaml_content_parts:
+ if yaml_content_part:
+ yaml_content = _apply_readiness_workaround(yaml_content_part)
+ if yaml_content:
+ delete_resource(yaml_content)
+
+ ctx.logger.info('Resources deleted successfully')
+
+
+def prepare_content(resource):
+ helm_path = _retrieve_helm_cli_path()
+ yaml_file = render_chart(resource, _retrieve_root_path(), helm_path)
+
+ return yaml_file
+
+
+def create_resource(yaml_content_dict):
+ ctx.logger.debug("Loading yaml: {}".format(yaml_content_dict))
+
+ if yaml_content_dict.get('kind', '') == 'PersistentVolumeClaim':
+ ctx.logger.debug("PersistentVolumeClaim custom handling")
+ kubernetes_plugin.custom_resource_create(definition=yaml_content_dict, api_mapping=_get_persistent_volume_mapping_claim_api())
+ else:
+ kubernetes_plugin.resource_create(definition=yaml_content_dict)
+
+ deployment_result.save_deployment_result('resource_{0}'.format(yaml_content_dict['metadata']['name']))
+
+def delete_resource(yaml_content_dict):
+ ctx.logger.debug("Loading yaml: {}".format(yaml_content_dict))
+
+    deployment_result.set_deployment_result('resource_{0}'.format(yaml_content_dict['metadata']['name']))
+ if yaml_content_dict.get('kind', '') == 'PersistentVolumeClaim':
+ ctx.logger.debug("PersistentVolumeClaim custom handling")
+ kubernetes_plugin.custom_resource_delete(definition=yaml_content_dict, api_mapping=_get_persistent_volume_mapping_claim_api())
+ else:
+ kubernetes_plugin.resource_delete(definition=yaml_content_dict)
+
+
+def render_chart(app, app_root_path, helm_cli_path):
+ app_chart_path = "{}/{}/".format(app_root_path, app)
+ ctx.logger.debug('App chart path = {}'.format(app_chart_path))
+ return _exec_helm_template(helm_cli_path, app_chart_path)
+
+
+def _exec_helm_template(helm_path, chart):
+ cmd = '{0} template {1}'.format(helm_path, chart)
+ ctx.logger.debug('Executing helm template cmd: {}'.format(cmd))
+ rendered = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE).stdout.read().decode()
+
+ return rendered
+
+def _get_persistent_volume_mapping_claim_api():
+ api_mapping = {
+ 'create' : {
+ 'api': 'CoreV1Api',
+ 'method': 'create_namespaced_persistent_volume_claim',
+ 'payload': 'V1PersistentVolumeClaim'
+ },
+ 'read' : {
+ 'api': 'CoreV1Api',
+ 'method': 'read_namespaced_persistent_volume_claim',
+ },
+ 'delete': {
+ 'api': 'CoreV1Api',
+ 'method': 'delete_namespaced_persistent_volume_claim',
+ 'payload': 'V1DeleteOptions'
+ }
+ }
+
+ return api_mapping
+
+
+def _apply_readiness_workaround(yaml_file):
+ b64_env = _get_k8s_b64_env()
+
+ input_dict = yaml.load(yaml_file)
+
+ try:
+ init_containers = input_dict['spec']['template']['metadata']['annotations'][
+ 'pod.beta.kubernetes.io/init-containers']
+        init_cont_list = json.loads(init_containers)
+
+ new_init_cont_list = list()
+ new_cont = None
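+        # Swap the stock oomk8s/readiness-check image for the custom readiness image, which
+        # reads the cluster config from the injected K8S_CONFIG_B64 environment variable.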
+ for init_cont in init_cont_list:
+ if "oomk8s/readiness-check" in init_cont['image']:
+ init_cont['image'] = "clfy/oomk8s-cfy-readiness-check:1.0.1"
+ #init_cont['imagePullPolicy'] = "IfNotPresent"
+ init_cont['env'].append(b64_env)
+ new_cont = init_cont
+ new_init_cont_list.append(json.dumps(init_cont))
+
+ new_payload = ",".join(new_init_cont_list)
+
+ if new_cont:
+ input_dict['spec']['template']['metadata']['annotations'].pop('pod.beta.kubernetes.io/init-containers')
+ input_dict['spec']['template']['metadata']['annotations']['pod.beta.kubernetes.io/init-containers'] = '[{}]'.format(new_payload)
+
+
+ except KeyError as ke:
+ ctx.logger.debug('Readiness section is not found.')
+
+ return input_dict
+
+
+def _get_k8s_b64():
+ target_relationship = _retrieve_managed_by_master()
+
+ k8s_config = target_relationship.node.properties.get('configuration').get('file_content')
+
+ if not k8s_config:
+ raise Exception("Cannot find kubernetes config")
+
+ k8s_config_plain = yaml.dump(k8s_config, allow_unicode=True)
+
+ k8s_config_b64 = base64.b64encode(k8s_config_plain)
+
+ return k8s_config_b64
+
+
+def _get_k8s_b64_env():
+ env = dict()
+ env['name'] = 'K8S_CONFIG_B64'
+ env['value'] = _get_k8s_b64()
+ return env
+
+
+def _retrieve_root_path():
+ target_relationship = _retrieve_depends_on()
+
+ apps_root_path = target_relationship.instance.runtime_properties.get(constants.RT_APPS_ROOT_PATH, None)
+
+ ctx.logger.debug("Retrived apps root path = {}".format(apps_root_path))
+
+ return apps_root_path
+
+def _retrieve_helm_cli_path():
+ target_relationship = _retrieve_depends_on()
+
+ helm_cli_path = target_relationship.instance.runtime_properties.get(constants.RT_HELM_CLI_PATH, None)
+
+ ctx.logger.debug("Retrived helm clis path = {}".format(helm_cli_path))
+
+ return helm_cli_path
+
+def _retrieve_depends_on():
+ result = None
+ for relationship in ctx.instance.relationships:
+ if relationship.type == 'cloudify.relationships.depends_on':
+ return relationship.target
+
+def _retrieve_managed_by_master():
+ result = None
+ for relationship in ctx.instance.relationships:
+ if relationship.type == 'cloudify.kubernetes.relationships.managed_by_master':
+ return relationship.target
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py
new file mode 100644
index 0000000000..fe3e892c5b
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py
@@ -0,0 +1,67 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+from fabric import api as fabric_api
+
+def _retrieve_namespace():
+ namespace = ctx.node.properties.get(
+ 'namespace',
+ ctx.node.properties
+ .get('options', {})
+ .get('namespace', None)
+ )
+
+ if not namespace:
+ raise NonRecoverableError(
+ 'Namespace is not defined (node={})'.format(ctx.node.name)
+ )
+
+ return namespace
+
+
+def configure_secret():
+ namespace = _retrieve_namespace()
+ ctx.logger.info(
+ 'Configuring docker secrets for namespace: {0}'.format(namespace)
+ )
+
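+    # Create an imagePullSecret for the ONAP Nexus docker registry in the target namespace.
+    # Runs kubectl on the master over SSH (fabric) as a workaround for docker secret setup.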
+ command = 'kubectl create secret ' \
+ 'docker-registry onap-docker-registry-key ' \
+ '--docker-server=nexus3.onap.org:10001 ' \
+ '--docker-username=docker ' \
+ '--docker-password=docker ' \
+ '--docker-email=email@email.com ' \
+ '--namespace={0}'.format(namespace)
+
+ ctx.logger.info('Command "{0}" will be executed'.format(command))
+
+ with fabric_api.settings(
+ **ctx.node.properties.get('ssh_credentials')):
+ fabric_api.run(command)
+
+ ctx.logger.info('Docker secrets configured successfully')
+
+
+def _get_fabric_env():
+ result = dict()
+
+ result['host_string'] = ctx.node.properties.get('ssh_credentials')['host_string']
+ result['user'] = ctx.node.properties.get('ssh_credentials')['user']
+ result['key'] = ctx.node.properties.get('ssh_credentials')['key']
+
+ return result
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py
new file mode 100644
index 0000000000..741f28ec32
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py
@@ -0,0 +1,58 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+
+from cloudify import ctx
+from cloudify.decorators import operation
+
+from common import constants
+from common import helm
+from common import init_pod, namespace
+
+
+@operation
+def create_init_pod(**kwargs):
+    init_pod.do_create_init_pod()
+
+
+@operation
+def create_namespace(**kwargs):
+ namespace.do_create_namespace()
+
+
+@operation
+def delete_init_pod(**kwargs):
+ init_pod.do_delete_init_pod()
+
+
+@operation
+def delete_namespace(**kwargs):
+ namespace.do_delete_namespace()
+
+
+@operation
+def setup_helm_templates(**kwargs):
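+    # Download the helm CLI and clone the OOM charts repo, stashing their local paths in
+    # runtime properties so App nodes can consume them through depends_on relationships.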
+ helm_url = constants.HELM_URL
+ ctx.instance.runtime_properties[constants.RT_HELM_CLI_PATH] = helm.get_helm_path(helm_url)
+ ctx.logger.debug('Helm cli path = {}'.format(ctx.instance.runtime_properties[constants.RT_HELM_CLI_PATH]))
+
+ oom_git_url = constants.OOM_GIT_URL
+ ctx.instance.runtime_properties[constants.RT_APPS_ROOT_PATH] = helm.get_apps_root_path(oom_git_url)
+ ctx.logger.debug('Apps root path = {}'.format(ctx.instance.runtime_properties[constants.RT_APPS_ROOT_PATH]))
+
+
+
+
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt
new file mode 100644
index 0000000000..9d8d880932
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt
@@ -0,0 +1 @@
+https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip
\ No newline at end of file
diff --git a/cloudify-onap/plugins/onap-installation-plugin/plugin.yaml b/cloudify-onap/plugins/onap-installation-plugin/plugin.yaml
new file mode 100755
index 0000000000..f88b50bc78
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/plugin.yaml
@@ -0,0 +1,87 @@
+plugins:
+ onap:
+ executor: central_deployment_agent
+ package_name: onap-installation-plugin
+ package_version: '1.0.0'
+ source: onap-installation-plugin
+ # install_arguments: '--process-dependency-links'
+
+node_types:
+ cloudify.onap.kubernetes.Environment:
+ derived_from: cloudify.nodes.Root
+ properties:
+ namespace:
+ type: string
+ init_pod:
+ type: string
+ description: >
+ Path to init pod chart
+ openstack_envs:
+ description: >
+        ONAP parameters defined in the onap-parameters.yaml file
+ default: {}
+ options:
+ description: >
+ For compatibility with kubernetes plugin.
+ To be removed in the future.
+ default: {}
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: onap.k8s_installer.environment.create_namespace
+ executor: central_deployment_agent
+ configure:
+ implementation: onap.k8s_installer.environment.setup_helm_templates
+ executor: central_deployment_agent
+ start:
+ implementation: onap.k8s_installer.environment.create_init_pod
+ executor: central_deployment_agent
+ stop:
+ implementation: onap.k8s_installer.environment.delete_init_pod
+ executor: central_deployment_agent
+ delete:
+ implementation: onap.k8s_installer.environment.delete_namespace
+ executor: central_deployment_agent
+
+ cloudify.onap.kubernetes.App:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ type: string
+ description: >
+ Name of ONAP app
+ path:
+ type: string
+ description: >
+        Path (relative to the blueprint) to the app chart directory
+ required: false
+ options:
+ description: >
+ For compatibility with kubernetes plugin.
+ To be removed in the future.
+ default: {}
+ ssh_credentials:
+ description: >
+ Workaround for docker secret settings
+ via fabric plugin
+ default:
+ host_string: { get_secret: kubernetes_master_ip }
+ user: { get_secret: agent_user }
+ key: { get_secret: agent_key_private }
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: onap.k8s_installer.app.create_namespace
+ executor: central_deployment_agent
+ configure:
+ implementation: onap.k8s_installer.app.configure_docker_secret
+ executor: central_deployment_agent
+ start:
+ implementation: onap.k8s_installer.app.create_resources_services
+ executor: central_deployment_agent
+ stop:
+ implementation: onap.k8s_installer.app.delete_resources_services
+ executor: central_deployment_agent
+ delete:
+ implementation: onap.k8s_installer.app.delete_namespace
+ executor: central_deployment_agent
diff --git a/cloudify-onap/plugins/onap-installation-plugin/requirements.txt b/cloudify-onap/plugins/onap-installation-plugin/requirements.txt
new file mode 100644
index 0000000000..9d8d880932
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/requirements.txt
@@ -0,0 +1 @@
+https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip
\ No newline at end of file
diff --git a/cloudify-onap/plugins/onap-installation-plugin/setup.py b/cloudify-onap/plugins/onap-installation-plugin/setup.py
new file mode 100755
index 0000000000..d413daa4ac
--- /dev/null
+++ b/cloudify-onap/plugins/onap-installation-plugin/setup.py
@@ -0,0 +1,40 @@
+########
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+
+from setuptools import setup
+
+try:
+ import cloudify_kubernetes
+except ImportError:
+ import pip
+ pip.main(['install', 'https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip'])
+
+setup(
+ name='onap-installation-plugin',
+ version='1.0.0',
+ author='',
+ author_email='',
+ packages=['k8s_installer', 'k8s_installer.common'],
+ install_requires=[
+ 'cloudify-plugins-common>=3.3.1',
+ 'cloudify-kubernetes-plugin==1.2.1',
+ #'/tmp/k8spl/cloudify-kubernetes-plugin'
+ 'pyyaml',
+ 'gitpython',
+ 'paramiko==1.18.3',
+ 'fabric==1.13.1'
+ ]
+)
diff --git a/cloudify/inputs/message-router-blueprint.yaml.example b/cloudify/inputs/message-router-blueprint.yaml.example
deleted file mode 100644
index ecab0eca2c..0000000000
--- a/cloudify/inputs/message-router-blueprint.yaml.example
+++ /dev/null
@@ -1,25 +0,0 @@
-join_command: kubeadm join --token f66aad.cb001cc90bd69b38 192.168.120.6:6443
-kubernetes_master_ip: 192.168.120.6
-flavor: 3
-configuration_file_content:
- apiVersion: v1
- clusters:
- - cluster:
- certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01EZ3dNekEzTXpJek4xb1hEVEkzTURnd01UQTNNekl6TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTUF4Ckxzdmkyek1ZU0pjaG5QWjVDUkJQTnBLbklHTDlHY1FYRFZnQjNEc0FuaTVpc2VadDlmeENtOURxSS94NkkrRGoKSlA5ZkNNbEo5a3Z1OGgvZFF4dWJFbHhaSmZkdkFqY3p0RlVWdGpaVGREcTFDTk81UENOcnNRSkdQVS9HWDNzagpRWmlHYVNPYmJJOGQ0d2Z0bkI5dE51ZDNXMnZDZmZJUzNCNU9YMVRVMzBjVE1xVnJjZ0FLT2EvR2FUK01WV3c2CkVHZDErWmVoYWZBUWJDeG1jbHRpMlJHSUNVakpLc2xqUFRUS3JTNXJVMkwxUjdYSFd3SUhyWWtuZ05SQllwTkQKaXk3UjlCZy93S1dkMVNYVVpUODU3eE8xdjB0aU9ucFJML0tGS2IrcHBKUnVITDVORE9TbTJZSzR1OFI3MjFudgpyYVNOSTk2K0VoVGhWL2U4VWU4Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFOUFhbWQzL0JmRVAyRitSeXJRdXp2TGQzSWEKbGZoR3Fab1JqZWFId1pnanVwQVh0VXdzd0JiYkFhZm5XMXJDd3VDVldRYXVYVWhyZ1VNelcvbEQ2blBYYWtUcgpwWTJ6NG83ZG90dlZSekVtN0dmWllMUUs2cW9Wczk4TTRjS3RhdjVlL3VVcXFGckY2NVYzUE1QV3M1NGp2Q1U5CklFTDJ0ZmQ1TzFrMGlEcXFtdWdBVjgxblNOdHlnK0FZN3o5SVdXRFhKcTNUQ1RHQnZLQmxCdzNWSDVBbnQxblEKSFNrSmJ0ZGhpaFA0KzU0emlKZEhPNFcxekFGam4zUVpIZVZDNU8rSkdSOWNZWW5aTHc4ZC92YmxZeXRpTWZPVwoyN3VzcW1RbmtPZDliNXozaTlvRDBvUUYyY1RObk85NzJkeTBuTmhiK0VMclpGNEpKUS9XVjB0Z083ST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- server: https://192.168.120.6:6443
- name: kubernetes
- contexts:
- - context:
- cluster: kubernetes
- user: kubernetes-admin
- name: kubernetes-admin@kubernetes
- current-context: kubernetes-admin@kubernetes
- kind: Config
- preferences: {}
- users:
- - name: kubernetes-admin
- user:
- client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJSm9EQWNpYWVkSVF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4TnpBNE1ETXdOek15TXpkYUZ3MHhPREE0TURNd056TXlNemxhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQW1xd3duNlU0TFVFQkphMWUKQzIrUjM0K0oyZ3BBYTJ0aDVOZXdnS2NhUWQwaE5BODNjNE1IMDFVUjU3b3ByNUNFczFQVmVwMkZtczlpaFRITwo0SUpINjkxUVQvTUVJZE5iWTl0RXdDV21ia1lMbFBjc09yclErYTl5VGdxYm5IWjBONnJOdUZ4dDB2alRPSUR1CmRDMnBQR3dFMW5kaHd1VVB3UUFxeS9SVjN6MTgzRnoyOWZuVHg3UXdWR0J4Rk84Z0JxRTFRYTVYenhIZ0lTQ2sKSkJka2FtRUFhSjl6NHgwZjFmbHQ4MG4wZ3RHRitkbUZuMThkbGwzZmoreGpNOGxqS21QZnRNdlc4MXF0bkVnZApoU1I3bWdMODlUckx3SmFtNkxmVmZhN29CWWJvWUMyT2gvKytZMkpwOXRpRkMyZ1ExeVBXSHJBMVZJTVBQUWdkCk8yTGNuUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIZ2ZjRVd6R08yQ1p0cEJFbUxzbllXWTJmdGlSOU1BNHY5OQpXVFhBUzNzZ3VJTm43WktUUElSeTVyTmVmSTVhS1ltMWMyU0w5ZzJlM0JpeFZUUHRsYmRWczVBanMxWnVWRGRkClhmYk93blozcnBQbDZoenpxSVh2VmxsNzI4VC9hZDRJbmZ6SFVtT1o3YSs4enBIUS9EREZKLzR1aDYrSVlnSFkKVzBBQmFXMXpOc3lQSzNhK3paV0ROSVFvNS8yTVFJYkNwN1ZQOHhobUUxZ1diY1BxVmJ1YVZJY09IZjkvUVhqeQpKZTdoK2tzSEJPNUFZczRZOFZBYXlVb0t4bTJZbmNkZHJGTWl4STRKNEkrSUp5aGRPdE5TNG1lTmcyMXIwN3U2ClZkL2E2SGt6ekxFcmdqWkxzVktIK0RUMTVhTWNSZGg3OVE1YXo1ckh1Qm5vZ0RYejFtVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbXF3d242VTRMVUVCSmExZUMyK1IzNCtKMmdwQWEydGg1TmV3Z0tjYVFkMGhOQTgzCmM0TUgwMVVSNTdvcHI1Q0VzMVBWZXAyRm1zOWloVEhPNElKSDY5MVFUL01FSWROYlk5dEV3Q1dtYmtZTGxQY3MKT3JyUSthOXlUZ3FibkhaME42ck51Rnh0MHZqVE9JRHVkQzJwUEd3RTFuZGh3dVVQd1FBcXkvUlYzejE4M0Z6Mgo5Zm5UeDdRd1ZHQnhGTzhnQnFFMVFhNVh6eEhnSVNDa0pCZGthbUVBYUo5ejR4MGYxZmx0ODBuMGd0R0YrZG1GCm4xOGRsbDNmait4ak04bGpLbVBmdE12VzgxcXRuRWdkaFNSN21nTDg5VHJMd0phbTZMZlZmYTdvQllib1lDMk8KaC8rK1kySnA5dGlGQzJnUTF5UFdIckExVklNUFBRZ2RPMkxjblFJREFRQUJBb0lCQUhxbjMrdEo5ekdUNGhnQgowcGxaQWFINnp3TzBxMzlENlo2ekdNbjlPY3BQVkp4WEVNOHVjbzg1WC9pV1hhWlhBWlMvLzRPNzFRNStOUStRCi94QjA0Qm9BS0VjdVhQR0NEWEF6bXVLUk9Oa3IvTlZGNmJJdElibFBVMkxsOEo3MEpKZGNnTVVacnhIbHRvS1IKWkFlSGlqUmJLTDcyYnZWQjl1dERlYXpCZHpPTzhHbG5VaU5WTWRoaVowazRNbEFobmV0ZjNNazFHbXFjbHJyNApISjIwbElSR2NWTWRqZm1OaThFVG5LckRwWWNvRUZ5QnozMVN2RHVTaU1GVm9sUWpZMkU1N2kyd1RVdDlSU1NjCk5oRlpEM2s1dkxwMFNIcjZtSXRURW1jY0w2VDdzTDh0UXNGLzhaZG9aUXpoRzRXUU5IZ00yUldsdEN4eklCNy8KT3czUk5OVUNnWUVBelcvNVdkWk5QV2hsRXR2VGQ4a1FjbUF3VkVYaGgrU2NvajhpVGdHbW5GNXhsSGhWVjZUdwpVYzRtRmhGU0JBSGpRWlN5Vm1NTDkwMWU1UE1aOXVRQ05Xb0pWVzU4cUI0VDJsRXNKRjJkdXdRSVZDL2g4QkhiClJ4TVZLaDJhdHZKR2dHbWsxME5tblZTYmxQVVpDVVBRWFN4R1B5VXh0UStSSmRUNHVPSm43QXNDZ1lFQXdMMnIKNUlQeFRvTHplZ254b0I5Z0RnbnFBazB3b3NicHg3V2pJY2RpdnlWNGpib2U3TmlYbEpQZXJ3MmExd2M2Ky96VgpSeVpkUjN2U1lrUnczNnp4Q1N0UHZhRFVMT053eDhtSjVRVVIwYXdReEQ4R1ZneHZmVTBhYzdqeW04L2laZWpjCkk5V1UxOXo0eEk3akIvMXNYOFpFTWFtb1RXOGVUM0I4aWNPUEd2Y0NnWUVBcWtOZmVlRnU2cklXOHVaV0FUVVcKK0hSWUdjQkJCd3VsOWFJMW9Fa2wrUHNkVDF2Yi8yT24rV1RObEFTTzROdGZxZjYvUDNHZmZUc1dwdElFZHViSwpIZExnSVhvTXZwa1BBeVc3Vy9ocXZaQytCbWdZN1lzZkhXem5ZWnhmbWJoNlRmdEFyMWdoTjh2amxqVDhwdjBaCk45OTE2T2UrcHIxY0l1cTdxUitiMmJrQ2dZQUxMYlQvZnV1SzZ5dGw0NWZBK3JEZWY1S3o2WGd0cUsyOGFIdDYKcFE3RUdVOUJvUTdVRzhmRzFVQ3dGSERya2I3SkNLUHlDWGFWZzhmeTdSZEMwY3YxQlM4Tngzc20wMVlpTUQwdwpMRGdaV2dwcTUyRGRzc0R3bW4welF3SEhLYXB1MEsrYjRISk9oc0ZpM1FxcjF2WG5KQittWmJtZUxCaXM4TkE0ClNVQk9od0tCZ0drTkJhaTFWVU9RaGVYU3Mrb3JPVWxDNDNsenlBanJZa2dod0lRd25LTWpqK2lrak9oNmtqc3IKL1lDTHVRcWNBYWNKVEF2VmZOVGcyZldyUUJTODlwVjlFRVBnV0JIQmt4a1JsNnY0WTFQZVRqOVVzeVdzaHljYQpXRkFHSkpDMXg1NWg2OWdFWSsyR1NpUEQ0MzNrQUZUd3NBUEhPbmRwdmlOTVBLek9KTldnCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
-
-
diff --git a/cloudify/scripts/configure_node.py b/cloudify/scripts/configure_node.py
deleted file mode 100644
index 9cfa206b54..0000000000
--- a/cloudify/scripts/configure_node.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-import subprocess
-from cloudify import ctx
-from cloudify.state import ctx_parameters as inputs
-
-
-def execute_command(_command):
-
- ctx.logger.debug('_command {0}.'.format(_command))
-
- subprocess_args = {
- 'args': _command.split(),
- 'stdout': subprocess.PIPE,
- 'stderr': subprocess.PIPE
- }
-
- ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
-
- process = subprocess.Popen(**subprocess_args)
- output, error = process.communicate()
-
- ctx.logger.debug('command: {0} '.format(_command))
- ctx.logger.debug('output: {0} '.format(output))
- ctx.logger.debug('error: {0} '.format(error))
- ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
-
- if process.returncode:
- ctx.logger.error('Running `{0}` returns error.'.format(_command))
- return False
-
- return output
-
-
-if __name__ == '__main__':
-
- join_command = inputs['join_command']
- join_command = 'sudo {0} --skip-preflight-checks'.format(join_command)
- execute_command(join_command)
-
- # Install weave-related utils
- execute_command('sudo curl -L git.io/weave -o /usr/local/bin/weave')
- execute_command('sudo chmod a+x /usr/local/bin/weave')
- execute_command('sudo curl -L git.io/scope -o /usr/local/bin/scope')
- execute_command('sudo chmod a+x /usr/local/bin/scope')
- execute_command('/usr/local/bin/scope launch')
-
- hostname = execute_command('hostname')
- ctx.instance.runtime_properties['hostname'] = hostname.rstrip('\n')
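The worker bootstrap script deleted above joins the node with kubeadm and installs the Weave utilities, funnelling every shell call through a small Popen wrapper that logs stdout, stderr and the return code before deciding success or failure. A minimal standalone sketch of that wrapper, outside the Cloudify context (function names and the trailing usage are illustrative; Python 3 needs the decode calls the Python 2 original could skip):

    import subprocess

    def run_command(command):
        # Capture both streams so a failure can be reported together with its stderr.
        process = subprocess.Popen(
            command.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        output, error = process.communicate()
        if process.returncode:
            print('command failed: {0}: {1}'.format(command, error.decode()))
            return None
        return output.decode()

    if __name__ == '__main__':
        hostname = run_command('hostname')
        if hostname:
            print(hostname.rstrip('\n'))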
diff --git a/cloudify/scripts/create.py b/cloudify/scripts/create.py
deleted file mode 100644
index eb362a4558..0000000000
--- a/cloudify/scripts/create.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-import subprocess
-from cloudify import ctx
-from cloudify.exceptions import OperationRetry
-
-
-def check_command(command):
-
- try:
- process = subprocess.Popen(
- command.split()
- )
- except OSError:
- return False
-
- output, error = process.communicate()
-
- ctx.logger.debug('command: {0} '.format(command))
- ctx.logger.debug('output: {0} '.format(output))
- ctx.logger.debug('error: {0} '.format(error))
- ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
-
- if process.returncode:
- ctx.logger.error('Running `{0}` returns error.'.format(command))
- return False
-
- return True
-
-
-def execute_command(_command):
-
- ctx.logger.debug('_command {0}.'.format(_command))
-
- subprocess_args = {
- 'args': _command.split(),
- 'stdout': subprocess.PIPE,
- 'stderr': subprocess.PIPE
- }
-
- ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
-
- process = subprocess.Popen(**subprocess_args)
- output, error = process.communicate()
-
- ctx.logger.debug('command: {0} '.format(_command))
- ctx.logger.debug('output: {0} '.format(output))
- ctx.logger.debug('error: {0} '.format(error))
- ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
-
- if process.returncode:
- ctx.logger.error('Running `{0}` returns error.'.format(_command))
- return False
-
- return output
-
-
-if __name__ == '__main__':
-
- docker_command = 'docker ps'
-
- if not check_command(docker_command):
- raise OperationRetry('Waiting for docker to be installed.')
-
- finished = False
- ps = execute_command('ps -ef')
- for line in ps.split('\n'):
- if '/usr/bin/python /usr/bin/cloud-init modules' in line:
- ctx.logger.error('in line')
- raise OperationRetry('Waiting for Cloud Init to finish.')
-
- ctx.logger.info('Docker is ready and Cloud Init finished.')
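create.py gates the rest of the install on two readiness checks and raises OperationRetry so the Cloudify workflow keeps re-invoking it until both pass: `docker ps` must succeed and cloud-init must no longer be running. The same wait-until-ready logic can be sketched as an ordinary polling loop without the plugin machinery (poll interval and attempt count are illustrative assumptions):

    import subprocess
    import time

    def docker_ready():
        # 'docker ps' only succeeds once the Docker daemon is installed and running.
        return subprocess.call(['docker', 'ps'],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0

    def cloud_init_running():
        ps = subprocess.check_output(['ps', '-ef']).decode()
        return '/usr/bin/python /usr/bin/cloud-init modules' in ps

    def wait_for_node(poll_seconds=10, attempts=30):
        for _ in range(attempts):
            if docker_ready() and not cloud_init_running():
                print('Docker is ready and Cloud Init finished.')
                return True
            time.sleep(poll_seconds)
        return False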
diff --git a/cloudify/scripts/onap/configure_docker_secret_workaround.py b/cloudify/scripts/onap/configure_docker_secret_workaround.py
deleted file mode 100644
index 6e9deff059..0000000000
--- a/cloudify/scripts/onap/configure_docker_secret_workaround.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from fabric.api import run
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-
-def _retrieve_namespace():
- namespace = ctx.node.properties.get(
- 'namespace',
- ctx.node.properties
- .get('options', {})
- .get('namespace', None)
- )
-
- if not namespace:
- raise NonRecoverableError(
- 'Namespace is not defined (node={})'.format(ctx.node.name)
- )
-
- return namespace
-
-
-def configure_secret():
- namespace = _retrieve_namespace()
- ctx.logger.info(
- 'Configuring docker secrets for namespace: {0}'.format(namespace)
- )
-
- command = 'kubectl create secret ' \
- 'docker-registry onap-docker-registry-key ' \
- '--docker-server=nexus3.onap.org:10001 ' \
- '--docker-username=docker ' \
- '--docker-password=docker ' \
- '--docker-email=email@email.com ' \
- '--namespace={0}'.format(namespace)
-
- ctx.logger.info('Command "{0}" will be executed'.format(command))
- run(command)
-
- ctx.logger.info('Docker secrets configured successfully')
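The docker-secret workaround is a single kubectl call run on the master over Fabric: it creates the image-pull secret that lets pods in the ONAP namespace pull from the ONAP Nexus registry. Assembled the same way but executed locally, the call looks like this (the registry credentials are the placeholder values hard-coded in the deleted script):

    import subprocess

    def create_registry_secret(namespace):
        subprocess.check_call([
            'kubectl', 'create', 'secret', 'docker-registry',
            'onap-docker-registry-key',
            '--docker-server=nexus3.onap.org:10001',
            '--docker-username=docker',
            '--docker-password=docker',
            '--docker-email=email@email.com',
            '--namespace={0}'.format(namespace),
        ])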
diff --git a/cloudify/scripts/onap/create_init_pod.py b/cloudify/scripts/onap/create_init_pod.py
deleted file mode 100644
index c82172d15f..0000000000
--- a/cloudify/scripts/onap/create_init_pod.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import pip
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-
-SERVICES_FILE_PARTS_SEPARATOR = '---'
-
-
-def _import_or_install():
- try:
- import yaml
- except ImportError:
- pip.main(["install", "pyaml"])
-
- try:
- import cloudify_kubernetes.tasks as kubernetes_plugin
- except ImportError:
- pip.main([
- "install",
- "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
- ])
-
- import yaml
- import cloudify_kubernetes.tasks as kubernetes_plugin
-
- return yaml, kubernetes_plugin
-
-
-def _retrieve_path():
- return ctx.node.properties.get('init_pod', None)
-
-
-def _save_deployment_result(key):
- result = ctx.instance.runtime_properties['kubernetes']
- ctx.instance.runtime_properties[key] = result
- ctx.instance.runtime_properties['kubernetes'] = {}
-
-
-def _do_create_init_pod(kubernetes_plugin, yaml):
- ctx.logger.info('Creating init pod')
- init_pod_file_path = _retrieve_path()
-
- if not init_pod_file_path:
- raise NonRecoverableError('Init pod file is not defined.')
-
- temp_file_path = ctx.download_resource_and_render(
- init_pod_file_path
- )
-
- with open(temp_file_path) as temp_file:
- init_pod_file_content = temp_file.read()
- init_pod_yaml_content = yaml.load(init_pod_file_content)
-
- kubernetes_plugin.resource_create(definition=init_pod_yaml_content)
- _save_deployment_result('init_pod')
-
- ctx.logger.info('Init pod created successfully')
-
-
-if __name__ == '__main__':
- yaml, kubernetes_plugin = _import_or_install()
-
- _do_create_init_pod(kubernetes_plugin, yaml)
-
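All of the deleted onap/*.py scripts open with the same bootstrap trick: try the import, pip-install the missing package on ImportError, then import again. pip.main() has since been removed from pip's public API, so here is a sketch of the same idea that shells out to pip instead (package names taken from the deleted scripts):

    import importlib
    import subprocess
    import sys

    def import_or_install(module_name, pip_target):
        # Try the import first; on failure install the distribution and retry once.
        try:
            return importlib.import_module(module_name)
        except ImportError:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', pip_target])
            return importlib.import_module(module_name)

    yaml = import_or_install('yaml', 'pyaml')
    kubernetes_tasks = import_or_install(
        'cloudify_kubernetes.tasks',
        'https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip',
    )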
diff --git a/cloudify/scripts/onap/create_resources_services.py b/cloudify/scripts/onap/create_resources_services.py
deleted file mode 100644
index 8548e29b70..0000000000
--- a/cloudify/scripts/onap/create_resources_services.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import pip
-
-from cloudify import ctx
-
-
-SERVICES_FILE_PARTS_SEPARATOR = '---'
-
-
-def _import_or_install():
- try:
- import yaml
- except ImportError:
- pip.main(["install", "pyaml"])
-
- try:
- import cloudify_kubernetes.tasks as kubernetes_plugin
- except ImportError:
- pip.main([
- "install",
- "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
- ])
-
- try:
- import jinja2
- except ImportError:
- pip.main(["install", "jinja2"])
-
- import yaml
- import jinja2
- import cloudify_kubernetes.tasks as kubernetes_plugin
-
- return yaml, kubernetes_plugin, jinja2
-
-
-def _init_jinja(jinja2):
- return jinja2.Environment(
- loader=jinja2.BaseLoader()
- )
-
-
-def _render_template(jinja_env, template_content, values):
- template_content = template_content.replace('.Values', 'Values')
-
- template = jinja_env.from_string(template_content)
- rendered_template = template.render(Values=values)
- return rendered_template
-
-
-def _retrieve_resources_paths():
- return ctx.node.properties.get('resources', [])
-
-
-def _retrieve_services_paths():
- return ctx.node.properties.get('services', None)
-
-
-def _retrieve_values(yaml):
- values_file_path = ctx.node.properties.get('values', None)
-
- if values_file_path:
- return yaml.load(ctx.get_resource(values_file_path))
-
- ctx.logger.warn('Values file not found')
-
-
-def _save_deployment_result(key):
- result = ctx.instance.runtime_properties['kubernetes']
- ctx.instance.runtime_properties[key] = result
- ctx.instance.runtime_properties['kubernetes'] = {}
-
-
-def _do_create_resources(kubernetes_plugin, yaml, jinja_env, values):
- for path in _retrieve_resources_paths():
- ctx.logger.info('Creating resource defined in: {0}'.format(path))
-
- template_content = ctx.get_resource(path)
- yaml_content = _render_template(
- jinja_env,
- template_content,
- values
- )
- content = yaml.load(yaml_content)
-
- kubernetes_plugin.resource_create(definition=content)
- _save_deployment_result(
- 'resource_{0}'.format(content['metadata']['name'])
- )
-
- ctx.logger.info('Resources created successfully')
-
-
-def _do_create_services(kubernetes_plugin, yaml, jinja_env, values):
- ctx.logger.info('Creating services')
- services_file_path = _retrieve_services_paths()
-
- if not services_file_path:
- ctx.logger.warn(
- 'Service file is not defined. Skipping services provisioning !'
- )
-
- return
-
- template_content = ctx.get_resource(services_file_path)
- yaml_content = _render_template(
- jinja_env,
- template_content,
- values
- )
-
- yaml_content_parts = \
- yaml_content.split(SERVICES_FILE_PARTS_SEPARATOR)
-
- for yaml_content_part in yaml_content_parts:
- content = yaml.load(yaml_content_part)
-
- kubernetes_plugin.resource_create(definition=content)
- _save_deployment_result(
- 'service_{0}'.format(content['metadata']['name'])
- )
-
- ctx.logger.info('Services created successfully')
-
-
-if __name__ == '__main__':
- yaml, kubernetes_plugin, jinja2 = _import_or_install()
- jinja_env = _init_jinja(jinja2)
- values = _retrieve_values(yaml)
-
- _do_create_resources(kubernetes_plugin, yaml, jinja_env, values)
- _do_create_services(kubernetes_plugin, yaml, jinja_env, values)
-
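create_resources_services.py emulates a thin slice of Helm: it rewrites '.Values' references so the chart templates become valid Jinja2, renders them against the values file, splits the services file on '---' and hands each resulting document to the Kubernetes plugin. The rendering step in isolation looks like this (the template and values below are made-up examples, not files from the repo):

    import jinja2
    import yaml

    VALUES = {'nsPrefix': 'onap'}

    TEMPLATE = """
    apiVersion: v1
    kind: Namespace
    metadata:
      name: {{ .Values.nsPrefix }}-aai
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: model-loader-service
      namespace: {{ .Values.nsPrefix }}-aai
    """

    def render(template_content, values):
        # Same trick as the deleted script: turn Helm's '.Values' into a plain Jinja2 name.
        env = jinja2.Environment(loader=jinja2.BaseLoader())
        template = env.from_string(template_content.replace('.Values', 'Values'))
        return template.render(Values=values)

    for part in render(TEMPLATE, VALUES).split('---'):
        doc = yaml.safe_load(part)
        if doc:
            print(doc['kind'], doc['metadata']['name'])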
diff --git a/cloudify/scripts/onap/delete_init_pod.py b/cloudify/scripts/onap/delete_init_pod.py
deleted file mode 100644
index 1da805b959..0000000000
--- a/cloudify/scripts/onap/delete_init_pod.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import pip
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-
-SERVICES_FILE_PARTS_SEPARATOR = '---'
-
-
-def _import_or_install():
- try:
- import yaml
- except ImportError:
- pip.main(["install", "pyaml"])
-
- try:
- import cloudify_kubernetes.tasks as kubernetes_plugin
- except ImportError:
- pip.main([
- "install",
- "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
- ])
-
- import yaml
- import cloudify_kubernetes.tasks as kubernetes_plugin
-
- return yaml, kubernetes_plugin
-
-
-def _retrieve_path():
- return ctx.node.properties.get('init_pod', None)
-
-
-def _set_deployment_result(key):
- result = ctx.instance.runtime_properties.pop(key)
- ctx.instance.runtime_properties['kubernetes'] = result
-
-
-def _do_delete_init_pod(kubernetes_plugin, yaml):
- ctx.logger.info('Deleting init pod')
- init_pod_file_path = _retrieve_path()
-
- if not init_pod_file_path:
- raise NonRecoverableError('Init pod file is not defined.')
-
- temp_file_path = ctx.download_resource_and_render(
- init_pod_file_path
- )
-
- with open(temp_file_path) as temp_file:
- init_pod_file_content = temp_file.read()
- init_pod_yaml_content = yaml.load(init_pod_file_content)
-
- _set_deployment_result('init_pod')
- kubernetes_plugin.resource_delete(definition=init_pod_yaml_content)
-
- ctx.logger.info('Init pod deleted successfully')
-
-
-if __name__ == '__main__':
- yaml, kubernetes_plugin = _import_or_install()
-
- _do_delete_init_pod(kubernetes_plugin, yaml)
-
diff --git a/cloudify/scripts/onap/delete_namespace.py b/cloudify/scripts/onap/delete_namespace.py
deleted file mode 100644
index 6973e59944..0000000000
--- a/cloudify/scripts/onap/delete_namespace.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import pip
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-
-def _import_or_install():
- try:
- import yaml
- except ImportError:
- pip.main(["install", "pyaml"])
-
- try:
- import cloudify_kubernetes.tasks as kubernetes_plugin
- except ImportError:
- pip.main([
- "install",
- "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
- ])
-
- import yaml
- import cloudify_kubernetes.tasks as kubernetes_plugin
-
- return yaml, kubernetes_plugin
-
-
-def _retrieve_namespace():
- namespace = ctx.node.properties.get(
- 'namespace',
- ctx.node.properties
- .get('options', {})
- .get('namespace', None)
- )
-
- if not namespace:
- raise NonRecoverableError(
- 'Namespace is not defined (node={})'.format(ctx.node.name)
- )
-
- return namespace
-
-
-def _prepare_namespace_resource_template(name):
- return {
- 'definition': {
- 'apiVersion': 'v1',
- 'kind': 'Namespace',
- 'metadata': {
- 'name': name,
- 'labels': {
- 'name': name
- },
- },
- },
- 'api_mapping': {
- 'create': {
- 'api': 'CoreV1Api',
- 'method': 'create_namespace',
- 'payload': 'V1Namespace'
- },
- 'read': {
- 'api': 'CoreV1Api',
- 'method': 'read_namespace',
- },
- 'delete': {
- 'api': 'CoreV1Api',
- 'method': 'delete_namespace',
- 'payload': 'V1DeleteOptions'
- }
- }
- }
-
-
-def _set_deployment_result(key):
- result = ctx.instance.runtime_properties.pop(key)
- ctx.instance.runtime_properties['kubernetes'] = result
-
-
-def _do_delete_namespace(kubernetes_plugin):
- namespace = _retrieve_namespace()
- ctx.logger.info('Deleting namespace: {0}'.format(namespace))
-
- namespace_resource_template = _prepare_namespace_resource_template(
- namespace
- )
-
- ctx.logger.debug(
- 'Kubernetes object which will be deleted: {0}'
- .format(namespace_resource_template)
- )
-
- _set_deployment_result('namespace')
- kubernetes_plugin.custom_resource_delete(**namespace_resource_template)
- ctx.logger.info('Namespace deleted successfully')
-
-
-if __name__ == '__main__':
- _, kubernetes_plugin = _import_or_install()
-
- _do_delete_namespace(kubernetes_plugin)
-
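delete_namespace.py builds the namespace definition and its api_mapping inline and passes both to custom_resource_delete. The mapping names are exactly those of the official Kubernetes Python client, so the same create/delete pair can be sketched directly against CoreV1Api (an illustration with the kubernetes client package, not the Cloudify plugin the blueprint actually used):

    from kubernetes import client, config

    def create_namespace(name):
        # Use the local kubeconfig as a stand-in for the plugin's master configuration.
        config.load_kube_config()
        body = client.V1Namespace(
            metadata=client.V1ObjectMeta(name=name, labels={'name': name})
        )
        return client.CoreV1Api().create_namespace(body=body)

    def delete_namespace(name):
        config.load_kube_config()
        return client.CoreV1Api().delete_namespace(
            name=name, body=client.V1DeleteOptions()
        )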
diff --git a/cloudify/scripts/onap/delete_resources_services.py b/cloudify/scripts/onap/delete_resources_services.py
deleted file mode 100644
index 305a7484bd..0000000000
--- a/cloudify/scripts/onap/delete_resources_services.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import pip
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-
-SERVICES_FILE_PARTS_SEPARATOR = '---'
-
-
-def _import_or_install():
- try:
- import yaml
- except ImportError:
- pip.main(["install", "pyaml"])
-
- try:
- import cloudify_kubernetes.tasks as kubernetes_plugin
- except ImportError:
- pip.main([
- "install",
- "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
- ])
-
- try:
- import jinja2
- except ImportError:
- pip.main(["install", "jinja2"])
-
- import yaml
- import jinja2
- import cloudify_kubernetes.tasks as kubernetes_plugin
-
- return yaml, kubernetes_plugin, jinja2
-
-
-def _init_jinja(jinja2):
- return jinja2.Environment(
- loader=jinja2.BaseLoader()
- )
-
-
-def _render_template(jinja_env, template_content, values):
- template_content = template_content.replace('.Values', 'Values')
-
- template = jinja_env.from_string(template_content)
- rendered_template = template.render(Values=values)
- return rendered_template
-
-
-def _retrieve_resources_paths():
- return ctx.node.properties.get('resources', [])
-
-
-def _retrieve_services_paths():
- return ctx.node.properties.get('services', None)
-
-
-def _retrieve_values(yaml):
- values_file_path = ctx.node.properties.get('values', None)
-
- if values_file_path:
- return yaml.load(ctx.get_resource(values_file_path))
-
- ctx.logger.warn('Values file not found')
-
-
-def _set_deployment_result(key):
- result = ctx.instance.runtime_properties.pop(key)
- ctx.instance.runtime_properties['kubernetes'] = result
-
-
-def _do_delete_resources(kubernetes_plugin, yaml, jinja_env, values):
- for path in _retrieve_resources_paths():
- ctx.logger.info('Deleting resource defined in: {0}'.format(path))
-
- template_content = ctx.get_resource(path)
- yaml_content = _render_template(
- jinja_env,
- template_content,
- values
- )
- content = yaml.load(yaml_content)
-
- _set_deployment_result(
- 'resource_{0}'.format(content['metadata']['name'])
- )
- kubernetes_plugin.resource_delete(definition=content)
-
- ctx.logger.info('Resources deleted successfully')
-
-
-def _do_delete_services(kubernetes_plugin, yaml, jinja_env, values):
- ctx.logger.info('Deleting services')
- services_file_path = _retrieve_services_paths()
-
- if not services_file_path:
- ctx.logger.warn(
- 'Service file is not defined. Skipping services provisioning !'
- )
-
- return
-
- template_content = ctx.get_resource(services_file_path)
- yaml_content = _render_template(
- jinja_env,
- template_content,
- values
- )
-
- yaml_content_parts = \
- yaml_content.split(SERVICES_FILE_PARTS_SEPARATOR)
-
- for yaml_content_part in yaml_content_parts:
- content = yaml.load(yaml_content_part)
-
- _set_deployment_result(
- 'service_{0}'.format(content['metadata']['name'])
- )
- kubernetes_plugin.resource_delete(definition=content)
-
- ctx.logger.info('Services deleted successfully')
-
-
-if __name__ == '__main__':
- yaml, kubernetes_plugin, jinja2 = _import_or_install()
- jinja_env = _init_jinja(jinja2)
- values = _retrieve_values(yaml)
-
- _do_delete_services(kubernetes_plugin, yaml, jinja_env, values)
- _do_delete_resources(kubernetes_plugin, yaml, jinja_env, values)
-
-
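The two delete scripts mirror their create counterparts, with one piece of bookkeeping worth spelling out: after a create, the plugin's result lands in the 'kubernetes' runtime property and is immediately moved under a per-resource key; before a delete, that stored result is popped back into 'kubernetes' so resource_delete can find the object it has to remove. Stripped of Cloudify, the bookkeeping is a pair of dict moves (a sketch, with a plain dict standing in for ctx.instance.runtime_properties):

    runtime_properties = {}

    def save_result(key):
        # After create: stash the plugin's result under a stable key and clear the slot.
        runtime_properties[key] = runtime_properties.get('kubernetes', {})
        runtime_properties['kubernetes'] = {}

    def restore_result(key):
        # Before delete: put the stored result back where the plugin expects it.
        runtime_properties['kubernetes'] = runtime_properties.pop(key)

    runtime_properties['kubernetes'] = {'metadata': {'name': 'aai-init-pod'}}
    save_result('init_pod')      # kubernetes -> init_pod
    restore_result('init_pod')   # init_pod -> kubernetes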
diff --git a/cloudify/scripts/tasks.py b/cloudify/scripts/tasks.py
deleted file mode 100644
index 035a780cb3..0000000000
--- a/cloudify/scripts/tasks.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-
-from fabric.api import run
-
-
-def label_node(labels, hostname):
- if labels:
- label_list = []
- for key, value in labels.items():
- label_pair_string = '%s=%s' % (key, value)
- label_list.append(label_pair_string)
- label_string = ' '.join(label_list)
- command = 'kubectl label nodes %s %s' % (hostname, label_string)
- run(command)
-
-
-def stop_node(hostname):
- command = 'kubectl drain %s' % (hostname)
- run(command)
-
-
-def delete_node(hostname):
- command = 'kubectl delete no %s' % (hostname)
- run(command)
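tasks.py is the Fabric half of the node lifecycle: label the worker on start, drain it on stop, remove it from the cluster on delete, always by running kubectl on the master. Run locally instead of over Fabric, the three operations reduce to the following (hostname and labels are illustrative; list-form arguments avoid the shell-quoting pitfalls of the original string formatting):

    import subprocess

    def kubectl(*args):
        subprocess.check_call(['kubectl'] + list(args))

    def label_node(hostname, labels):
        pairs = ['{0}={1}'.format(k, v) for k, v in labels.items()]
        kubectl('label', 'nodes', hostname, *pairs)

    def stop_node(hostname):
        kubectl('drain', hostname)

    def delete_node(hostname):
        kubectl('delete', 'no', hostname)

    # e.g. label_node('k8s-node-1', {'app': 'onap-aai'})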
diff --git a/cloudify/types/kubernetes.yaml b/cloudify/types/kubernetes.yaml
deleted file mode 100644
index 1698aa210e..0000000000
--- a/cloudify/types/kubernetes.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-inputs:
-
- join_command:
- type: string
-
- labels:
- default:
- app: { get_input: app_name }
-
- kubernetes_master_ip:
- type: string
-
- kubernetes_master_agent_user:
- default: { get_input: agent_user }
-
-node_types:
-
- cloudify.nodes.Kubernetes.Node:
- derived_from: cloudify.nodes.Root
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- implementation: cloudify/scripts/create.py
- configure:
- implementation: cloudify/scripts/configure_node.py
- inputs:
- join_command:
- default: { get_input: join_command }
- start:
- implementation: fabric.fabric_plugin.tasks.run_task
- inputs:
- tasks_file:
- default: cloudify/scripts/tasks.py
- task_name:
- default: label_node
- task_properties:
- default:
- hostname: { get_attribute: [ SELF, hostname ] }
- labels: { get_input: labels }
- fabric_env:
- default:
- host_string: { get_input: kubernetes_master_ip }
- user: { get_input: kubernetes_master_agent_user }
- key: { get_secret: agent_key_private }
-# stop:
-# implementation: fabric.fabric_plugin.tasks.run_task
-# inputs:
-# tasks_file:
-# default: cloudify/scripts/tasks.py
-# task_name:
-# default: stop_node
-# task_properties:
-# default:
-# hostname: { get_attribute: [ SELF, hostname ] }
-# fabric_env:
-# default:
-# host_string: { get_input: kubernetes_master_ip }
-# user: { get_input: kubernetes_master_agent_user }
-# key: { get_secret: agent_key_private }
- delete:
- implementation: fabric.fabric_plugin.tasks.run_task
- inputs:
- tasks_file:
- default: cloudify/scripts/tasks.py
- task_name:
- default: delete_node
- task_properties:
- default:
- hostname: { get_attribute: [ SELF, hostname ] }
- fabric_env:
- default:
- host_string: { get_input: kubernetes_master_ip }
- user: { get_input: kubernetes_master_agent_user }
- key: { get_secret: agent_key_private }
-
- cloudify.kubernetes.resources.Namespace:
- derived_from: cloudify.kubernetes.resources.Main
- properties:
- _api_mapping:
- default:
- create:
- api: CoreV1Api
- method: create_namespace
- payload: V1Namespace
- read:
- api: CoreV1Api
- method: read_namespace
- delete:
- api: CoreV1Api
- method: delete_namespace
- payload: V1DeleteOptions
diff --git a/cloudify/types/onap.yaml b/cloudify/types/onap.yaml
deleted file mode 100644
index 7e9b83425e..0000000000
--- a/cloudify/types/onap.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
-node_types:
- cloudify.onap.kubernetes.Environment:
- derived_from: cloudify.nodes.Root
- properties:
- namespace:
- type: string
- init_pod:
- type: string
- description: >
- Path to init pod YAML file
- options:
- description: >
- For compatibility with kubernetes plugin.
- To be removed in the future.
- default: {}
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- implementation: cloudify/scripts/onap/create_namespace.py
- executor: central_deployment_agent
- start:
- implementation: cloudify/scripts/onap/create_init_pod.py
- executor: central_deployment_agent
- stop:
- implementation: cloudify/scripts/onap/delete_init_pod.py
- executor: central_deployment_agent
- delete:
- implementation: cloudify/scripts/onap/delete_namespace.py
- executor: central_deployment_agent
-
- cloudify.onap.kubernetes.App:
- derived_from: cloudify.nodes.Root
- properties:
- name:
- type: string
- description: >
- Name of ONAP app
- values:
- type: string
- description: >
- Path (relative, blueprint perspective) to values.yaml file
- required: false
- resources:
- description: >
- List of paths (relative, blueprint perspective)
- to all kubernetes resources YAML files definition
- default: []
- services:
- type: string
- description: >
- Path (relative, blueprint perspective)
- to kubernetes app services YAML file definition
- default: []
- inputs:
- description: >
- Parameters required to create kubernetes resources for each app
- default: {}
- options:
- description: >
- For compatibility with kubernetes plugin.
- To be removed in the future.
- default: {}
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- implementation: cloudify/scripts/onap/create_namespace.py
- executor: central_deployment_agent
- configure:
- implementation: fabric.fabric_plugin.tasks.run_task
- executor: central_deployment_agent
- inputs:
- tasks_file:
- default: cloudify/scripts/onap/configure_docker_secret_workaround.py
- task_name:
- default: configure_secret
- fabric_env:
- default:
- host_string: { get_secret: kubernetes_master_ip }
- user: { get_secret: agent_user }
- key: { get_secret: agent_key_private }
- start:
- implementation: cloudify/scripts/onap/create_resources_services.py
- executor: central_deployment_agent
- stop:
- implementation: cloudify/scripts/onap/delete_resources_services.py
- executor: central_deployment_agent
- delete:
- implementation: cloudify/scripts/onap/delete_namespace.py
- executor: central_deployment_agent
diff --git a/kubernetes/config/docker/init/src/config/log/aai/model-loader/logback.xml b/kubernetes/aai/resources/model-loader/conf/logback.xml
index cdef6d3708..d512d3b91c 100644
--- a/kubernetes/config/docker/init/src/config/log/aai/model-loader/logback.xml
+++ b/kubernetes/aai/resources/model-loader/conf/logback.xml
@@ -1,163 +1,161 @@
-<configuration debug="false" scan="true" scanPeriod="3 seconds">
- <!--<jmxConfigurator /> -->
- <!-- directory path for all other type logs -->
- <property name="logDir" value="/var/log/onap" />
- <!-- specify the component name <ECOMP-component-name>::= "MSO" | "DCAE"
- | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->
- <property name="componentName" value="aai"></property>
- <property name="subComponentName" value="aai-ml"></property>
- <property name="logDirectory" value="${logDir}/${componentName}/${subComponentName}" />
- <!-- default eelf log file names -->
- <property name="generalLogName" value="error" />
- <property name="metricsLogName" value="metrics" />
- <property name="auditLogName" value="audit" />
- <property name="debugLogName" value="debug" />
- <property name="queueSize" value="256" />
- <property name="maxFileSize" value="50MB" />
- <property name="maxHistory" value="30" />
- <property name="totalSizeCap" value="10GB" />
- <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger{36}\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
- <!-- Example evaluator filter applied against console appender -->
- <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF Appenders -->
- <!-- ============================================================================ -->
- <!-- The EELFAppender is used to record events to the general application
- log -->
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELF">
- <file>${logDirectory}/${generalLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELF">
- <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>INFO</level>
- </filter>
- <queueSize>256</queueSize>
- <appender-ref ref="EELF" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFAudit">
- <file>${logDirectory}/${auditLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFAudit">
- <queueSize>256</queueSize>
- <appender-ref ref="EELFAudit" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFMetrics">
- <file>${logDirectory}/${metricsLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFMetrics">
- <queueSize>256</queueSize>
- <appender-ref ref="EELFMetrics" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFDebug">
- <file>${logDirectory}/${debugLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFDebug">
- <queueSize>256</queueSize>
- <appender-ref ref="EELFDebug" />
- <includeCallerData>true</includeCallerData>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF loggers -->
- <!-- ============================================================================ -->
- <logger additivity="false" level="info" name="com.att.eelf">
- <appender-ref ref="asyncEELF" />
- <appender-ref ref="asyncEELFDebug" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.security">
- <appender-ref ref="asyncEELFSecurity" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.perf">
- <appender-ref ref="asyncEELFPerformance" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.server">
- <appender-ref ref="asyncEELFServer" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.policy">
- <appender-ref ref="asyncEELFPolicy" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.audit">
- <appender-ref ref="asyncEELFAudit" />
- </logger>
- <logger additivity="false" level="info" name="com.att.eelf.metrics">
- <appender-ref ref="asyncEELFMetrics" />
- </logger>
- <!-- Spring related loggers -->
- <logger level="WARN" name="org.springframework" />
- <logger level="WARN" name="org.springframework.beans" />
- <logger level="WARN" name="org.springframework.web" />
- <logger level="WARN" name="com.blog.spring.jms" />
- <logger level="INFO" name="com.att" />
- <!-- Model Loader loggers -->
- <logger level="INFO" name="org.openecomp.modelloader" />
- <!-- Other Loggers that may help troubleshoot -->
- <logger level="WARN" name="net.sf" />
- <logger level="WARN" name="org.apache.commons.httpclient" />
- <logger level="WARN" name="org.apache.commons" />
- <logger level="WARN" name="org.apache.coyote" />
- <logger level="WARN" name="org.apache.jasper" />
- <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
- May aid in troubleshooting) -->
- <logger level="WARN" name="org.apache.camel" />
- <logger level="WARN" name="org.apache.cxf" />
- <logger level="WARN" name="org.apache.camel.processor.interceptor" />
- <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
- <logger level="WARN" name="org.apache.cxf.service" />
- <logger level="WARN" name="org.restlet" />
- <logger level="WARN" name="org.apache.camel.component.restlet" />
- <!-- logback internals logging -->
- <logger level="WARN" name="ch.qos.logback.classic" />
- <logger level="WARN" name="ch.qos.logback.core" />
- <root>
- <appender-ref ref="asyncEELF" />
- <!-- <appender-ref ref="asyncEELFDebug" /> -->
- </root>
-</configuration>
+<configuration debug="false" scan="true" scanPeriod="3 seconds">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <property name="logDir" value="/var/log/onap" />
+ <property name="componentName" value="aai"></property>
+ <property name="subComponentName" value="aai-ml"></property>
+ <property name="logDirectory" value="${logDir}/${componentName}/${subComponentName}" />
+ <!-- default eelf log file names -->
+ <property name="generalLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <property name="queueSize" value="256" />
+ <property name="maxFileSize" value="50MB" />
+ <property name="maxHistory" value="30" />
+ <property name="totalSizeCap" value="10GB" />
+ <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
+ <!-- Example evaluator filter applied against console appender -->
+ <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELF">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELF">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFAudit">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFAudit">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFMetrics">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFMetrics">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFMetrics" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFDebug">
+ <file>${logDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFDebug">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>true</includeCallerData>
+ </appender>
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger additivity="false" level="info" name="com.att.eelf">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.security">
+ <appender-ref ref="asyncEELFSecurity" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.perf">
+ <appender-ref ref="asyncEELFPerformance" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.server">
+ <appender-ref ref="asyncEELFServer" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.policy">
+ <appender-ref ref="asyncEELFPolicy" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.audit">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.metrics">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+ <!-- Spring related loggers -->
+ <logger level="WARN" name="org.springframework" />
+ <logger level="WARN" name="org.springframework.beans" />
+ <logger level="WARN" name="org.springframework.web" />
+ <logger level="WARN" name="com.blog.spring.jms" />
+ <logger level="INFO" name="com.att" />
+ <!-- Model Loader loggers -->
+ <logger level="INFO" name="org.openecomp.modelloader" />
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger level="WARN" name="net.sf" />
+ <logger level="WARN" name="org.apache.commons.httpclient" />
+ <logger level="WARN" name="org.apache.commons" />
+ <logger level="WARN" name="org.apache.coyote" />
+ <logger level="WARN" name="org.apache.jasper" />
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger level="WARN" name="org.apache.camel" />
+ <logger level="WARN" name="org.apache.cxf" />
+ <logger level="WARN" name="org.apache.camel.processor.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.service" />
+ <logger level="WARN" name="org.restlet" />
+ <logger level="WARN" name="org.apache.camel.component.restlet" />
+ <!-- logback internals logging -->
+ <logger level="WARN" name="ch.qos.logback.classic" />
+ <logger level="WARN" name="ch.qos.logback.core" />
+ <root>
+ <appender-ref ref="asyncEELF" />
+ <!-- <appender-ref ref="asyncEELFDebug" /> -->
+ </root>
+</configuration>
diff --git a/kubernetes/aai/resources/resources/conf/logback.xml b/kubernetes/aai/resources/resources/conf/logback.xml
new file mode 100644
index 0000000000..bf334dfb0e
--- /dev/null
+++ b/kubernetes/aai/resources/resources/conf/logback.xml
@@ -0,0 +1,345 @@
+<configuration debug="false" scan="true" scanPeriod="3 seconds">
+ <contextName>${module.ajsc.namespace.name}</contextName>
+ <jmxConfigurator />
+ <property name="logDir" value="/var/log/onap" />
+ <property name="componentName" value="aai"></property>
+ <property name="subcomponentName" value="aai-resources"></property>
+ <property name="restLogDirectory" value="${logDir}/${componentName}/${subcomponentName}/rest" />
+ <property name="dmaapLogDirectory" value="${logDir}/${componentName}/${subcomponentName}/dmaapAAIEventConsumer" />
+ <property name="perfLogsDirectory" value="${logDir}/${componentName}/${subcomponentName}/perf-audit" />
+ <!-- default eelf log file names -->
+ <property name="errorLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <property name="queueSize" value="256" />
+ <property name="maxFileSize" value="50MB" />
+ <property name="maxHistory" value="30" />
+ <property name="totalSizeCap" value="10GB" />
+ <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
+ <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="SANE">
+ <file>${restLogDirectory}/sane.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/sane.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncSANE">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="SANE" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="METRIC">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <file>${restLogDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncMETRIC">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="METRIC" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="DEBUG">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>DEBUG</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <file>${restLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncDEBUG">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="DEBUG" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="ERROR">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>WARN</level>
+ </filter>
+ <file>${restLogDirectory}/${errorLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncERROR">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="ERROR" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="AUDIT">
+ <file>${restLogDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncAUDIT">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="AUDIT" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="translog">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>DEBUG</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <file>${restLogDirectory}/translog.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/translog.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asynctranslog">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="translog" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIEventConsumer">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>WARN</level>
+ </filter>
+ <File>${dmaapLogDirectory}/${errorLogName}.log</File>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${dmaapLogDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIEventConsumerDebug">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>DEBUG</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <File>${dmaapLogDirectory}/${debugLogName}.log</File>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${dmaapLogDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIEventConsumerMetric">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <File>${dmaapLogDirectory}/${metricsLogName}.log</File>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${dmaapLogDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <!-- Spring related loggers -->
+ <logger level="WARN" name="org.springframework" />
+ <logger level="WARN" name="org.springframework.beans" />
+ <logger level="WARN" name="org.springframework.web" />
+ <logger level="WARN" name="com.blog.spring.jms" />
+ <!-- AJSC Services (bootstrap services) -->
+ <logger level="WARN" name="ajsc" />
+ <logger level="WARN" name="ajsc.RouteMgmtService" />
+ <logger level="WARN" name="ajsc.ComputeService" />
+ <logger level="WARN" name="ajsc.VandelayService" />
+ <logger level="WARN" name="ajsc.FilePersistenceService" />
+ <logger level="WARN" name="ajsc.UserDefinedJarService" />
+ <logger level="WARN" name="ajsc.UserDefinedBeansDefService" />
+ <logger level="WARN" name="ajsc.LoggingConfigurationService" />
+ <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+ logging) -->
+ <logger level="WARN" name="ajsc.utils" />
+ <logger level="WARN" name="ajsc.utils.DME2Helper" />
+ <logger level="WARN" name="ajsc.filters" />
+ <logger level="WARN" name="ajsc.beans.interceptors" />
+ <logger level="WARN" name="ajsc.restlet" />
+ <logger level="WARN" name="ajsc.servlet" />
+ <logger level="WARN" name="com.att.ajsc" />
+ <logger level="WARN" name="com.att.ajsc.csi.logging" />
+ <logger level="WARN" name="com.att.ajsc.filemonitor" />
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger level="WARN" name="net.sf" />
+ <logger level="WARN" name="org.apache.commons.httpclient" />
+ <logger level="WARN" name="org.apache.commons" />
+ <logger level="WARN" name="org.apache.coyote" />
+ <logger level="WARN" name="org.apache.jasper" />
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger level="WARN" name="org.apache.camel" />
+ <logger level="WARN" name="org.apache.cxf" />
+ <logger level="WARN" name="org.apache.camel.processor.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.service" />
+ <logger level="WARN" name="org.restlet" />
+ <logger level="WARN" name="org.apache.camel.component.restlet" />
+ <!-- logback internals logging -->
+ <logger level="WARN" name="ch.qos.logback.classic" />
+ <logger level="WARN" name="ch.qos.logback.core" />
+ <!-- logback jms appenders & loggers definition starts here -->
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="auditLogs">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
+ <file>${perfLogsDirectory}/Audit.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${perfLogsDirectory}/Audit.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="perfLogs">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
+ <file>${perfLogsDirectory}/Perform.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${perfLogsDirectory}/Perform.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="ASYNC-audit">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Audit-Record-Queue" />
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="ASYNC-perf">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Performance-Tracker-Queue" />
+ </appender>
+ <logger additivity="false" level="INFO" name="AuditRecord">
+ <appender-ref ref="ASYNC-audit" />
+ <appender-ref ref="auditLogs" />
+ </logger>
+ <logger additivity="false" level="INFO" name="AuditRecord_DirectCall">
+ <appender-ref ref="ASYNC-audit" />
+ <appender-ref ref="auditLogs" />
+ </logger>
+ <logger additivity="false" level="INFO" name="PerfTrackerRecord">
+ <appender-ref ref="ASYNC-perf" />
+ <appender-ref ref="perfLogs" />
+ </logger>
+ <!-- logback jms appenders & loggers definition ends here -->
+ <logger additivity="false" level="DEBUG" name="org.openecomp.aai.interceptors">
+ <appender-ref ref="asynctranslog" />
+ </logger>
+ <logger level="DEBUG" name="org.openecomp.aai.interceptors.PreAaiAjscInterceptor">
+ <appender-ref ref="asyncAUDIT" />
+ </logger>
+ <logger level="DEBUG" name="org.openecomp.aai.interceptors.PostAaiAjscInterceptor">
+ <appender-ref ref="asyncAUDIT" />
+ </logger>
+ <logger additivity="false" level="DEBUG" name="org.openecomp.aai.dmaap">
+ <appender-ref ref="dmaapAAIEventConsumer" />
+ <appender-ref ref="dmaapAAIEventConsumerDebug" />
+ <appender-ref ref="dmaapAAIEventConsumerMetric" />
+ </logger>
+ <logger level="WARN" name="org.apache" />
+ <logger level="WARN" name="org.zookeeper" />
+ <logger level="WARN" name="com.thinkaurelius" />
+ <!-- ============================================================================ -->
+ <!-- General EELF logger -->
+ <!-- ============================================================================ -->
+ <logger additivity="false" level="WARN" name="com.att.eelf">
+ <appender-ref ref="asyncDEBUG" />
+ <appender-ref ref="asyncERROR" />
+ <appender-ref ref="asyncMETRIC" />
+ <appender-ref ref="asyncAUDIT" />
+ </logger>
+ <root level="INFO">
+ <appender-ref ref="asyncDEBUG" />
+ </root>
+</configuration>
diff --git a/kubernetes/aai/resources/search-data-service/conf/logback.xml b/kubernetes/aai/resources/search-data-service/conf/logback.xml
new file mode 100644
index 0000000000..14ed4142fe
--- /dev/null
+++ b/kubernetes/aai/resources/search-data-service/conf/logback.xml
@@ -0,0 +1,188 @@
+<configuration debug="false" scan="true" scanPeriod="3 seconds">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <property name="logDir" value="/var/log/onap" />
+ <property name="componentName" value="aai"></property>
+ <property name="subcomponentName" value="aai-sdb" />
+ <property name="logDirectory" value="${logDir}/${componentName}/${subcomponentName}" />
+ <!-- default eelf log file names -->
+ <property name="errorLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <property name="queueSize" value="256" />
+ <property name="maxFileSize" value="50MB" />
+ <property name="maxHistory" value="30" />
+ <property name="totalSizeCap" value="10GB" />
+ <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
+ <!-- Example evaluator filter applied against console appender -->
+ <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELF">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELF">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFAudit">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFAudit">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFMetrics">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} -
+ %msg%n"</pattern> -->
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFMetrics">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFMetrics" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFDebug">
+ <file>${logDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFDebug">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>false</includeCallerData>
+ </appender>
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger additivity="false" level="info" name="com.att.eelf">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.security">
+ <appender-ref ref="asyncEELFSecurity" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.perf">
+ <appender-ref ref="asyncEELFPerformance" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.server">
+ <appender-ref ref="asyncEELFServer" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.policy">
+ <appender-ref ref="asyncEELFPolicy" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.audit">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger additivity="false" level="info" name="com.att.eelf.metrics">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+ <!-- Spring related loggers -->
+ <logger level="WARN" name="org.springframework" />
+ <logger level="WARN" name="org.springframework.beans" />
+ <logger level="WARN" name="org.springframework.web" />
+ <logger level="WARN" name="com.blog.spring.jms" />
+ <!-- AJSC Services (bootstrap services) -->
+ <logger level="WARN" name="ajsc" />
+ <logger level="WARN" name="ajsc.RouteMgmtService" />
+ <logger level="WARN" name="ajsc.ComputeService" />
+ <logger level="WARN" name="ajsc.VandelayService" />
+ <logger level="WARN" name="ajsc.FilePersistenceService" />
+ <logger level="WARN" name="ajsc.UserDefinedJarService" />
+ <logger level="WARN" name="ajsc.UserDefinedBeansDefService" />
+ <logger level="WARN" name="ajsc.LoggingConfigurationService" />
+ <logger level="WARN" name="ajsc.ErrorMessageLookupService" />
+ <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+ logging) -->
+ <logger level="WARN" name="ajsc.utils" />
+ <logger level="WARN" name="ajsc.utils.DME2Helper" />
+ <logger level="WARN" name="ajsc.filters" />
+ <logger level="WARN" name="ajsc.beans.interceptors" />
+ <logger level="WARN" name="ajsc.restlet" />
+ <logger level="WARN" name="ajsc.servlet" />
+ <logger level="INFO" name="com.att" />
+ <logger level="WARN" name="com.att.ajsc.csi.logging" />
+ <logger level="WARN" name="com.att.ajsc.filemonitor" />
+ <!-- SearchDB loggers -->
+ <logger level="INFO" name="org.openecomp.sa" />
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger level="WARN" name="net.sf" />
+ <logger level="WARN" name="org.apache.commons.httpclient" />
+ <logger level="WARN" name="org.apache.commons" />
+ <logger level="WARN" name="org.apache.coyote" />
+ <logger level="WARN" name="org.apache.jasper" />
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger level="WARN" name="org.apache.camel" />
+ <logger level="WARN" name="org.apache.cxf" />
+ <logger level="WARN" name="org.apache.camel.processor.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.service" />
+ <logger level="WARN" name="org.restlet" />
+ <logger level="WARN" name="org.apache.camel.component.restlet" />
+ <!-- logback internals logging -->
+ <logger level="WARN" name="ch.qos.logback.classic" />
+ <logger level="WARN" name="ch.qos.logback.core" />
+ <root>
+ <appender-ref ref="asyncEELF" />
+ <!-- <appender-ref ref="asyncEELFDebug" /> -->
+ </root>
+</configuration>
diff --git a/kubernetes/aai/resources/sparky-be/conf/logback.xml b/kubernetes/aai/resources/sparky-be/conf/logback.xml
new file mode 100644
index 0000000000..b822662050
--- /dev/null
+++ b/kubernetes/aai/resources/sparky-be/conf/logback.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration scan="true" scanPeriod="3 seconds" debug="false">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <property name="logDir" value="/var/log/onap" />
+ <property name="componentName" value="AAI-UI" />
+ <property name="logDirectory" value="${logDir}/${componentName}" />
+ <!-- default eelf log file names -->
+ <property name="generalLogName" value="application" />
+ <property name="errorLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <property name="queueSize" value="256" />
+ <property name="maxFileSize" value="50MB" />
+ <property name="maxHistory" value="30" />
+ <property name="totalSizeCap" value="10GB" />
+ <property name="pattern" value="%d{&amp;quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&amp;quot;, UTC}\t[%thread]\t%-5level\t%logger{36}\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+ <appender name="EELF" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+ <appender name="EELFAudit" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+ <appender name="EELFMetrics" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFMetrics" />
+ </appender>
+
+ <appender name="EELFError" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ </appender>
+ <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFError" />
+ </appender>
+
+ <appender name="EELFDebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>${queueSize}</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>false</includeCallerData>
+ </appender>
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="INFO" additivity="false" />
+ <logger name="org.elasticsearch.plugins" level="WARN" />
+ <logger name="com.att.eelf.debug" level="debug" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+ <logger name="com.att.eelf.error" level="info" additivity="false">
+ <appender-ref ref="asyncEELFError" />
+ </logger>
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" />
+ <logger name="org.springframework.beans" level="WARN" />
+ <logger name="org.springframework.web" level="WARN" />
+ <logger name="com.blog.spring.jms" level="WARN" />
+ <!-- AJSC Services (bootstrap services) -->
+ <logger name="ajsc" level="WARN" />
+ <logger name="ajsc.RouteMgmtService" level="WARN" />
+ <logger name="ajsc.ComputeService" level="WARN" />
+ <logger name="ajsc.VandelayService" level="WARN" />
+ <logger name="ajsc.FilePersistenceService" level="WARN" />
+ <logger name="ajsc.UserDefinedJarService" level="WARN" />
+ <logger name="ajsc.UserDefinedBeansDefService" level="WARN" />
+ <logger name="ajsc.LoggingConfigurationService" level="WARN" />
+ <logger name="ajsc.ErrorMessageLookupService" level="WARN" />
+ <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+ logging) -->
+ <logger name="ajsc.utils" level="WARN" />
+ <logger name="ajsc.utils.DME2Helper" level="WARN" />
+ <logger name="ajsc.filters" level="WARN" />
+ <logger name="ajsc.beans.interceptors" level="WARN" />
+ <logger name="ajsc.restlet" level="WARN" />
+ <logger name="ajsc.servlet" level="WARN" />
+ <logger name="com.att" level="WARN" />
+ <logger name="com.att.ajsc.csi.logging" level="WARN" />
+ <logger name="com.att.ajsc.filemonitor" level="WARN" />
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="WARN" />
+ <logger name="org.apache.commons.httpclient" level="WARN" />
+ <logger name="org.apache.commons" level="WARN" />
+ <logger name="org.apache.coyote" level="WARN" />
+ <logger name="org.apache.jasper" level="WARN" />
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="WARN" />
+ <logger name="org.apache.cxf" level="WARN" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.service" level="WARN" />
+ <logger name="org.restlet" level="WARN" />
+ <logger name="org.apache.camel.component.restlet" level="WARN" />
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="INFO" />
+ <logger name="ch.qos.logback.core" level="INFO" />
+ <root level="INFO">
+ <appender-ref ref="asyncEELFDebug" />
+ </root>
+</configuration>
\ No newline at end of file
diff --git a/kubernetes/config/docker/init/src/config/log/aai/ajsc-aai/logback.xml b/kubernetes/aai/resources/traversal/conf/logback.xml
index b16b6ebb18..825fac9b47 100644
--- a/kubernetes/config/docker/init/src/config/log/aai/ajsc-aai/logback.xml
+++ b/kubernetes/aai/resources/traversal/conf/logback.xml
@@ -1,295 +1,345 @@
-<configuration debug="false" scan="true" scanPeriod="60 seconds">
- <contextName>${module.ajsc.namespace.name}</contextName>
- <jmxConfigurator />
- <property name="logDir" value="/var/log/onap" />
- <property name="componentName" value="aai"></property>
- <property name="restLogDirectory" value="${logDir}/${componentName}/rest" />
- <property name="dmaapLogDirectory" value="${logDir}/${componentName}/dmaapAAIWorkloadConsumer" />
- <property name="perfLogsDirectory" value="${logDir}/${componentName}/perf-audit" />
- <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger{36}\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
- <!-- log file names -->
- <property name="errorLogName" value="error" />
- <property name="metricsLogName" value="metrics" />
- <property name="auditLogName" value="audit" />
- <property name="debugLogName" value="debug" />
- <property name="queueSize" value="256" />
- <property name="maxFileSize" value="50MB" />
- <property name="maxHistory" value="30" />
- <property name="totalSizeCap" value="10GB" />
- <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
- <filter class="ch.qos.logback.classic.filter.LevelFilter">
- <level>ERROR</level>
- <onMatch>ACCEPT</onMatch>
- <onMismatch>DENY</onMismatch>
- </filter>
- <encoder>
- <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="METRIC">
- <filter class="ch.qos.logback.classic.filter.LevelFilter">
- <level>INFO</level>
- <onMatch>ACCEPT</onMatch>
- <onMismatch>DENY</onMismatch>
- </filter>
- <file>${restLogDirectory}/${metricsLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${restLogDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncMETRIC">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="METRIC" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="DEBUG">
- <filter class="ch.qos.logback.classic.filter.LevelFilter">
- <level>DEBUG</level>
- <onMatch>ACCEPT</onMatch>
- <onMismatch>DENY</onMismatch>
- </filter>
- <file>${restLogDirectory}/${debugLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${restLogDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncDEBUG">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="DEBUG" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="ERROR">
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>WARN</level>
- </filter>
- <file>${restLogDirectory}/${errorLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${restLogDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncERROR">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="ERROR" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="AUDIT">
- <file>${restLogDirectory}/${auditLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${restLogDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncAUDIT">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="AUDIT" />
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIWorkloadConsumer">
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>WARN</level>
- </filter>
- <File>${dmaapLogDirectory}/${errorLogName}.log</File>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${dmaapLogDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIWorkloadConsumerdebug">
- <filter class="ch.qos.logback.classic.filter.LevelFilter">
- <level>DEBUG</level>
- <onMatch>ACCEPT</onMatch>
- <onMismatch>DENY</onMismatch>
- </filter>
- <File>${dmaapLogDirectory}/${debugLogName}.log</File>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${dmaapLogDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIWorkloadConsumermetric">
- <filter class="ch.qos.logback.classic.filter.LevelFilter">
- <level>INFO</level>
- <onMatch>ACCEPT</onMatch>
- <onMismatch>DENY</onMismatch>
- </filter>
- <File>${dmaapLogDirectory}/${metricsLogName}.log</File>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${dmaapLogDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <!-- Spring related loggers -->
- <logger level="WARN" name="org.springframework" />
- <logger level="WARN" name="org.springframework.beans" />
- <logger level="WARN" name="org.springframework.web" />
- <logger level="WARN" name="com.blog.spring.jms" />
- <!-- AJSC Services (bootstrap services) -->
- <logger level="WARN" name="ajsc" />
- <logger level="WARN" name="ajsc.RouteMgmtService" />
- <logger level="WARN" name="ajsc.ComputeService" />
- <logger level="WARN" name="ajsc.VandelayService" />
- <logger level="WARN" name="ajsc.FilePersistenceService" />
- <logger level="WARN" name="ajsc.UserDefinedJarService" />
- <logger level="WARN" name="ajsc.UserDefinedBeansDefService" />
- <logger level="WARN" name="ajsc.LoggingConfigurationService" />
- <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet logging) -->
- <logger level="WARN" name="ajsc.utils" />
- <logger level="WARN" name="ajsc.utils.DME2Helper" />
- <logger level="WARN" name="ajsc.filters" />
- <logger level="WARN" name="ajsc.beans.interceptors" />
- <logger level="WARN" name="ajsc.restlet" />
- <logger level="WARN" name="ajsc.servlet" />
- <logger level="WARN" name="com.att.ajsc" />
- <logger level="WARN" name="com.att.ajsc.csi.logging" />
- <logger level="WARN" name="com.att.ajsc.filemonitor" />
- <!-- Other Loggers that may help troubleshoot -->
- <logger level="WARN" name="net.sf" />
- <logger level="WARN" name="org.apache.commons.httpclient" />
- <logger level="WARN" name="org.apache.commons" />
- <logger level="WARN" name="org.apache.coyote" />
- <logger level="WARN" name="org.apache.jasper" />
- <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging. May aid in troubleshooting) -->
- <logger level="WARN" name="org.apache.camel" />
- <logger level="WARN" name="org.apache.cxf" />
- <logger level="WARN" name="org.apache.camel.processor.interceptor" />
- <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
- <logger level="WARN" name="org.apache.cxf.service" />
- <logger level="WARN" name="org.restlet" />
- <logger level="WARN" name="org.apache.camel.component.restlet" />
- <!-- logback internals logging -->
- <logger level="WARN" name="ch.qos.logback.classic" />
- <logger level="WARN" name="ch.qos.logback.core" />
- <!-- logback jms appenders & loggers definition starts here -->
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="auditLogs">
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
- <file>${perfLogsDirectory}/Audit-${lrmRVer}-${lrmRO}-${Pid}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${perfLogsDirectory}/Audit-${lrmRVer}-${lrmRO}-${Pid}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="perfLogs">
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
- <file>${perfLogsDirectory}/Perform-${lrmRVer}-${lrmRO}-${Pid}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${perfLogsDirectory}/Perform-${lrmRVer}-${lrmRO}-${Pid}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="ASYNC-audit">
- <queueSize>1000</queueSize>
- <discardingThreshold>0</discardingThreshold>
- <appender-ref ref="Audit-Record-Queue" />
- </appender>
- <logger additivity="false" level="INFO" name="AuditRecord">
- <appender-ref ref="ASYNC-audit" />
- <appender-ref ref="auditLogs" />
- </logger>
- <logger additivity="false" level="INFO" name="AuditRecord_DirectCall">
- <appender-ref ref="ASYNC-audit" />
- <appender-ref ref="auditLogs" />
- </logger>
- <appender class="ch.qos.logback.classic.AsyncAppender" name="ASYNC-perf">
- <queueSize>1000</queueSize>
- <discardingThreshold>0</discardingThreshold>
- <appender-ref ref="Performance-Tracker-Queue" />
- </appender>
- <logger additivity="false" level="INFO" name="PerfTrackerRecord">
- <appender-ref ref="ASYNC-perf" />
- <appender-ref ref="perfLogs" />
- </logger>
- <!-- logback jms appenders & loggers definition ends here -->
- <logger additivity="false" level="DEBUG" name="org.openecomp.aai.dmaap">
- <appender-ref ref="dmaapAAIWorkloadConsumer" />
- <appender-ref ref="dmaapAAIWorkloadConsumerdebug" />
- <appender-ref ref="dmaapAAIWorkloadConsumermetric" />
- </logger>
- <logger additivity="false" level="INFO" name="org.openecomp.aai">
- <appender-ref ref="asyncDEBUG" />
- <appender-ref ref="asyncERROR" />
- <appender-ref ref="asyncMETRIC" />
- </logger>
- <logger additivity="false" level="INFO" name="org.openecomp.aai.AUDITLOGGER">
- <appender-ref ref="asyncAUDIT" />
- </logger>
- <logger level="WARN" name="org.apache" />
- <logger level="WARN" name="org.zookeeper" />
- <logger level="WARN" name="com.thinkaurelius" />
- <!-- ============================================================================ -->
- <!-- General EELF logger -->
- <!-- ============================================================================ -->
- <logger additivity="false" level="WARN" name="com.att.eelf">
- <appender-ref ref="asyncDEBUG" />
- <appender-ref ref="asyncERROR" />
- <appender-ref ref="asyncMETRIC" />
- <appender-ref ref="asyncAUDIT" />
- </logger>
- <root level="WARN">
- <appender-ref ref="asyncDEBUG" />
- <appender-ref ref="asyncERROR" />
- <appender-ref ref="asyncMETRIC" />
- <appender-ref ref="asyncAUDIT" />
- </root>
-</configuration>
+<configuration debug="false" scan="true" scanPeriod="3 seconds">
+ <contextName>${module.ajsc.namespace.name}</contextName>
+ <jmxConfigurator />
+ <property name="logDir" value="/var/log/onap" />
+ <property name="componentName" value="aai"></property>
+ <property name="restLogDirectory" value="${logDir}/${componentName}/rest" />
+ <property name="dmaapLogDirectory" value="${logDir}/${componentName}/dmaapAAIEventConsumer" />
+ <property name="perfLogsDirectory" value="${logDir}/${componentName}/perf-audit" />
+ <!-- default eelf log file names -->
+ <property name="errorLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <property name="queueSize" value="256" />
+ <property name="maxFileSize" value="50MB" />
+ <property name="maxHistory" value="30" />
+ <property name="totalSizeCap" value="10GB" />
+ <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
+ <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="SANE">
+ <file>${restLogDirectory}/sane.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/sane.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncSANE">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="SANE" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="METRIC">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <file>${restLogDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncMETRIC">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="METRIC" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="DEBUG">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>DEBUG</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <file>${restLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncDEBUG">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="DEBUG" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="ERROR">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>WARN</level>
+ </filter>
+ <file>${restLogDirectory}/${errorLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncERROR">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="ERROR" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="AUDIT">
+ <file>${restLogDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncAUDIT">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="AUDIT" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="translog">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>DEBUG</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <file>${restLogDirectory}/translog.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${restLogDirectory}/translog.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="asynctranslog">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="translog" />
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIEventConsumer">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>WARN</level>
+ </filter>
+ <File>${dmaapLogDirectory}/${errorLogName}.log</File>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${dmaapLogDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIEventConsumerDebug">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>DEBUG</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <File>${dmaapLogDirectory}/${debugLogName}.log</File>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${dmaapLogDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="dmaapAAIEventConsumerMetric">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <File>${dmaapLogDirectory}/${metricsLogName}.log</File>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${dmaapLogDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder class="org.openecomp.aai.logging.EcompEncoder">
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <!-- Spring related loggers -->
+ <logger level="WARN" name="org.springframework" />
+ <logger level="WARN" name="org.springframework.beans" />
+ <logger level="WARN" name="org.springframework.web" />
+ <logger level="WARN" name="com.blog.spring.jms" />
+ <!-- AJSC Services (bootstrap services) -->
+ <logger level="WARN" name="ajsc" />
+ <logger level="WARN" name="ajsc.RouteMgmtService" />
+ <logger level="WARN" name="ajsc.ComputeService" />
+ <logger level="WARN" name="ajsc.VandelayService" />
+ <logger level="WARN" name="ajsc.FilePersistenceService" />
+ <logger level="WARN" name="ajsc.UserDefinedJarService" />
+ <logger level="WARN" name="ajsc.UserDefinedBeansDefService" />
+ <logger level="WARN" name="ajsc.LoggingConfigurationService" />
+ <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+ logging) -->
+ <logger level="WARN" name="ajsc.utils" />
+ <logger level="WARN" name="ajsc.utils.DME2Helper" />
+ <logger level="WARN" name="ajsc.filters" />
+ <logger level="WARN" name="ajsc.beans.interceptors" />
+ <logger level="WARN" name="ajsc.restlet" />
+ <logger level="WARN" name="ajsc.servlet" />
+ <logger level="WARN" name="com.att.ajsc" />
+ <logger level="WARN" name="com.att.ajsc.csi.logging" />
+ <logger level="WARN" name="com.att.ajsc.filemonitor" />
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger level="WARN" name="net.sf" />
+ <logger level="WARN" name="org.apache.commons.httpclient" />
+ <logger level="WARN" name="org.apache.commons" />
+ <logger level="WARN" name="org.apache.coyote" />
+ <logger level="WARN" name="org.apache.jasper" />
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger level="WARN" name="org.apache.camel" />
+ <logger level="WARN" name="org.apache.cxf" />
+ <logger level="WARN" name="org.apache.camel.processor.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.jaxrs.interceptor" />
+ <logger level="WARN" name="org.apache.cxf.service" />
+ <logger level="WARN" name="org.restlet" />
+ <logger level="WARN" name="org.apache.camel.component.restlet" />
+ <!-- logback internals logging -->
+ <logger level="WARN" name="ch.qos.logback.classic" />
+ <logger level="WARN" name="ch.qos.logback.core" />
+ <!-- logback jms appenders & loggers definition starts here -->
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="auditLogs">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
+ <file>${perfLogsDirectory}/Audit-${lrmRVer}-${lrmRO}-${Pid}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${perfLogsDirectory}/Audit-${lrmRVer}-${lrmRO}-${Pid}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="perfLogs">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter" />
+ <file>${perfLogsDirectory}/Perform-${lrmRVer}-${lrmRO}-${Pid}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${perfLogsDirectory}/Perform-${lrmRVer}-${lrmRO}-${Pid}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>${maxFileSize}</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>${maxHistory}</maxHistory>
+ <totalSizeCap>${totalSizeCap}</totalSizeCap>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${pattern}</pattern>
+ </encoder>
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="ASYNC-audit">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Audit-Record-Queue" />
+ </appender>
+ <appender class="ch.qos.logback.classic.AsyncAppender" name="ASYNC-perf">
+ <queueSize>${queueSize}</queueSize>
+ <includeCallerData>true</includeCallerData>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Performance-Tracker-Queue" />
+ </appender>
+ <logger additivity="false" level="INFO" name="AuditRecord">
+ <appender-ref ref="ASYNC-audit" />
+ <appender-ref ref="auditLogs" />
+ </logger>
+ <logger additivity="false" level="INFO" name="AuditRecord_DirectCall">
+ <appender-ref ref="ASYNC-audit" />
+ <appender-ref ref="auditLogs" />
+ </logger>
+ <logger additivity="false" level="INFO" name="PerfTrackerRecord">
+ <appender-ref ref="ASYNC-perf" />
+ <appender-ref ref="perfLogs" />
+ </logger>
+ <!-- logback jms appenders & loggers definition ends here -->
+ <logger additivity="false" level="DEBUG" name="org.openecomp.aai.interceptors">
+ <appender-ref ref="asynctranslog" />
+ </logger>
+ <logger level="DEBUG" name="org.openecomp.aai.interceptors.PreAaiAjscInterceptor">
+ <appender-ref ref="asyncAUDIT" />
+ </logger>
+ <logger level="DEBUG" name="org.openecomp.aai.interceptors.PostAaiAjscInterceptor">
+ <appender-ref ref="asyncAUDIT" />
+ </logger>
+ <logger additivity="false" level="DEBUG" name="org.openecomp.aai.dmaap">
+ <appender-ref ref="dmaapAAIEventConsumer" />
+ <appender-ref ref="dmaapAAIEventConsumerDebug" />
+ <appender-ref ref="dmaapAAIEventConsumerMetric" />
+ </logger>
+ <logger level="WARN" name="org.apache" />
+ <logger level="WARN" name="org.zookeeper" />
+ <logger level="WARN" name="com.thinkaurelius" />
+ <!-- ============================================================================ -->
+ <!-- General EELF logger -->
+ <!-- ============================================================================ -->
+ <logger additivity="false" level="WARN" name="com.att.eelf">
+ <appender-ref ref="asyncDEBUG" />
+ <appender-ref ref="asyncERROR" />
+ <appender-ref ref="asyncMETRIC" />
+ <appender-ref ref="asyncAUDIT" />
+ </logger>
+ <root level="INFO">
+ <appender-ref ref="asyncDEBUG" />
+ <appender-ref ref="asyncERROR" />
+ </root>
+</configuration>
diff --git a/kubernetes/aai/templates/aai-resources-deployment.yaml b/kubernetes/aai/templates/aai-resources-deployment.yaml
index 6c78ca8798..b0d437b61d 100644
--- a/kubernetes/aai/templates/aai-resources-deployment.yaml
+++ b/kubernetes/aai/templates/aai-resources-deployment.yaml
@@ -54,16 +54,19 @@ spec:
value: /var/chef/aai-data/environments
- name: CHEF_GIT_URL
value: http://gerrit.onap.org/r/aai
- - name: LOCAL_USER_ID
- value: "0"
+ - name: HBASE_STARTUP_ARTIFICIAL_DELAY
+ value: "60"
volumeMounts:
- mountPath: /etc/localtime
name: localtime
readOnly: true
- - mountPath: /opt/aai/logroot/AAI-RES
- name: aai-resources-logs
- mountPath: /var/chef/aai-data/
name: aai-data
+ - mountPath: /var/log/onap
+ name: aai-resources-logs
+ - mountPath: /opt/app/aai-resources/bundleconfig/etc/logback.xml
+ name: aai-resources-log-conf
+ subPath: logback.xml
ports:
- containerPort: 8447
readinessProbe:
@@ -71,16 +74,42 @@ spec:
port: 8447
initialDelaySeconds: 5
periodSeconds: 10
+ - name: filebeat-onap-aai-resources
+ image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ - mountPath: /var/log/onap
+ name: aai-resources-logs
+ - mountPath: /usr/share/filebeat/data
+ name: aai-resources-filebeat
volumes:
- name: localtime
hostPath:
path: /etc/localtime
- - name: aai-resources-logs
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-resources/logs/"
- name: aai-data
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-data/"
+ - name: filebeat-conf
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
+ - name: aai-resources-logs
+ emptyDir: {}
+ - name: aai-resources-filebeat
+ emptyDir: {}
+ - name: aai-resources-log-conf
+ configMap:
+ name: aai-resources-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-resources-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ (.Files.Glob "resources/resources/conf/logback.xml").AsConfig | indent 2 }}
+
diff --git a/kubernetes/aai/templates/aai-traversal-deployment.yaml b/kubernetes/aai/templates/aai-traversal-deployment.yaml
index a56608009a..d2a9c42f3f 100644
--- a/kubernetes/aai/templates/aai-traversal-deployment.yaml
+++ b/kubernetes/aai/templates/aai-traversal-deployment.yaml
@@ -58,16 +58,17 @@ spec:
value: http://gerrit.onap.org/r/aai
- name: RESOURCES_HOSTNAME
value: aai-resources.{{ .Values.nsPrefix }}-aai
- - name: LOCAL_USER_ID
- value: "0"
volumeMounts:
- mountPath: /etc/localtime
name: localtime
readOnly: true
- - mountPath: /opt/aai/logroot/AAI-GQ
- name: aai-traversal-logs
- mountPath: /var/chef/aai-data/
name: aai-data
+ - mountPath: /var/log/onap
+ name: aai-traversal-logs
+ - mountPath: /opt/app/aai-traversal/bundleconfig/etc/logback.xml
+ name: aai-traversal-log-conf
+ subPath: logback.xml
ports:
- containerPort: 8446
readinessProbe:
@@ -75,16 +76,42 @@ spec:
port: 8446
initialDelaySeconds: 5
periodSeconds: 10
+ - name: filebeat-onap-aai-traversal
+ image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ - mountPath: /var/log/onap
+ name: aai-traversal-logs
+ - mountPath: /usr/share/filebeat/data
+ name: aai-traversal-filebeat
volumes:
- name: localtime
hostPath:
path: /etc/localtime
- - name: aai-traversal-logs
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-traversal/logs/"
- name: aai-data
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-data/"
+ - name: filebeat-conf
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
+ - name: aai-traversal-logs
+ emptyDir: {}
+ - name: aai-traversal-filebeat
+ emptyDir: {}
+ - name: aai-traversal-log-conf
+ configMap:
+ name: aai-traversal-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-traversal-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ (.Files.Glob "resources/traversal/conf/logback.xml").AsConfig | indent 2 }}
+
diff --git a/kubernetes/aai/templates/modelloader-deployment.yaml b/kubernetes/aai/templates/modelloader-deployment.yaml
index 22b72815cb..3f2bda3d6c 100644
--- a/kubernetes/aai/templates/modelloader-deployment.yaml
+++ b/kubernetes/aai/templates/modelloader-deployment.yaml
@@ -14,7 +14,10 @@ spec:
name: model-loader-service
spec:
containers:
- - env:
+ - name: model-loader-service
+ image: "{{ .Values.image.modelLoaderImage }}:{{ .Values.image.modelLoaderVersion }}"
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ env:
- name: CONFIG_HOME
value: /opt/app/model-loader/config/
volumeMounts:
@@ -23,14 +26,24 @@ spec:
readOnly: true
- mountPath: /opt/app/model-loader/config/
name: aai-model-loader-config
- - mountPath: /logs/
+ - mountPath: /var/log/onap
name: aai-model-loader-logs
- image: "{{ .Values.image.modelLoaderImage }}:{{ .Values.image.modelLoaderVersion }}"
- imagePullPolicy: {{ .Values.pullPolicy }}
- name: model-loader-service
+ - mountPath: /opt/app/model-loader/bundleconfig/etc/logback.xml
+ name: aai-model-loader-log-conf
+ subPath: logback.xml
ports:
- containerPort: 8080
- containerPort: 8443
+ - name: filebeat-onap-aai-model-loader
+ image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ - mountPath: /var/log/onap
+ name: aai-model-loader-logs
+ - mountPath: /usr/share/filebeat/data
+ name: aai-model-loader-filebeat
volumes:
- name: localtime
hostPath:
@@ -38,9 +51,25 @@ spec:
- name: aai-model-loader-config
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/model-loader/appconfig/"
- - name: aai-model-loader-logs
+ - name: filebeat-conf
hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/model-loader/logs/"
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
+ - name: aai-model-loader-logs
+ emptyDir: {}
+ - name: aai-model-loader-filebeat
+ emptyDir: {}
+ - name: aai-model-loader-log-conf
+ configMap:
+ name: aai-model-loader-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-model-loader-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ (.Files.Glob "resources/model-loader/conf/logback.xml").AsConfig | indent 2 }}
+
diff --git a/kubernetes/aai/templates/search-data-service-deployment.yaml b/kubernetes/aai/templates/search-data-service-deployment.yaml
index 74132758f2..3eb0c5c714 100644
--- a/kubernetes/aai/templates/search-data-service-deployment.yaml
+++ b/kubernetes/aai/templates/search-data-service-deployment.yaml
@@ -30,8 +30,11 @@ spec:
readOnly: true
- mountPath: /opt/app/search-data-service/config/
name: aai-search-data-service-config
- - mountPath: /logs/
+ - mountPath: /var/log/onap
name: aai-search-data-service-logs
+ - mountPath: /opt/app/search-data-service/bundleconfig/etc/logback.xml
+ name: aai-search-data-service-log-conf
+ subPath: logback.xml
ports:
- containerPort: 9509
readinessProbe:
@@ -39,6 +42,16 @@ spec:
port: 9509
initialDelaySeconds: 5
periodSeconds: 10
+ - name: filebeat-onap-aai-search
+ image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ - mountPath: /var/log/onap
+ name: aai-search-data-service-logs
+ - mountPath: /usr/share/filebeat/data
+ name: aai-search-data-service-filebeat
volumes:
- name: localtime
hostPath:
@@ -46,9 +59,25 @@ spec:
- name: aai-search-data-service-config
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/search-data-service/appconfig/"
- - name: aai-search-data-service-logs
+ - name: filebeat-conf
hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/search-data-service/logs/"
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
+ - name: aai-search-data-service-logs
+ emptyDir: {}
+ - name: aai-search-data-service-filebeat
+ emptyDir: {}
+ - name: aai-search-data-service-log-conf
+ configMap:
+ name: aai-search-data-service-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-search-data-service-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ (.Files.Glob "resources/search-data-service/conf/logback.xml").AsConfig | indent 2 }}
+
diff --git a/kubernetes/aai/templates/sparky-be-deployment.yaml b/kubernetes/aai/templates/sparky-be-deployment.yaml
index a17325c056..ee9e067281 100644
--- a/kubernetes/aai/templates/sparky-be-deployment.yaml
+++ b/kubernetes/aai/templates/sparky-be-deployment.yaml
@@ -30,8 +30,11 @@ spec:
readOnly: true
- mountPath: /opt/app/sparky/config/
name: aai-sparky-be-config
- - mountPath: /logs/
+ - mountPath: /var/log/onap
name: aai-sparky-be-logs
+ - mountPath: /opt/app/sparky/bundleconfig/etc/logback.xml
+ name: aai-sparky-be-log-conf
+ subPath: logback.xml
ports:
- containerPort: 9517
readinessProbe:
@@ -39,6 +42,16 @@ spec:
port: 9517
initialDelaySeconds: 5
periodSeconds: 10
+ - name: filebeat-onap-aai-sparky
+ image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ - mountPath: /var/log/onap
+ name: aai-sparky-be-logs
+ - mountPath: /usr/share/filebeat/data
+ name: aai-sparky-filebeat
volumes:
- name: localtime
hostPath:
@@ -46,9 +59,25 @@ spec:
- name: aai-sparky-be-config
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/sparky-be/appconfig/"
- - name: aai-sparky-be-logs
+ - name: filebeat-conf
hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/sparky-be/logs/"
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
+ - name: aai-sparky-be-logs
+ emptyDir: {}
+ - name: aai-sparky-filebeat
+ emptyDir: {}
+ - name: aai-sparky-be-log-conf
+ configMap:
+ name: aai-sparky-be-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-sparky-be-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ (.Files.Glob "resources/sparky-be/conf/logback.xml").AsConfig | indent 2 }}
+
diff --git a/kubernetes/aai/values.yaml b/kubernetes/aai/values.yaml
index 9c549c781b..44ed7ca7ea 100644
--- a/kubernetes/aai/values.yaml
+++ b/kubernetes/aai/values.yaml
@@ -22,3 +22,4 @@ image:
sparkyBeImage: nexus3.onap.org:10001/openecomp/sparky-be
sparkyBeVersion: 1.1-STAGING-latest
gremlinServerImage: aaionap/gremlin-server
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
diff --git a/kubernetes/appc/templates/appc-deployment.yaml b/kubernetes/appc/templates/appc-deployment.yaml
index f320fee539..9beaacaa66 100644
--- a/kubernetes/appc/templates/appc-deployment.yaml
+++ b/kubernetes/appc/templates/appc-deployment.yaml
@@ -62,10 +62,8 @@ spec:
name: appc-properties
- mountPath: /opt/openecomp/appc/data/properties/aaiclient.properties
name: appc-aaiclient-properties
- - mountPath: /opt/openecomp/sdnc/data/properties/aaiclient.properties
+ - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
name: sdnc-aaiclient-properties
- - mountPath: /opt/openecomp/sdnc/data/properties/admportal.json
- name: sdnc-admportal-json
- mountPath: /var/log/onap
name: appc-logs
- mountPath: /opt/opendaylight/current/etc/org.ops4j.pax.logging.cfg
@@ -111,8 +109,5 @@ spec:
- name: sdnc-aaiclient-properties
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/aaiclient.properties
- - name: sdnc-admportal-json
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/admportal.json
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
diff --git a/kubernetes/appc/templates/dgbuilder-deployment.yaml b/kubernetes/appc/templates/dgbuilder-deployment.yaml
index 07923930d8..a2bd4a1bbb 100644
--- a/kubernetes/appc/templates/dgbuilder-deployment.yaml
+++ b/kubernetes/appc/templates/dgbuilder-deployment.yaml
@@ -45,7 +45,7 @@ spec:
- command:
- /bin/bash
- -c
- - cd /opt/openecomp/sdnc/dgbuilder/ && ./start.sh sdnc1.0 && wait
+ - cd /opt/onap/sdnc/dgbuilder/ && ./start.sh sdnc1.0 && wait
env:
- name: MYSQL_ROOT_PASSWORD
value: openECOMP1.0
diff --git a/kubernetes/appc/values.yaml b/kubernetes/appc/values.yaml
index ea14b83960..624c6421d2 100644
--- a/kubernetes/appc/values.yaml
+++ b/kubernetes/appc/values.yaml
@@ -5,5 +5,5 @@ image:
readiness: oomk8s/readiness-check:1.0.0
appc: nexus3.onap.org:10001/openecomp/appc-image:1.1-STAGING-latest
mysqlServer: mysql/mysql-server:5.6
- dgbuilderSdnc: nexus3.onap.org:10001/openecomp/dgbuilder-sdnc-image:1.1-STAGING-latest
+ dgbuilderSdnc: nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.1-STAGING-latest
   filebeat: docker.elastic.co/beats/filebeat:5.5.0
\ No newline at end of file
diff --git a/kubernetes/config/docker/init/src/config/appc/conf/aaiclient.properties b/kubernetes/config/docker/init/src/config/appc/conf/aaiclient.properties
index a24dfd6a51..67af3ff21c 100644
--- a/kubernetes/config/docker/init/src/config/appc/conf/aaiclient.properties
+++ b/kubernetes/config/docker/init/src/config/appc/conf/aaiclient.properties
@@ -25,125 +25,120 @@
#
# Certificate keystore and truststore
#
-org.openecomp.sdnc.sli.aai.ssl.trust=/opt/openecomp/appc/data/stores/truststore.openecomp.client.jks
-org.openecomp.sdnc.sli.aai.ssl.trust.psswd=adminadmin
-org.openecomp.sdnc.sli.aai.host.certificate.ignore=true
+org.onap.ccsdk.sli.adaptors.aai.ssl.trust=/opt/openecomp/appc/data/stores/truststore.openecomp.client.jks
+org.onap.ccsdk.sli.adaptors.aai.ssl.trust.psswd=adminadmin
+org.onap.ccsdk.sli.adaptors.aai.host.certificate.ignore=true
-org.openecomp.sdnc.sli.aai.client.name=APPC
-org.openecomp.sdnc.sli.aai.client.psswd=APPC
+org.onap.ccsdk.sli.adaptors.aai.client.name=AAI
+org.onap.ccsdk.sli.adaptors.aai.client.psswd=AAI
-org.openecomp.sdnc.sli.aai.application=openECOMP
+org.onap.ccsdk.sli.adaptors.aai.application=openECOMP
#connection.timeout=1000
#read.timeout=2000
-## TODO - Check if the values below are needed ##
-#When the p12 key expires, the new one will probably work with the old jks so no need to generate a new jks.
-#org.openecomp.sdnc.sli.aai.ssl.key=/opt/openecomp/appc/data/stores/keystore.client.p12
-#org.openecomp.sdnc.sli.aai.ssl.key.psswd=aaiDomain2
-
-
#
# Configuration file for A&AI Client
#
-org.openecomp.sdnc.sli.aai.uri=https://aai-service.onap-aai:8443
+org.onap.ccsdk.sli.adaptors.aai.uri=https://aai-service.onap-aai:8443
# query
-org.openecomp.sdnc.sli.aai.path.query=/aai/v8/search/sdn-zone-query
-org.openecomp.sdnc.sli.aai.query.nodes=/aai/v8/search/nodes-query?search-node-type={node-type}&filter={entity-identifier}:EQUALS:{entity-name}
-org.openecomp.sdnc.sli.aai.query.generic=/aai/v8/search/generic-query?key={identifier}:{value}&start-node-type={start-node-type}&include=complex&depth=3
+org.onap.ccsdk.sli.adaptors.aai.path.query=/aai/v11/search/sdn-zone-query
+org.onap.ccsdk.sli.adaptors.aai.query.nodes=/aai/v11/search/nodes-query?search-node-type={node-type}&filter={entity-identifier}:EQUALS:{entity-name}
+org.onap.ccsdk.sli.adaptors.aai.query.generic=/aai/v11/search/generic-query?key={identifier}:{value}&start-node-type={start-node-type}&include=complex&depth=3
# named query
-org.openecomp.sdnc.sli.aai.query.named=/aai/search/named-query
+org.onap.ccsdk.sli.adaptors.aai.query.named=/aai/search/named-query
+
#update
-org.openecomp.sdnc.sli.aai.update=/aai/v8/actions/update
+org.onap.ccsdk.sli.adaptors.aai.update=/aai/v11/actions/update
# vce
-org.openecomp.sdnc.sli.aai.path.vce =/aai/v8/network/vces/vce/
-org.openecomp.sdnc.sli.aai.path.vces=/aai/v8/network/vces/
+org.onap.ccsdk.sli.adaptors.aai.path.vce =/aai/v11/network/vces/vce/
+org.onap.ccsdk.sli.adaptors.aai.path.vces=/aai/v11/network/vces/
# vpe
-org.openecomp.sdnc.sli.aai.path.vpe =/aai/v8/network/vpes/vpe/
-org.openecomp.sdnc.sli.aai.path.vpes=/aai/v8/network/vpes/
+org.onap.ccsdk.sli.adaptors.aai.path.vpe =/aai/v11/network/vpes/vpe/
+org.onap.ccsdk.sli.adaptors.aai.path.vpes=/aai/v11/network/vpes/
# customer
-org.openecomp.sdnc.sli.aai.path.customer=/aai/v8/business/customers/customer/{customer-id}
+org.onap.ccsdk.sli.adaptors.aai.path.customer=/aai/v11/business/customers/customer/{customer-id}
# service subscription
-org.openecomp.sdnc.sli.aai.path.service.subscription=/aai/v8/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}
+org.onap.ccsdk.sli.adaptors.aai.path.service.subscription=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}
# service instance
-org.openecomp.sdnc.sli.aai.path.svcinst=/aai/v8/business/customers/customer/{customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
-org.openecomp.sdnc.sli.aai.path.svcinst.query=/aai/v8/search/generic-query?key=service-instance.service-instance-id:{svc-instance-id}&start-node-type=service-instance&include=service-instance
-org.openecomp.sdnc.sli.aai.path.service.instance=/aai/v8/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances/service-instance/{service-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.path.svcinst=/aai/v11/business/customers/customer/{customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
+org.onap.ccsdk.sli.adaptors.aai.path.svcinst.query=/aai/v11/search/generic-query?key=service-instance.service-instance-id:{svc-instance-id}&start-node-type=service-instance&include=service-instance
+org.onap.ccsdk.sli.adaptors.aai.path.service.instance=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances/service-instance/{service-instance-id}
# complex
-org.openecomp.sdnc.sli.aai.path.complexes=/aai/v8/cloud-infrastructure/complexes
-org.openecomp.sdnc.sli.aai.path.complex=/aai/v8/cloud-infrastructure/complexes/complex/{physical-location-id}
+org.onap.ccsdk.sli.adaptors.aai.path.complexes=/aai/v11/cloud-infrastructure/complexes
+org.onap.ccsdk.sli.adaptors.aai.path.complex=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}
# tenant
-org.openecomp.sdnc.sli.aai.path.tenant=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}
-org.openecomp.sdnc.sli.aai.path.tenant.query=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant?tenant-name={tenant-name}
+org.onap.ccsdk.sli.adaptors.aai.path.tenant=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}
+org.onap.ccsdk.sli.adaptors.aai.path.tenant.query=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant?tenant-name={tenant-name}
# vservers
-org.openecomp.sdnc.sli.aai.path.vservers=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/
-org.openecomp.sdnc.sli.aai.path.vserver=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}
+org.onap.ccsdk.sli.adaptors.aai.path.vservers=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/
+org.onap.ccsdk.sli.adaptors.aai.path.vserver=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}
# vpls-pe
-org.openecomp.sdnc.sli.aai.path.vpls.pes=/aai/v8/network/vpls-pes/
-org.openecomp.sdnc.sli.aai.path.vpls.pe =/aai/v8/network/vpls-pes/vpls-pe/
+org.onap.ccsdk.sli.adaptors.aai.path.vpls.pes=/aai/v11/network/vpls-pes/
+org.onap.ccsdk.sli.adaptors.aai.path.vpls.pe =/aai/v11/network/vpls-pes/vpls-pe/
# ctag-pool
-org.openecomp.sdnc.sli.aai.path.ctag.pools=/aai/v8/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools
-org.openecomp.sdnc.sli.aai.path.ctag.pool=/aai/v8/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools/ctag-pool/{target-pe}/{availability-zone-name}
+org.onap.ccsdk.sli.adaptors.aai.path.ctag.pools=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools
+org.onap.ccsdk.sli.adaptors.aai.path.ctag.pool=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools/ctag-pool/{target-pe}/{availability-zone-name}
#
#-------------- 1510 ----------------------
#
# pservers
-org.openecomp.sdnc.sli.aai.path.pservers=/aai/v8/cloud-infrastructure/pservers
-org.openecomp.sdnc.sli.aai.path.pserver=/aai/v8/cloud-infrastructure/pservers/pserver/{hostname}
+org.onap.ccsdk.sli.adaptors.aai.path.pservers=/aai/v11/cloud-infrastructure/pservers
+org.onap.ccsdk.sli.adaptors.aai.path.pserver=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}
# generic-vnf
-org.openecomp.sdnc.sli.aai.path.generic.vnfs=/aai/v8/network/generic-vnfs
-org.openecomp.sdnc.sli.aai.path.generic.vnf=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnfs=/aai/v11/network/generic-vnfs
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}
# dvs-switch
-org.openecomp.sdnc.sli.aai.path.dvsswitches=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches
-org.openecomp.sdnc.sli.aai.path.dvsswitch=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches/dvs-switch/{switch-name}
+org.onap.ccsdk.sli.adaptors.aai.path.dvsswitches=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches
+org.onap.ccsdk.sli.adaptors.aai.path.dvsswitch=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches/dvs-switch/{switch-name}
# L3 Networks
-org.openecomp.sdnc.sli.aai.path.l3networks=/aai/v8/network/l3-networks
-org.openecomp.sdnc.sli.aai.path.l3network=/aai/v8/network/l3-networks/l3-network/{network-id}
-org.openecomp.sdnc.sli.aai.path.l3network.query.name=/aai/v8/network/l3-networks/l3-network?network-name={network-name}
+org.onap.ccsdk.sli.adaptors.aai.path.l3networks=/aai/v11/network/l3-networks
+org.onap.ccsdk.sli.adaptors.aai.path.l3network=/aai/v11/network/l3-networks/l3-network/{network-id}
+org.onap.ccsdk.sli.adaptors.aai.path.l3network.query.name=/aai/v11/network/l3-networks/l3-network?network-name={network-name}
# P-Interfaces
-org.openecomp.sdnc.sli.aai.path.pserver.pinterfaces=/aai/v8/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces
-org.openecomp.sdnc.sli.aai.path.pserver.pinterface=/aai/v8/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterfaces=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces
+org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{interface-name}
# Physical Link
-org.openecomp.sdnc.sli.aai.path.physical.links=/aai/v8/network/physical-links
-org.openecomp.sdnc.sli.aai.path.physical.link=/aai/v8/network/physical-links/physical-link/{link-name}
+org.onap.ccsdk.sli.adaptors.aai.path.physical.links=/aai/v11/network/physical-links
+org.onap.ccsdk.sli.adaptors.aai.path.physical.link=/aai/v11/network/physical-links/physical-link/{link-name}
# VPN Bindings
-org.openecomp.sdnc.sli.aai.path.vpn.bindings=/aai/v8/network/vpn-bindings/
-org.openecomp.sdnc.sli.aai.path.vpn.binding=/aai/v8/network/vpn-bindings/vpn-binding/{vpn-id}
+org.onap.ccsdk.sli.adaptors.aai.path.vpn.bindings=/aai/v11/network/vpn-bindings/
+org.onap.ccsdk.sli.adaptors.aai.path.vpn.binding=/aai/v11/network/vpn-bindings/vpn-binding/{vpn-id}
# VNF IMAGES
-org.openecomp.sdnc.sli.aai.path.vnf.images=/aai/v8/service-design-and-creation/vnf-images
-org.openecomp.sdnc.sli.aai.path.vnf.image=/aai/v8/service-design-and-creation/vnf-images/vnf-image/{att-uuid}
-org.openecomp.sdnc.sli.aai.path.vnf.image.query=/aai/v8/service-design-and-creation/vnf-images/vnf-image?application={application_model}&application-vendor={application_vendor}
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.images=/aai/v11/service-design-and-creation/vnf-images
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.image=/aai/v11/service-design-and-creation/vnf-images/vnf-image/{att-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.image.query=/aai/v11/service-design-and-creation/vnf-images/vnf-image?application={application_model}&application-vendor={application_vendor}
# UBB Notify
-org.openecomp.sdnc.sli.aai.path.notify=/aai/v8/actions/notify
-org.openecomp.sdnc.sli.aai.notify.selflink.fqdn=https://aai-service.onap-aai:8443/restconf/config/L3SDN-API:services/layer3-service-list/{service-instance-id}
-org.openecomp.sdnc.sli.aai.notify.selflink.avpn=https://aai-service.onap-aai:8543/restconf/config/L3AVPN-EVC-API:services/service-list/{service-instance-id}/service-data/avpn-logicalchannel-information
+org.onap.ccsdk.sli.adaptors.aai.path.notify=/aai/v11/actions/notify
+org.onap.ccsdk.sli.adaptors.aai.notify.selflink.fqdn=https://aai-service.onap-aai:8443/restconf/config/L3SDN-API:services/layer3-service-list/{service-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.notify.selflink.avpn=https://aai-service.onap-aai:8443/restconf/config/L3AVPN-EVC-API:services/service-list/{service-instance-id}/service-data/avpn-logicalchannel-information
# Service
-org.openecomp.sdnc.sli.aai.path.service=/aai/v8/service-design-and-creation/services/service/{service-id}
-org.openecomp.sdnc.sli.aai.path.services=/aai/v8/service-design-and-creation/services
+org.onap.ccsdk.sli.adaptors.aai.path.service=/aai/v11/service-design-and-creation/services/service/{service-id}
+org.onap.ccsdk.sli.adaptors.aai.path.services=/aai/v11/service-design-and-creation/services
#
@@ -151,104 +146,103 @@ org.openecomp.sdnc.sli.aai.path.services=/aai/v8/service-design-and-creation/ser
#
# VNFC
-org.openecomp.sdnc.sli.aai.path.vnfc=/aai/v8/network/vnfcs/vnfc/{vnfc-name}
+org.onap.ccsdk.sli.adaptors.aai.path.vnfc=/aai/v11/network/vnfcs/vnfc/{vnfc-name}
# class-of-service
-org.openecomp.sdnc.sli.aai.path.class.of.service=/aai/v8/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}/classes-of-service/class-of-service/{cos-id}
+org.onap.ccsdk.sli.adaptors.aai.path.class.of.service=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}/classes-of-service/class-of-service/{cos-id}
# site-pair
-org.openecomp.sdnc.sli.aai.path.site.pair=/aai/v8/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}
+org.onap.ccsdk.sli.adaptors.aai.path.site.pair=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}
# routing-instance
-org.openecomp.sdnc.sli.aai.path.routing.instance=/aai/v8/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.path.routing.instance=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}
# site-pair-set
-org.openecomp.sdnc.sli.aai.path.site.pair.set=/aai/v8/network/site-pair-sets/site-pair-set/{site-pair-set-id}
+org.onap.ccsdk.sli.adaptors.aai.path.site.pair.set=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}
# license key resource
-org.openecomp.sdnc.sli.aai.path.license.acquire=/aai/v8/actions/assignment/license-management/assignment-group-uuid/{assignment-group-uuid}
-org.openecomp.sdnc.sli.aai.path.license=/aai/v8/license-management/license-key-resources/license-key-resource/{att-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.license.acquire=/aai/v11/actions/assignment/license-management/assignment-group-uuid/{assignment-group-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.license=/aai/v11/license-management/license-key-resources/license-key-resource/{att-uuid}
# logical-link
-org.openecomp.sdnc.sli.aai.path.logical.link =/aai/v8/network/logical-links/logical-link/{link-name}
+org.onap.ccsdk.sli.adaptors.aai.path.logical.link =/aai/v11/network/logical-links/logical-link/{link-name}
# virtual-data-center
-org.openecomp.sdnc.sli.aai.path.virtual.data.center=/aai/v8/cloud-infrastructure/virtual-data-centers/virtual-data-center/{vdc-id}
+org.onap.ccsdk.sli.adaptors.aai.path.virtual.data.center=/aai/v11/cloud-infrastructure/virtual-data-centers/virtual-data-center/{vdc-id}
# wan-connector
-org.openecomp.sdnc.sli.aai.path.wan.connector=/aai/v8/business/connectors/connector/{resource-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.path.wan.connector=/aai/v11/business/connectors/connector/{resource-instance-id}
# l-interface
-org.openecomp.sdnc.sli.aai.path.lag.interface.l.interface=/aai/v8/cloud-infrastructure/pservers/pserver/{hostname}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
-org.openecomp.sdnc.sli.aai.path.p.interface.l.interface=/aai/v8/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
# l-interface pnf
-org.openecomp.sdnc.sli.aai.path.lag.interface.l.interface.pnf=/aai/v8/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
-org.openecomp.sdnc.sli.aai.path.p.interface.l.interface.pnf=/aai/v8/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
# subinterface
-org.openecomp.sdnc.sli.aai.path.pnf.lag.interface.subinterface=/aai/v8/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
-org.openecomp.sdnc.sli.aai.path.pnf.p.interface.l.interface=/aai/v8/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf.lag.interface.subinterface=/aai/v11/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf.p.interface.l.interface=/aai/v11/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
# vlans
-org.openecomp.sdnc.sli.aai.path.vlan=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
-org.openecomp.sdnc.sli.aai.path.generic.vnf.vlan=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.vlan=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
# l3-interface-ipv4-address-list
-org.openecomp.sdnc.sli.aai.path.l3.interface.ipv4.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
-org.openecomp.sdnc.sli.aai.path.vlan.l3.interface.ipv4.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
# l3-interface-ipv6-address-list
-org.openecomp.sdnc.sli.aai.path.l3.interface.ipv6.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
-org.openecomp.sdnc.sli.aai.path.vlan.l3.interface.ipv6.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
# ipsec-configuration
-org.openecomp.sdnc.sli.aai.path.ipsec.configuration=/aai/v8/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}
+org.onap.ccsdk.sli.adaptors.aai.path.ipsec.configuration=/aai/v11/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}
# vig server
-org.openecomp.sdnc.sli.aai.path.vig.server=/aai/v8/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}/vig-servers/vig-server/{vig-address-type}
+org.onap.ccsdk.sli.adaptors.aai.path.vig.server=/aai/v11/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}/vig-servers/vig-server/{vig-address-type}
# l3-network
-org.openecomp.sdnc.sli.aai.path.l3.network=/aai/v8/network/l3-networks/l3-network/{network-id}
+org.onap.ccsdk.sli.adaptors.aai.path.l3.network=/aai/v11/network/l3-networks/l3-network/{network-id}
# subnet
-org.openecomp.sdnc.sli.aai.path.subnet=/aai/v8/network/l3-networks/l3-network/{network-id}/subnets/subnet/{subnet-id}
+org.onap.ccsdk.sli.adaptors.aai.path.subnet=/aai/v11/network/l3-networks/l3-network/{network-id}/subnets/subnet/{subnet-id}
# multicast-configuration
-org.openecomp.sdnc.sli.aai.path.multicast.configuration=/aai/v8/network/multicast-configurations/multicast-configuration/{multicast-configuration-id}
+org.onap.ccsdk.sli.adaptors.aai.path.multicast.configuration=/aai/v11/network/multicast-configurations/multicast-configuration/{multicast-configuration-id}
-# org.openecomp.sdnc.sli.aai.path.l.interface.ipv4.address.list
-org.openecomp.sdnc.sli.aai.path.l3-interface.ipv4.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.ipv4.address.list
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
-# org.openecomp.sdnc.sli.aai.path.l.interface.vlan.ipv4.address.list
-org.openecomp.sdnc.sli.aai.path.l3-interface.vlan.ipv4.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.vlan.ipv4.address.list
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.vlan.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
-# org.openecomp.sdnc.sli.aai.path.l.interface.ipv6.address.list
-org.openecomp.sdnc.sli.aai.path.l3-interface.ipv6.address.list=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.ipv6.address.list
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
# volume.group
-org.openecomp.sdnc.sli.aai.path.volume.group=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/volume-groups/volume-group/{volume-group-id}
+org.onap.ccsdk.sli.adaptors.aai.path.volume.group=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/volume-groups/volume-group/{volume-group-id}
#cloud region
-org.openecomp.sdnc.sli.aai.path.cloud.region=/aai/v8/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}
+org.onap.ccsdk.sli.adaptors.aai.path.cloud.region=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}
# vf-module
-org.openecomp.sdnc.sli.aai.path.vf.module=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/vf-modules/vf-module/{vf-module-id}
+org.onap.ccsdk.sli.adaptors.aai.path.vf.module=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/vf-modules/vf-module/{vf-module-id}
# l-interface through generic-vnf
-org.openecomp.sdnc.sli.aai.path.generic.vnf.linterface=/aai/v8/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.linterface=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}
# network-policy
-org.openecomp.sdnc.sli.aai.path.network.policy=/aai/v8/network/network-policies/network-policy/{network-policy-id}
+org.onap.ccsdk.sli.adaptors.aai.path.network.policy=/aai/v11/network/network-policies/network-policy/{network-policy-id}
# pnf
-org.openecomp.sdnc.sli.aai.path.pnf=/aai/v8/network/pnfs/pnf/{pnf-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}
#
# Formatting
#
-org.openecomp.sdnc.sli.aai.param.format=filter=%s:%s
-org.openecomp.sdnc.sli.aai.param.vnf_type=vnf-type
-org.openecomp.sdnc.sli.aai.param.physical.location.id=physical-location-id
-org.openecomp.sdnc.sli.aai.param.service.type=service-type
-
+org.onap.ccsdk.sli.adaptors.aai.param.format=filter=%s:%s
+org.onap.ccsdk.sli.adaptors.aai.param.vnf_type=vnf-type
+org.onap.ccsdk.sli.adaptors.aai.param.physical.location.id=physical-location-id
+org.onap.ccsdk.sli.adaptors.aai.param.service.type=service-type
diff --git a/kubernetes/config/docker/init/src/config/appc/conf/appc.properties b/kubernetes/config/docker/init/src/config/appc/conf/appc.properties
index 8f7a569f1d..2b27ed20b7 100644
--- a/kubernetes/config/docker/init/src/config/appc/conf/appc.properties
+++ b/kubernetes/config/docker/init/src/config/appc/conf/appc.properties
@@ -7,9 +7,9 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -40,19 +40,25 @@ appc.service.logic.module.name=APPC
appc.topology.dg.method=topology-operation-all
appc.topology.dg.version=2.0.0
+# TEMP - Properties that might be needed to make the AAI-APPC connection
+org.openecomp.appc.db.url.appcctl=jdbc:mysql://dbhost:3306/appcctl
+org.openecomp.appc.db.user.appcctl=appcctl
+org.openecomp.appc.db.pass.appcctl=appcctl
-
+org.openecomp.appc.db.url.sdnctl=jdbc:mysql://dbhost:3306/sdnctl
+org.openecomp.appc.db.user.sdnctl=sdnctl
+org.openecomp.appc.db.pass.sdnctl=gamma
### ###
### OpenStack credentials (these properties also are used in appc-rest-adapter-bundle, appc-chef-adapter-bundle, appc-iaas-adapter-bundle) ###
### ###
-provider1.type=test
-provider1.name=test
-provider1.identity=http://identity1.appc.local/test
-provider1.tenant1.name=test
-provider1.tenant1.userid=test
-provider1.tenant1.password=test
+provider1.type=OpenStackProvider
+provider1.name=OpenStack
+provider1.identity=http://localhost:8181/apidoc/explorer/index.html
+provider1.tenant1.name=default
+provider1.tenant1.userid=admin
+provider1.tenant1.password=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
@@ -65,7 +71,7 @@ provider1.tenant1.password=test
### ###
-# Property below is valid in appc-command-executor-core, appc-license-manager-core, appc-lifecycle-management-core,
+# Property below is valid in appc-command-executor-core, appc-license-manager-core, appc-lifecycle-management-core,
# appc-request-handler-core, appc-workflow-management-core (all from the appc-dispatcher package).
dmaap.poolMembers=dmaap.onap-message-router:3904
@@ -97,14 +103,14 @@ test.vmid=test
test.url=http://api.appc.local/vm/9999999/test/99999999-9999-9999-9999-999999999999
-# Properties from default.properties in the src/test and src/main paths of appc-asdc-listener-bundle
-appc.asdc.host=sdc-be.onap-sdc:8443
-appc.asdc.env=APPC-ASDC-ENV
-appc.asdc.user=test
-appc.asdc.pass=test
-appc.asdc.consumer=APPC-ASDC-CONSUMER
-appc.asdc.consumer.id=APPC-ASDC-CONSUMER-ID
-appc.asdc.provider.url=http://localhost:8181/restconf/operations/AsdcMessage:configuration-document-request
+# Properties from default.properties in the src/test and src/main paths of appc-asdc-listener-bundle
+appc.sdc.host=sdc-be.onap-sdc:8443
+appc.sdc.env=APPC-ASDC-ENV
+appc.sdc.user=test
+appc.sdc.pass=test
+appc.sdc.consumer=APPC-ASDC-CONSUMER
+appc.sdc.consumer.id=APPC-ASDC-CONSUMER-ID
+appc.sdc.provider.url=http://localhost:8181/restconf/operations/AsdcMessage:configuration-document-request
# Properties used by EventSenderDmaapImpl.java
DCAE.dmaap.event.topic.write=EventSenderTest
diff --git a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql
index 7df7e29d46..880dbf1530 100644
--- a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql
+++ b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql
@@ -29,69 +29,88 @@ USE `mso_catalog`;
SOURCE ../../main-schemas/MySQL-Catalog-schema.sql
-LOCK TABLES `NETWORK_RECIPE` WRITE;
-/*!40000 ALTER TABLE `NETWORK_RECIPE` DISABLE KEYS */;
-INSERT INTO `NETWORK_RECIPE`(`ID`, `NETWORK_TYPE`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`) VALUES
-(1,'CONTRAIL_BASIC','CREATE','1',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL),
-(2,'CONTRAIL_BASIC','DELETE','1',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL),
-(3,'CONTRAIL_BASIC','UPDATE','1',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL),
-(4,'CONTRAIL_SHARED','CREATE','1',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL),
-(5,'CONTRAIL_SHARED','UPDATE','1',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL),
-(6,'CONTRAIL_SHARED','DELETE','1',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL),
-(7,'CONTRAIL_EXTERNAL','CREATE','1',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL),
-(8,'CONTRAIL_EXTERNAL','UPDATE','1',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL),
-(9,'CONTRAIL_EXTERNAL','DELETE','1',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL);
-
-/*!40000 ALTER TABLE `NETWORK_RECIPE` ENABLE KEYS */;
-UNLOCK TABLES;
-INSERT INTO `NETWORK_RECIPE`(`NETWORK_TYPE`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`) VALUES
-('CONTRAIL30_BASIC','CREATE','1',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL),
-('CONTRAIL30_BASIC','UPDATE','1',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL),
-('CONTRAIL30_BASIC','DELETE','1',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL),
-('CONTRAIL30_MPSCE','CREATE','1',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL),
-('CONTRAIL30_MPSCE','UPDATE','1',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL),
-('CONTRAIL30_MPSCE','DELETE','1',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL);
-
-
-LOCK TABLES `VNF_RECIPE` WRITE;
-/*!40000 ALTER TABLE `VNF_RECIPE` DISABLE KEYS */;
-INSERT INTO `VNF_RECIPE`(`ID`, `VNF_TYPE`, `VF_MODULE_ID`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`) VALUES
-(1,'*',NULL,'CREATE','1','Recipe Match All for VNFs if no custom flow exists','/mso/workflow/services/CreateGenericVNFV1',NULL,180,NULL),
-(2,'*',NULL,'DELETE','1','Recipe Match All for VNFs if no custom flow exists','/mso/async/services//deleteGenericVNFV1',NULL,180,NULL),
-(3,'*',NULL,'UPDATE','1','Recipe Match All for VNFs if no custom flow exists','/mso/workflow/services/updateGenericVNFV1',NULL,180,NULL),
-(4,NULL,'*','CREATE_VF_MODULE','1','Recipe Match All for VNFs if no custom flow exists','/mso/async/services/CreateVfModule',NULL,180,NULL),
-(5,NULL,'*','DELETE_VF_MODULE','1','Recipe Match All for VNFs if no custom flow exists','/mso/async/services/DeleteVfModule',NULL,180,NULL),
-(6,NULL,'*','UPDATE_VF_MODULE','1','Recipe Match All for VNFs if no custom flow exists','/mso/async/services/UpdateVfModule',NULL,180,NULL);
-/*!40000 ALTER TABLE `VNF_RECIPE` ENABLE KEYS */;
-UNLOCK TABLES;
-
-LOCK TABLES `VNF_COMPONENTS_RECIPE` WRITE;
-/*!40000 ALTER TABLE `VNF_COMPONENTS_RECIPE` DISABLE KEYS */;
-INSERT INTO `VNF_COMPONENTS_RECIPE`
-(`ID`, `VNF_TYPE`, `VF_MODULE_ID`, `ACTION`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`,`VNF_COMPONENT_TYPE`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`) VALUES
-(1,'*',NULL,'CREATE','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/createCinderVolumeV1','VOLUME_GROUP',NULL,180,NULL),
-(2,'*',NULL,'DELETE','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/deleteCinderVolumeV1','VOLUME_GROUP',NULL,180,NULL),
-(3,'*',NULL,'UPDATE','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/updateCinderVolumeV1','VOLUME_GROUP',NULL,180,NULL),
-(4,NULL,'*','CREATE_VF_MODULE_VOL','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/CreateVfModuleVolume','VOLUME_GROUP',NULL,180,NULL),
-(5,NULL,'*','DELETE_VF_MODULE_VOL','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/DeleteVfModuleVolume','VOLUME_GROUP',NULL,180,NULL),
-(6,NULL,'*','UPDATE_VF_MODULE_VOL','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/UpdateVfModuleVolume','VOLUME_GROUP',NULL,180,NULL);
-/*!40000 ALTER TABLE `VNF_COMPONENTS_RECIPE` ENABLE KEYS */;
-UNLOCK TABLES;
-
-INSERT INTO service (id, SERVICE_NAME, VERSION_STR, DESCRIPTION, SERVICE_NAME_VERSION_ID) VALUES ('4', 'VID_DEFAULT', '1.0', 'Default service for VID to use for infra APIH orchestration', 'MANUAL_RECORD');
-INSERT INTO service (id, SERVICE_NAME, VERSION_STR, DESCRIPTION, SERVICE_NAME_VERSION_ID) VALUES ('5', '*', '1.0', 'Default service to use for infra APIH orchestration', 'MANUAL_RECORD');
-INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('4', 'createInstance', '1', 'VID_DEFAULT recipe to create service-instance if no custom BPMN flow is found', '/mso/async/services/CreateGenericALaCarteServiceInstance', '180');
-INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('4', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete service-instance if no custom BPMN flow is found', '/mso/async/services/DelServiceInstance', '180');
-INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('5', 'createInstance', '1', 'DEFAULT recipe to create service-instance if no custom BPMN flow is found', '/mso/async/services/CreateGenericALaCarteServiceInstance', '180');
-INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('5', 'deleteInstance', '1', 'DEFAULT recipe to delete service-instance if no custom BPMN flow is found', '/mso/async/services/DeleteGenericALaCarteServiceInstance', '180');
-INSERT INTO vnf_recipe (VNF_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'createInstance', '1', 'VID_DEFAULT recipe to create VNF if no custom BPMN flow is found', '/mso/async/services/CreateVnfInfra', '180');
-INSERT INTO vnf_recipe (VNF_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete VNF if no custom BPMN flow is found', '/mso/async/services/DeleteVnfInfra', '180');
-INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'createInstance', '1', 'VID_DEFAULT recipe to create volume-group if no custom BPMN flow is found', '/mso/async/services/CreateVfModuleVolumeInfraV1', '180', 'VID_DEFAULT');
-INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete volume-group if no custom BPMN flow is found', '/mso/async/services/DeleteVfModuleVolumeInfraV1', '180', 'VID_DEFAULT');
-INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'updateInstance', '1', 'VID_DEFAULT recipe to update volume-group if no custom BPMN flow is found', '/mso/async/services/UpdateVfModuleVolumeInfraV1', '180', 'VID_DEFAULT');
-INSERT INTO vnf_components_recipe (VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES ('vfModule', 'createInstance', '1', 'VID_DEFAULT recipe to create vf-module if no custom BPMN flow is found', '/mso/async/services/CreateVfModuleInfra', '180', 'VID_DEFAULT');
-INSERT INTO vnf_components_recipe (VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES ('vfModule', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete vf-module if no custom BPMN flow is found', '/mso/async/services/DeleteVfModuleInfra', '180', 'VID_DEFAULT');
-INSERT INTO vnf_components_recipe (VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES ('vfModule', 'updateInstance', '1', 'VID_DEFAULT recipe to update vf-module if no custom BPMN flow is found', '/mso/async/services/UpdateVfModuleInfra', '180', 'VID_DEFAULT');
-INSERT INTO network_recipe (NETWORK_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'createInstance', '1.0', 'VID_DEFAULT recipe to create network if no custom BPMN flow is found', '/mso/async/services/CreateNetworkInstance', '180');
-INSERT INTO network_recipe (NETWORK_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'updateInstance', '1.0', 'VID_DEFAULT recipe to update network if no custom BPMN flow is found', '/mso/async/services/UpdateNetworkInstance', '180');
-INSERT INTO network_recipe (NETWORK_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'deleteInstance', '1.0', 'VID_DEFAULT recipe to delete network if no custom BPMN flow is found', '/mso/async/services/DeleteNetworkInstance', '180');
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','Contrail30-l2nodhcp','1',NULL,'heat_template_version: 2015-04-30\n\ndescription:\n HOT template that creates a Contrail Virtual Network with L2NODHCP\n\nparameters:\n network_name:\n type: string\n description: Name of direct network (e.g. core, dmz)\n default: ECOMPNetwork\n shared:\n type: boolean\n description: Shared amongst tenants\n default: False\n external:\n type: boolean\n description: router_external for the VirtualNetwork\n default: False\n route_targets:\n type: comma_delimited_list\n description: Network route-targets (RT)\n default: \"\"\n subnet_list:\n type: json\n description: Network subnets\n default: []\n policy_refs:\n type: comma_delimited_list\n description: Policies referenced by Network\n default: \"\"\n policy_refsdata:\n type: json\n description: Policies referenced by Network\n default: []\n route_table_refs:\n type: comma_delimited_list\n description: Route Tables referenced by Network\n default: \"\"\n virtual_network_properties_allow_transit:\n type: boolean\n description: allow_transit for the VirtualNetwork\n default: True\n virtual_network_properties_forwarding_mode:\n type: string\n description: forwarding_mode for the VirtualNetwork\n default: l2\n virtual_network_properties_rpf:\n type: string\n description: rpf for the VirtualNetwork\n default: disable\n flood_unknown_unicast:\n type: boolean\n description: flood_unknown_unicast for the VirtualNetwork\n default: True\n\noutputs:\n network_id:\n description: Openstack network identifier\n value: { get_resource: network }\n network_fqdn:\n description: Openstack network identifier\n value: {list_join: [\':\', { get_attr: [network, fq_name] } ] }\n\nresources:\n networkIpam:\n type: OS::ContrailV2::NetworkIpam\n properties:\n name: { get_param: network_name }\n\n network:\n type: OS::ContrailV2::VirtualNetwork\n properties:\n name: { get_param: network_name }\n is_shared: {get_param: shared}\n router_external: { get_param: external }\n route_target_list:\n {\n route_target_list_route_target: { get_param: route_targets }\n }\n network_ipam_refs: [{ get_resource: networkIpam }]\n network_ipam_refs_data:\n [\n {\n network_ipam_refs_data_ipam_subnets: { get_param: subnet_list }\n }\n ]\n network_policy_refs: { get_param: policy_refs }\n network_policy_refs_data: { get_param: policy_refsdata }\n route_table_refs: { get_param: route_table_refs }\n flood_unknown_unicast: { get_param: flood_unknown_unicast } \n virtual_network_properties:\n {\n virtual_network_properties_allow_transit: { get_param: virtual_network_properties_allow_transit },\n virtual_network_properties_forwarding_mode: { get_param: virtual_network_properties_forwarding_mode },\n virtual_network_properties_rpf: { get_param: virtual_network_properties_rpf },\n }\n',10,'MANUAL RECORD','2017-10-05 18:52:03');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','Contrail30-gndirect','1',NULL,'heat_template_version: 2015-04-30\n\ndescription:\n HOT template that creates a Contrail Virtual Network for GNDIRECT\n\nparameters:\n network_name:\n type: string\n description: Name of direct network (e.g. core, dmz)\n default: ECOMPNetwork\n shared:\n type: boolean\n description: Shared amongst tenants\n default: False\n external:\n type: boolean\n description: router_external for the VirtualNetwork\n default: False\n route_targets:\n type: comma_delimited_list\n description: Network route-targets (RT)\n default: \"\"\n subnet_list:\n type: json\n description: Network subnets\n default: []\n policy_refs:\n type: comma_delimited_list\n description: Policies referenced by Network\n default: \"\"\n policy_refsdata:\n type: json\n description: Policies referenced by Network\n default: []\n route_table_refs:\n type: comma_delimited_list\n description: Route Tables referenced by Network\n default: \"\"\n virtual_network_properties_rpf:\n type: string\n description: rpf for the VirtualNetwork\n default: disable\n\noutputs:\n network_id:\n description: Openstack network identifier\n value: { get_resource: network }\n network_fqdn:\n description: Openstack network identifier\n value: {list_join: [\':\', { get_attr: [network, fq_name] } ] }\n\nresources:\n networkIpam:\n type: OS::ContrailV2::NetworkIpam\n properties:\n name: { get_param: network_name }\n\n network:\n type: OS::ContrailV2::VirtualNetwork\n properties:\n name: { get_param: network_name }\n is_shared: {get_param: shared}\n router_external: { get_param: external }\n route_target_list:\n {\n route_target_list_route_target: { get_param: route_targets }\n }\n network_ipam_refs: [{ get_resource: networkIpam }]\n network_ipam_refs_data:\n [\n {\n network_ipam_refs_data_ipam_subnets: { get_param: subnet_list }\n }\n ]\n network_policy_refs: { get_param: policy_refs }\n network_policy_refs_data: { get_param: policy_refsdata }\n route_table_refs: { get_param: route_table_refs }\n virtual_network_properties:\n {\n virtual_network_properties_rpf: { get_param: virtual_network_properties_rpf }\n }\n',10,'MANUAL RECORD','2017-10-05 18:52:03');
+
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','external','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','flood_unknown_unicast','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','network_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','policy_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','policy_refsdata','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','route_table_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','route_targets','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','shared','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_allow_transit','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_forwarding_mode','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_rpf','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','external','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','network_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','policy_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','policy_refsdata','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','route_table_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','route_targets','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','shared','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_rpf','\0','string',NULL);
+
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (1,'CONTRAIL_BASIC','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (2,'CONTRAIL_BASIC','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (3,'CONTRAIL_BASIC','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (4,'CONTRAIL_SHARED','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (5,'CONTRAIL_SHARED','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (6,'CONTRAIL_SHARED','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (7,'CONTRAIL_EXTERNAL','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (8,'CONTRAIL_EXTERNAL','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (9,'CONTRAIL_EXTERNAL','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (10,'CONTRAIL30_BASIC','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (11,'CONTRAIL30_BASIC','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (12,'CONTRAIL30_BASIC','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (13,'CONTRAIL30_MPSCE','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (14,'CONTRAIL30_MPSCE','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (15,'CONTRAIL30_MPSCE','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (16,'VID_DEFAULT','createInstance','VID_DEFAULT recipe to create network if no custom BPMN flow is found','/mso/async/services/CreateNetworkInstance',NULL,180,NULL,'2017-10-05 18:52:03','1.0');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (17,'VID_DEFAULT','updateInstance','VID_DEFAULT recipe to update network if no custom BPMN flow is found','/mso/async/services/UpdateNetworkInstance',NULL,180,NULL,'2017-10-05 18:52:03','1.0');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (18,'VID_DEFAULT','deleteInstance','VID_DEFAULT recipe to delete network if no custom BPMN flow is found','/mso/async/services/DeleteNetworkInstance',NULL,180,NULL,'2017-10-05 18:52:03','1.0');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (19,'CONTRAIL30_L2NODHCP','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (20,'CONTRAIL30_L2NODHCP','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (21,'CONTRAIL30_L2NODHCP','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (22,'CONTRAIL30_GNDIRECT','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (23,'CONTRAIL30_GNDIRECT','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (24,'CONTRAIL30_GNDIRECT','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('48cc36cc-a9fe-11e7-8b4b-0242ac120002','VID_DEFAULT','48cd56c8-a9fe-11e7-8b4b-0242ac120002','1.0','Default service for VID to use for infra APIH orchestration1707MIGRATED1707MIGRATED','2017-10-05 18:52:03',NULL);
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('48cc3acd-a9fe-11e7-8b4b-0242ac120002','*','48ce2256-a9fe-11e7-8b4b-0242ac120002','1.0','Default service to use for infra APIH orchestration1707MIGRATED1707MIGRATED','2017-10-05 18:52:03',NULL);
+
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (1,'createInstance','1','VID_DEFAULT recipe to create service-instance if no custom BPMN flow is found','/mso/async/services/CreateGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc36cc-a9fe-11e7-8b4b-0242ac120002');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (2,'deleteInstance','1','VID_DEFAULT recipe to delete service-instance if no custom BPMN flow is found','/mso/async/services/DeleteGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc36cc-a9fe-11e7-8b4b-0242ac120002');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (3,'createInstance','1','DEFAULT recipe to create service-instance if no custom BPMN flow is found','/mso/async/services/CreateGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc3acd-a9fe-11e7-8b4b-0242ac120002');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (4,'deleteInstance','1','DEFAULT recipe to delete service-instance if no custom BPMN flow is found','/mso/async/services/DeleteGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc3acd-a9fe-11e7-8b4b-0242ac120002');
+
+INSERT INTO `temp_network_heat_template_lookup` (`NETWORK_RESOURCE_MODEL_NAME`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`) VALUES ('CONTRAIL30_GNDIRECT','4885c7a1-a9fe-11e7-8b4b-0242ac120002','3.0',NULL);
+INSERT INTO `temp_network_heat_template_lookup` (`NETWORK_RESOURCE_MODEL_NAME`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`) VALUES ('CONTRAIL30_L2NODHCP','4885c198-a9fe-11e7-8b4b-0242ac120002','3.0',NULL);
+
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (1,'*','VOLUME_GROUP','CREATE','','1','Recipe Match All for','/mso/async/services/createCinderVolumeV1','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (2,'*','VOLUME_GROUP','DELETE','','1','Recipe Match All for','/mso/async/services/deleteCinderVolumeV1','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (3,'*','VOLUME_GROUP','UPDATE','','1','Recipe Match All for','/mso/async/services/updateCinderVolumeV1','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (4,NULL,'VOLUME_GROUP','CREATE_VF_MODULE_VOL','','1','Recipe Match All for','/mso/async/services/CreateVfModuleVolume','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (5,NULL,'VOLUME_GROUP','DELETE_VF_MODULE_VOL','','1','Recipe Match All for','/mso/async/services/DeleteVfModuleVolume','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (6,NULL,'VOLUME_GROUP','UPDATE_VF_MODULE_VOL','','1','Recipe Match All for','/mso/async/services/UpdateVfModuleVolume','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (7,NULL,'volumeGroup','createInstance','','1','VID_DEFAULT recipe t','/mso/async/services/CreateVfModuleVolumeInfraV1','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (8,NULL,'volumeGroup','deleteInstance','','1','VID_DEFAULT recipe t','/mso/async/services/DeleteVfModuleVolumeInfraV1','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (9,NULL,'volumeGroup','updateInstance','','1','VID_DEFAULT recipe t','/mso/async/services/UpdateVfModuleVolumeInfraV1','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (10,NULL,'vfModule','createInstance','','1','VID_DEFAULT recipe t','/mso/async/services/CreateVfModuleInfra','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (11,NULL,'vfModule','deleteInstance','','1','VID_DEFAULT recipe t','/mso/async/services/DeleteVfModuleInfra','','180',2147483647,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (12,NULL,'vfModule','updateInstance','','1','VID_DEFAULT recipe t','/mso/async/services/UpdateVfModuleInfra','','180',2147483647,'2017-10-05 18:52:03');
+
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (1,NULL,'CREATE',NULL,'1','*','Recipe Match All for VNFs if no custom flow exists','/mso/workflow/services/CreateGenericVNFV1',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (2,NULL,'DELETE',NULL,'1','*','Recipe Match All for VNFs if no custom flow exists','/mso/async/services//deleteGenericVNFV1',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (3,NULL,'UPDATE',NULL,'1','*','Recipe Match All for VNFs if no custom flow exists','/mso/workflow/services/updateGenericVNFV1',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (4,'*','CREATE_VF_MODULE',NULL,'1',NULL,'Recipe Match All for VNFs if no custom flow exists','/mso/async/services/CreateVfModule',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (5,'*','DELETE_VF_MODULE',NULL,'1',NULL,'Recipe Match All for VNFs if no custom flow exists','/mso/async/services/DeleteVfModule',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (6,'*','UPDATE_VF_MODULE',NULL,'1',NULL,'Recipe Match All for VNFs if no custom flow exists','/mso/async/services/UpdateVfModule',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (7,NULL,'createInstance',NULL,'1','VID_DEFAULT','VID_DEFAULT recipe to create VNF if no custom BPMN flow is found','/mso/async/services/CreateVnfInfra',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (8,NULL,'deleteInstance',NULL,'1','VID_DEFAULT','VID_DEFAULT recipe to delete VNF if no custom BPMN flow is found','/mso/async/services/DeleteVnfInfra',NULL,180,'2017-10-05 18:52:03');
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
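
Note: the recipe tables seeded above key each flow on a model name (or the '*' and VID_DEFAULT wildcard rows) plus an ACTION, resolving to a BPMN ORCHESTRATION_URI and a RECIPE_TIMEOUT. Purely as an illustration of how those rows relate, and not part of this change (MSO resolves recipes through its own catalog DB client), a fallback-style lookup against the seeded data could look like:

    USE mso_catalog;

    -- Resolve the create endpoint for a network model, preferring an exact
    -- model match and falling back to the VID_DEFAULT row seeded above.
    SELECT `MODEL_NAME`, `ACTION`, `ORCHESTRATION_URI`, `RECIPE_TIMEOUT`
    FROM `network_recipe`
    WHERE (`MODEL_NAME` = 'CONTRAIL30_BASIC' AND `ACTION` = 'CREATE')
       OR (`MODEL_NAME` = 'VID_DEFAULT' AND `ACTION` = 'createInstance')
    ORDER BY `MODEL_NAME` = 'VID_DEFAULT'
    LIMIT 1;

The same pattern applies to `service_recipe` (keyed by SERVICE_MODEL_UUID), `vnf_recipe`, and `vnf_components_recipe`.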
diff --git a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql
index 9ddc9ff0d3..b5063defda 100644
--- a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql
+++ b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql
@@ -12,74 +12,66 @@ CREATE USER 'catalog';
GRANT ALL on mso_catalog.* to 'catalog' identified by 'catalog123' with GRANT OPTION;
FLUSH PRIVILEGES;
-LOCK TABLES `heat_environment` WRITE;
-/*!40000 ALTER TABLE `heat_environment` DISABLE KEYS */;
-INSERT INTO `heat_environment` VALUES (3,'base_vlb.env','1.0','dns-service/DNSResource-1','BASE VLB ENV file','parameters:\n vlb_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vlb_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n vlb_private_net_id: zdfw1lb01_private\n ecomp_private_net_id: oam_ecomp\n vlb_private_net_cidr: 192.168.10.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vlb_private_ip_0: 192.168.10.111\n vlb_private_ip_1: 192.168.9.111\n vdns_private_ip_0: 192.168.10.211\n vdns_private_ip_1: 192.168.9.211\n vlb_name_0: zdfw1lb01lb01\n vdns_name_0: zdfw1lb01dns01\n vnf_id: vLoadBalancer_demo_app\n vf_module_id: vLoadBalancer\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vlb_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','2016-11-14 13:04:07','EnvArtifact-UUID1','Label');
-INSERT INTO `heat_environment` VALUES (4,'dnsscaling.env','1.0','dns-service/DNSResource-1','DNS Scaling ENV file','parameters:\n vlb_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vlb_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n vlb_private_net_id: zdfw1lb01_private\n ecomp_private_net_id: oam_ecomp\n vlb_private_ip_0: 192.168.10.111\n vlb_private_ip_1: 192.168.9.111\n vdns_private_ip_0: 192.168.10.222\n vdns_private_ip_1: 192.168.9.222\n vdns_name_0: zdfw1lb01dns02\n vnf_id: vLoadBalancer_demo_app\n vf_module_id: vLoadBalancer\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vlb_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','2016-11-14 13:04:07','EnvArtifact-UUID2','Label');
-/*!40000 ALTER TABLE `heat_environment` ENABLE KEYS */;
-UNLOCK TABLES;
-
-LOCK TABLES `heat_template` WRITE;
-/*!40000 ALTER TABLE `heat_template` DISABLE KEYS */;
-INSERT INTO `heat_template` VALUES (6,'base_vlb.yaml','1.0','DNSResource','base_vlb.yaml','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vLoadBalancer/vDNS demo app for OpenECOMP\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_net_cidr:\n type: string\n label: vLoadBalancer private network CIDR\n description: The CIDR of the vLoadBalancer private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vlb_name_0:\n type: string\n label: vLoadBalancer name\n description: Name of the vLoadBalancer\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n vlb_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: vlb_private_net_id }\n\n vlb_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n name: { get_param: vlb_private_net_id }\n network_id: { get_resource: vlb_private_network }\n cidr: { get_param: vlb_private_net_cidr }\n\n vlb_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vlb_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - 
network: { get_param: public_net_id }\n - port: { get_resource: vlb_private_0_port }\n - port: { get_resource: vlb_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __dcae_collector_ip__: { get_param: dcae_collector_ip }\n __local_private_ipaddr__: { get_param: vlb_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_lb_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vlb.sh\n chmod +x v_lb_init.sh\n chmod +x vlb.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo "no" > config/install.txt\n LOCAL_PUBLIC_IPADDR=$(ifconfig eth0 | grep "inet addr" | tr -s \' \' | cut -d\' \' -f3 | cut -d\':\' -f2)\n echo $LOCAL_PUBLIC_IPADDR > config/local_public_ipaddr.txt\n mv vlb.sh /etc/init.d\n update-rc.d vlb.sh defaults\n ./v_lb_init.sh\n\n vlb_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{"subnet": { get_resource: vlb_private_subnet }, "ip_address": { get_param: vlb_private_ip_0 }}]\n\n vlb_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vlb_private_ip_1 }}]\n\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod +x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo "no" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{"subnet": { get_resource: vlb_private_subnet }, "ip_address": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vdns_private_ip_1 }}]\n',300,'Artifact-UUID1','Base VLB Heat','label','2016-11-14 
13:04:07',NULL);
-INSERT INTO `heat_template` VALUES (7,'dnsscaling.yaml','1.0','DNSResource','dnsscaling.yaml','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy a vDNS for OpenECOMP (scaling-up scenario)\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_param: key_name }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod 
+x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo "no" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: vlb_private_net_id }\n fixed_ips: [{"subnet": { get_param: vlb_private_net_id }, "ip_address": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vdns_private_ip_1 }}]\n',300,'Artifact-UUID2','DNS Scaling Heat','label','2016-11-14 13:04:07',NULL);
-/*!40000 ALTER TABLE `heat_template` ENABLE KEYS */;
-UNLOCK TABLES;
-
-LOCK TABLES `heat_template_params` WRITE;
-/*!40000 ALTER TABLE `heat_template_params` DISABLE KEYS */;
-INSERT INTO `heat_template_params` VALUES (110,6,'vlb_flavor_name','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (111,6,'vlb_private_ip_1','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (112,6,'dcae_collector_ip','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (113,6,'vlb_private_net_cidr','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (114,6,'ecomp_private_net_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (115,6,'vnf_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (116,6,'key_name','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (117,6,'pub_key','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (118,6,'vlb_private_net_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (119,6,'webserver_ip','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (120,6,'vdns_private_ip_1','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (121,6,'public_net_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (122,6,'vlb_private_ip_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (123,6,'vlb_name_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (124,6,'vdns_private_ip_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (125,6,'vdsn_name_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (126,6,'ecomp_private_net_cidr','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (127,6,'vf_module_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (128,6,'vlb_image_name','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (129,7,'vnf_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (130,7,'vf_module_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (131,7,'vlb_flavor_name','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (132,7,'vlb_image_name','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (133,7,'vdns_private_ip_1','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (134,7,'dcae_collector_ip','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (135,7,'webserver_ip','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (136,7,'vlb_private_net_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (137,7,'vdns_private_ip_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (138,7,'vdsn_name_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (139,7,'vlb_private_ip_0','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (140,7,'pub_key','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (141,7,'public_net_id','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (142,7,'key_name','\1','string',NULL);
-INSERT INTO `heat_template_params` VALUES (143,7,'ecomp_private_net_id','\1','string',NULL);
-/*!40000 ALTER TABLE `heat_template_params` ENABLE KEYS */;
-UNLOCK TABLES;
-
-LOCK TABLES `service` WRITE;
-/*!40000 ALTER TABLE `service` DISABLE KEYS */;
-INSERT INTO `service` VALUES (10,'dns-service','1.0','dns service for unit test','1e34774e-715e-4fd6-bd09-7b654622f35i',NULL,NULL,'2016-11-14 13:04:07','585822c8-4027-4f84-ba50-e9248606f111');
-/*!40000 ALTER TABLE `service` ENABLE KEYS */;
-UNLOCK TABLES;
-
-LOCK TABLES `vf_module` WRITE;
-/*!40000 ALTER TABLE `vf_module` DISABLE KEYS */;
-INSERT INTO `vf_module` VALUES (7,'dns-service/DNSResource-1::VF_RI1_DNS::module-1','1.0','VF_RI1_DNS::module-1','1.0','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-1::module-1.group',NULL,6,1,'2016-11-14 13:04:07',NULL,NULL,6,3,'585822c7-4027-4f84-ba50-e9248606f132');
-INSERT INTO `vf_module` VALUES (8,'dns-service/DNSResource-1::VF_RI1_DNS::module-2','1.0','VF_RI1_DNS::module-2','1.0','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-2::module-1.group',NULL,7,0,'2016-11-14 13:04:07',NULL,NULL,6,4,'585822c7-4027-4f84-ba50-e9248606f133');
-/*!40000 ALTER TABLE `vf_module` ENABLE KEYS */;
-UNLOCK TABLES;
-
-LOCK TABLES `vnf_resource` WRITE;
-/*!40000 ALTER TABLE `vnf_resource` DISABLE KEYS */;
-INSERT INTO `vnf_resource` VALUES (6,'dns-service/DNSResource-1','1.0','HEAT','dns service for unit test',NULL,NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f112','1.0','DNSResource-1','DNSResource','585822c8-4027-4f84-ba50-e9248606f111');
-/*!40000 ALTER TABLE `vnf_resource` ENABLE KEYS */;
-UNLOCK TABLES;
\ No newline at end of file
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+
+INSERT INTO `heat_environment` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('EnvArtifact-UUID1','base_vlb.env','1.0','BASE VLB ENV file','parameters:\n vlb_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vlb_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n vlb_private_net_id: zdfw1lb01_private\n ecomp_private_net_id: oam_ecomp\n vlb_private_net_cidr: 192.168.10.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vlb_private_ip_0: 192.168.10.111\n vlb_private_ip_1: 192.168.9.111\n vdns_private_ip_0: 192.168.10.211\n vdns_private_ip_1: 192.168.9.211\n vlb_name_0: zdfw1lb01lb01\n vdns_name_0: zdfw1lb01dns01\n vnf_id: vLoadBalancer_demo_app\n vf_module_id: vLoadBalancer\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vlb_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_environment` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('EnvArtifact-UUID2','dnsscaling.env','1.0','DNS Scaling ENV file','parameters:\n vlb_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vlb_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n vlb_private_net_id: zdfw1lb01_private\n ecomp_private_net_id: oam_ecomp\n vlb_private_ip_0: 192.168.10.111\n vlb_private_ip_1: 192.168.9.111\n vdns_private_ip_0: 192.168.10.222\n vdns_private_ip_1: 192.168.9.222\n vdns_name_0: zdfw1lb01dns02\n vnf_id: vLoadBalancer_demo_app\n vf_module_id: vLoadBalancer\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vlb_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('Artifact-UUID1','base_vlb.yaml','1.0','Base VLB Heat','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vLoadBalancer/vDNS demo app for OpenECOMP\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_net_cidr:\n type: string\n label: vLoadBalancer private network CIDR\n description: The CIDR of the vLoadBalancer private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vlb_name_0:\n type: string\n label: vLoadBalancer name\n description: Name of the vLoadBalancer\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n vlb_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: vlb_private_net_id }\n\n vlb_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n name: { get_param: vlb_private_net_id }\n network_id: { get_resource: vlb_private_network }\n cidr: { get_param: vlb_private_net_cidr }\n\n vlb_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { 
get_param: vlb_flavor_name }\n name: { get_param: vlb_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vlb_private_0_port }\n - port: { get_resource: vlb_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __dcae_collector_ip__: { get_param: dcae_collector_ip }\n __local_private_ipaddr__: { get_param: vlb_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_lb_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vlb.sh\n chmod +x v_lb_init.sh\n chmod +x vlb.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo \"no\" > config/install.txt\n LOCAL_PUBLIC_IPADDR=$(ifconfig eth0 | grep \"inet addr\" | tr -s \' \' | cut -d\' \' -f3 | cut -d\':\' -f2)\n echo $LOCAL_PUBLIC_IPADDR > config/local_public_ipaddr.txt\n mv vlb.sh /etc/init.d\n update-rc.d vlb.sh defaults\n ./v_lb_init.sh\n\n vlb_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{\"subnet\": { get_resource: vlb_private_subnet }, \"ip_address\": { get_param: vlb_private_ip_0 }}]\n\n vlb_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vlb_private_ip_1 }}]\n\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod +x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo \"no\" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{\"subnet\": { get_resource: vlb_private_subnet }, \"ip_address\": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: 
ecomp_private_net_id }, \"ip_address\": { get_param: vdns_private_ip_1 }}]\n',300,'MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('Artifact-UUID2','dnsscaling.yaml','1.0','DNS Scaling Heat','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy a vDNS for OpenECOMP (scaling-up scenario)\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_param: key_name }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget 
http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod +x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo \"no\" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: vlb_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: vlb_private_net_id }, \"ip_address\": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vdns_private_ip_1 }}]\n',300,'MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','dcae_collector_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','ecomp_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','ecomp_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','key_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','public_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','pub_key','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vdns_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vdns_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vdsn_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vf_module_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_flavor_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_image_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vnf_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','webserver_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','dcae_collector_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','ecomp_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','key_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','public_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','pub_key','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vdns_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vdns_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vdsn_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vf_module_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_flavor_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_image_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vnf_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','webserver_ip','','string',NULL);
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('1e34774e-715e-4fd6-bd09-7b654622f35i','dns-service','585822c8-4027-4f84-ba50-e9248606f111','1.0','dns service for unit test','2016-11-14 13:04:07',NULL);
+
+INSERT INTO `service_to_resource_customizations` (`SERVICE_MODEL_UUID`, `RESOURCE_MODEL_CUSTOMIZATION_UUID`, `MODEL_TYPE`, `CREATION_TIMESTAMP`) VALUES ('1e34774e-715e-4fd6-bd09-7b654622f35i','302aa6be-a9fe-11e7-8b4b-0242ac120002','vnf','2017-10-05 18:51:28');
+
+INSERT INTO `vf_module` (`MODEL_UUID`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `DESCRIPTION`, `IS_BASE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `VOL_HEAT_TEMPLATE_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-1::module-1.group','585822c7-4027-4f84-ba50-e9248606f132','1.0','VF_RI1_DNS::module-1',NULL,1,'Artifact-UUID1',NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131');
+INSERT INTO `vf_module` (`MODEL_UUID`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `DESCRIPTION`, `IS_BASE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `VOL_HEAT_TEMPLATE_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-2::module-1.group','585822c7-4027-4f84-ba50-e9248606f133','1.0','VF_RI1_DNS::module-2',NULL,0,'Artifact-UUID2',NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131');
+
+INSERT INTO `vf_module_customization` (`MODEL_CUSTOMIZATION_UUID`, `LABEL`, `INITIAL_COUNT`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_COUNT`, `HEAT_ENVIRONMENT_ARTIFACT_UUID`, `VOL_ENVIRONMENT_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VF_MODULE_MODEL_UUID`) VALUES ('30316d81-a9fe-11e7-8b4b-0242ac120002',NULL,1,0,NULL,NULL,'EnvArtifact-UUID1',NULL,'2017-10-05 18:51:25','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-1::module-1.group');
+INSERT INTO `vf_module_customization` (`MODEL_CUSTOMIZATION_UUID`, `LABEL`, `INITIAL_COUNT`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_COUNT`, `HEAT_ENVIRONMENT_ARTIFACT_UUID`, `VOL_ENVIRONMENT_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VF_MODULE_MODEL_UUID`) VALUES ('303170ae-a9fe-11e7-8b4b-0242ac120002',NULL,0,0,NULL,NULL,'EnvArtifact-UUID2',NULL,'2017-10-05 18:51:25','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-2::module-1.group');
+INSERT INTO `vnf_res_custom_to_vf_module_custom` (`VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID`, `VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID`, `CREATION_TIMESTAMP`) VALUES ('302aa6be-a9fe-11e7-8b4b-0242ac120002','30316d81-a9fe-11e7-8b4b-0242ac120002','2017-10-05 18:51:26');
+
+INSERT INTO `vnf_res_custom_to_vf_module_custom` (`VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID`, `VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID`, `CREATION_TIMESTAMP`) VALUES ('302aa6be-a9fe-11e7-8b4b-0242ac120002','303170ae-a9fe-11e7-8b4b-0242ac120002','2017-10-05 18:51:26');
+
+INSERT INTO `vnf_resource` (`ORCHESTRATION_MODE`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `MODEL_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `TOSCA_NODE_TYPE`, `HEAT_TEMPLATE_ARTIFACT_UUID`) VALUES ('HEAT','dns service for unit test1707MIGRATED','2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f112','1.0','DNSResource',NULL,NULL);
+
+INSERT INTO `vnf_resource_customization` (`MODEL_CUSTOMIZATION_UUID`, `MODEL_INSTANCE_NAME`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_MAX_COUNT`, `NF_TYPE`, `NF_ROLE`, `NF_FUNCTION`, `NF_NAMING_CODE`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('302aa6be-a9fe-11e7-8b4b-0242ac120002','DNSResource-1',NULL,NULL,NULL,NULL,NULL,NULL,NULL,'2017-10-05 18:51:25','585822c7-4027-4f84-ba50-e9248606f131');
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
diff --git a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql
index fd540abb65..15001050b2 100644
--- a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql
+++ b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql
@@ -12,64 +12,48 @@ CREATE USER 'catalog';
GRANT ALL on mso_catalog.* to 'catalog' identified by 'catalog123' with GRANT OPTION;
FLUSH PRIVILEGES;
-# TODO: update this script to work with the new DB schema
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
-# LOCK TABLES `heat_environment` WRITE;
-# /*!40000 ALTER TABLE `heat_environment` DISABLE KEYS */;
-# INSERT INTO `heat_environment` VALUES (5,'base_vfw.env','1.0','vfw-service/VFWResource-1','base_vfw ENV file','parameters:\n vfw_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vfw_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n unprotected_private_net_id: zdfw1fwl01_unprotected\n protected_private_net_id: zdfw1fwl01_protected\n ecomp_private_net_id: oam_ecomp\n unprotected_private_net_cidr: 192.168.10.0/24\n protected_private_net_cidr: 192.168.20.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vfw_private_ip_0: 192.168.10.100\n vfw_private_ip_1: 192.168.20.100\n vfw_private_ip_2: 192.168.9.100\n vpg_private_ip_0: 192.168.10.200\n vpg_private_ip_1: 192.168.9.200\n vsn_private_ip_0: 192.168.20.250\n vsn_private_ip_1: 192.168.9.250\n vfw_name_0: zdfw1fwl01fwl01\n vpg_name_0: zdfw1fwl01pgn01\n vsn_name_0: zdfw1fwl01snk01\n vnf_id: vFirewall_demo_app\n vf_module_id: vFirewall\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vfw_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','2016-11-14 13:04:07','EnvArtifact-UUID3','Label');
-# /*!40000 ALTER TABLE `heat_environment` ENABLE KEYS */;
-# UNLOCK TABLES;
-#
-# LOCK TABLES `heat_template` WRITE;
-# /*!40000 ALTER TABLE `heat_template` DISABLE KEYS */;
-# INSERT INTO `heat_template` VALUES (8,'base_vfw.yaml','1.0','VFWResource','base_vfw.yaml','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vFirewall demo app for OpenECOMP\n\nparameters:\n vfw_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vfw_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n unprotected_private_net_id:\n type: string\n label: Unprotected private network name or ID\n description: Private network that connects vPacketGenerator with vFirewall\n protected_private_net_id:\n type: string\n label: Protected private network name or ID\n description: Private network that connects vFirewall with vSink\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n unprotected_private_net_cidr:\n type: string\n label: Unprotected private network CIDR\n description: The CIDR of the unprotected private network\n protected_private_net_cidr:\n type: string\n label: Protected private network CIDR\n description: The CIDR of the protected private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vfw_private_ip_0:\n type: string\n label: vFirewall private IP address towards the unprotected network\n description: Private IP address that is assigned to the vFirewall to communicate with the vPacketGenerator\n vfw_private_ip_1:\n type: string\n label: vFirewall private IP address towards the protected network\n description: Private IP address that is assigned to the vFirewall to communicate with the vSink\n vfw_private_ip_2:\n type: string\n label: vFirewall private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vFirewall to communicate with ECOMP components\n vpg_private_ip_0:\n type: string\n label: vPacketGenerator private IP address towards the unprotected network\n description: Private IP address that is assigned to the vPacketGenerator to communicate with the vFirewall\n vpg_private_ip_1:\n type: string\n label: vPacketGenerator private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vPacketGenerator to communicate with ECOMP components\n vsn_private_ip_0:\n type: string\n label: vSink private IP address towards the protected network\n description: Private IP address that is assigned to the vSink to communicate with the vFirewall\n vsn_private_ip_1:\n type: string\n label: vSink private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vSink to communicate with ECOMP components\n vfw_name_0:\n type: string\n label: vFirewall name\n description: Name of the vFirewall\n vpg_name_0:\n type: string\n label: vPacketGenerator name\n description: Name of the vPacketGenerator\n vsn_name_0:\n type: string\n label: vSink name\n description: Name of the vSink\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vFirewall Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n unprotected_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: unprotected_private_net_id }\n\n protected_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: protected_private_net_id }\n\n unprotected_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n network_id: { get_resource: unprotected_private_network }\n cidr: { get_param: unprotected_private_net_cidr }\n\n protected_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n network_id: { get_resource: protected_private_network }\n cidr: { get_param: protected_private_net_cidr }\n\n vfw_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vfw_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vfw_private_0_port }\n - port: { get_resource: vfw_private_1_port }\n - port: { get_resource: vfw_private_2_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __dcae_collector_ip__ : { get_param: dcae_collector_ip }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_firewall_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vfirewall.sh\n chmod +x v_firewall_init.sh\n chmod +x vfirewall.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo "no" > config/install.txt\n mv vfirewall.sh /etc/init.d\n sudo update-rc.d vfirewall.sh defaults\n ./v_firewall_init.sh\n\n vfw_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: unprotected_private_network }\n fixed_ips: [{"subnet": { get_resource: unprotected_private_subnet }, "ip_address": { get_param: vfw_private_ip_0 }}]\n\n vfw_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: protected_private_network }\n fixed_ips: [{"subnet": { get_resource: protected_private_subnet }, "ip_address": { get_param: vfw_private_ip_1 }}]\n\n vfw_private_2_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vfw_private_ip_2 }}]\n\n vpg_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vpg_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vpg_private_0_port }\n - port: { get_resource: vpg_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __fw_ipaddr__: { get_param: vfw_private_ip_0 }\n __protected_net_cidr__: { get_param: protected_private_net_cidr }\n __sink_ipaddr__: { get_param: vsn_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n FW_IPADDR=__fw_ipaddr__\n PROTECTED_NET_CIDR=__protected_net_cidr__\n SINK_IPADDR=__sink_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_packetgen_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vpacketgen.sh\n chmod +x v_packetgen_init.sh\n chmod +x vpacketgen.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $FW_IPADDR > config/fw_ipaddr.txt\n echo $PROTECTED_NET_CIDR > config/protected_net_cidr.txt\n echo $SINK_IPADDR > config/sink_ipaddr.txt\n echo "no" > config/install.txt\n mv vpacketgen.sh /etc/init.d\n sudo update-rc.d vpacketgen.sh defaults\n ./v_packetgen_init.sh\n\n vpg_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: unprotected_private_network }\n fixed_ips: [{"subnet": { get_resource: unprotected_private_subnet }, "ip_address": { get_param: vpg_private_ip_0 }}]\n\n vpg_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vpg_private_ip_1 }}]\n\n vsn_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vsn_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vsn_private_0_port }\n - port: { get_resource: vsn_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __protected_net_gw__: { get_param: vfw_private_ip_1 }\n __unprotected_net__: { get_param: unprotected_private_net_cidr }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n PROTECTED_NET_GW=__protected_net_gw__\n UNPROTECTED_NET=__unprotected_net__\n UNPROTECTED_NET=$(echo $UNPROTECTED_NET | cut -d\'/\' -f1)\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_sink_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vsink.sh\n chmod +x v_sink_init.sh\n chmod +x vsink.sh\n echo $PROTECTED_NET_GW > config/protected_net_gw.txt\n echo $UNPROTECTED_NET > config/unprotected_net.txt\n echo "no" > config/install.txt\n mv vsink.sh /etc/init.d\n sudo update-rc.d vsink.sh defaults\n ./v_sink_init.sh\n\n vsn_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: protected_private_network }\n fixed_ips: [{"subnet": { get_resource: protected_private_subnet }, "ip_address": { get_param: vsn_private_ip_0 }}]\n\n vsn_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vsn_private_ip_1 }}]\n \n',300,'Artifact-UUID3','Base VFW Heat','label','2016-11-14 13:04:07',NULL);
-# /*!40000 ALTER TABLE `heat_template` ENABLE KEYS */;
-# UNLOCK TABLES;
-#
-# LOCK TABLES `heat_template_params` WRITE;
-# /*!40000 ALTER TABLE `heat_template_params` DISABLE KEYS */;
-# INSERT INTO `heat_template_params` VALUES (144,8,'vsn_private_ip_1','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (145,8,'ecomp_private_net_cidr','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (146,8,'public_net_id','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (147,8,'unprotected_private_net_id','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (148,8,'webserver_ip','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (149,8,'vfw_image_name','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (150,8,'vnf_id','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (151,8,'dcae_collector_ip','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (152,8,'protected_private_net_cidr','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (153,8,'vsn_private_ip_0','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (154,8,'vfw_private_ip_0','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (155,8,'vfw_private_ip_1','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (156,8,'vfw_private_ip_2','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (157,8,'unprotected_private_net_cidr','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (158,8,'vsn_name_0','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (159,8,'ecomp_private_net_id','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (160,8,'vpg_private_ip_1','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (161,8,'vpg_name_0','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (162,8,'vf_module_id','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (163,8,'pub_key','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (164,8,'protected_private_net_id','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (165,8,'key_name','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (166,8,'vfw_flavor_name','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (167,8,'vpg_private_ip_0','\1','string',NULL);
-# INSERT INTO `heat_template_params` VALUES (168,8,'vfw_name_0','\1','string',NULL);
-# /*!40000 ALTER TABLE `heat_template_params` ENABLE KEYS */;
-# UNLOCK TABLES;
-#
-# LOCK TABLES `service` WRITE;
-# /*!40000 ALTER TABLE `service` DISABLE KEYS */;
-# INSERT INTO `service` VALUES (11,'vfw-service','1.0','VFW service','2e34774e-715e-4fd5-bd09-7b654622f35i',NULL,NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f112');
-# /*!40000 ALTER TABLE `service` ENABLE KEYS */;
-# UNLOCK TABLES;
-#
-# LOCK TABLES `vf_module` WRITE;
-# /*!40000 ALTER TABLE `vf_module` DISABLE KEYS */;
-# INSERT INTO `vf_module` VALUES (9,'vfw-service/VFWResource-1::VF_RI1_VFW::module-1','1.0','VF_RI1_VFW::module-1','1.0','1e34774e-715e-4fd5-bd08-7b654622f33f.VF_RI1_VFW::module-1::module-1.group',NULL,8,1,'2016-11-14 13:04:07',NULL,NULL,7,5,'585822c7-4027-4f84-ba50-e9248606f134');
-# /*!40000 ALTER TABLE `vf_module` ENABLE KEYS */;
-# UNLOCK TABLES;
-#
-# LOCK TABLES `vnf_resource` WRITE;
-# /*!40000 ALTER TABLE `vnf_resource` DISABLE KEYS */;
-# INSERT INTO `vnf_resource` VALUES (7,'vfw-service/VFWResource-1','1.0','HEAT','VFW service',NULL,NULL,'2016-11-14 13:04:07','685822c7-4027-4f84-ba50-e9248606f132',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f113','1.0','VFWResource-1','VFWResource','585822c7-4027-4f84-ba50-e9248606f112');
-# /*!40000 ALTER TABLE `vnf_resource` ENABLE KEYS */;
-# UNLOCK TABLES;
+INSERT INTO `heat_environment` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('EnvArtifact-UUID3','base_vfw.env','1.0','base_vfw ENV file','parameters:\n vfw_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vfw_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n unprotected_private_net_id: zdfw1fwl01_unprotected\n protected_private_net_id: zdfw1fwl01_protected\n ecomp_private_net_id: oam_ecomp\n unprotected_private_net_cidr: 192.168.10.0/24\n protected_private_net_cidr: 192.168.20.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vfw_private_ip_0: 192.168.10.100\n vfw_private_ip_1: 192.168.20.100\n vfw_private_ip_2: 192.168.9.100\n vpg_private_ip_0: 192.168.10.200\n vpg_private_ip_1: 192.168.9.200\n vsn_private_ip_0: 192.168.20.250\n vsn_private_ip_1: 192.168.9.250\n vfw_name_0: zdfw1fwl01fwl01\n vpg_name_0: zdfw1fwl01pgn01\n vsn_name_0: zdfw1fwl01snk01\n vnf_id: vFirewall_demo_app\n vf_module_id: vFirewall\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vfw_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('Artifact-UUID3','base_vfw.yaml','1.0','Base VFW Heat','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vFirewall demo app for OpenECOMP\n\nparameters:\n vfw_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vfw_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n unprotected_private_net_id:\n type: string\n label: Unprotected private network name or ID\n description: Private network that connects vPacketGenerator with vFirewall\n protected_private_net_id:\n type: string\n label: Protected private network name or ID\n description: Private network that connects vFirewall with vSink\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n unprotected_private_net_cidr:\n type: string\n label: Unprotected private network CIDR\n description: The CIDR of the unprotected private network\n protected_private_net_cidr:\n type: string\n label: Protected private network CIDR\n description: The CIDR of the protected private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vfw_private_ip_0:\n type: string\n label: vFirewall private IP address towards the unprotected network\n description: Private IP address that is assigned to the vFirewall to communicate with the vPacketGenerator\n vfw_private_ip_1:\n type: string\n label: vFirewall private IP address towards the protected network\n description: Private IP address that is assigned to the vFirewall to communicate with the vSink\n vfw_private_ip_2:\n type: string\n label: vFirewall private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vFirewall to communicate with ECOMP components\n vpg_private_ip_0:\n type: string\n label: vPacketGenerator private IP address towards the unprotected network\n description: Private IP address that is assigned to the vPacketGenerator to communicate with the vFirewall\n vpg_private_ip_1:\n type: string\n label: vPacketGenerator private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vPacketGenerator to communicate with ECOMP components\n vsn_private_ip_0:\n type: string\n label: vSink private IP address towards the protected network\n description: Private IP address that is assigned to the vSink to communicate with the vFirewall\n vsn_private_ip_1:\n type: string\n label: vSink private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vSink to communicate with ECOMP components\n vfw_name_0:\n type: string\n label: vFirewall name\n description: Name of the vFirewall\n vpg_name_0:\n type: string\n label: vPacketGenerator name\n description: Name of the vPacketGenerator\n vsn_name_0:\n type: string\n label: vSink name\n description: Name of the vSink\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vFirewall Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n unprotected_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: unprotected_private_net_id }\n\n protected_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: protected_private_net_id }\n\n unprotected_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n network_id: { get_resource: unprotected_private_network }\n cidr: { get_param: unprotected_private_net_cidr }\n\n protected_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n network_id: { get_resource: protected_private_network }\n cidr: { get_param: protected_private_net_cidr }\n\n vfw_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vfw_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vfw_private_0_port }\n - port: { get_resource: vfw_private_1_port }\n - port: { get_resource: vfw_private_2_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __dcae_collector_ip__ : { get_param: dcae_collector_ip }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_firewall_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vfirewall.sh\n chmod +x v_firewall_init.sh\n chmod +x vfirewall.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo \"no\" > config/install.txt\n mv vfirewall.sh /etc/init.d\n sudo update-rc.d vfirewall.sh defaults\n ./v_firewall_init.sh\n\n vfw_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: unprotected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: unprotected_private_subnet }, \"ip_address\": { get_param: vfw_private_ip_0 }}]\n\n vfw_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: protected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: protected_private_subnet }, \"ip_address\": { get_param: vfw_private_ip_1 }}]\n\n vfw_private_2_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vfw_private_ip_2 }}]\n\n vpg_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vpg_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vpg_private_0_port }\n - port: { get_resource: vpg_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __fw_ipaddr__: { get_param: vfw_private_ip_0 }\n __protected_net_cidr__: { get_param: protected_private_net_cidr }\n __sink_ipaddr__: { get_param: vsn_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n FW_IPADDR=__fw_ipaddr__\n PROTECTED_NET_CIDR=__protected_net_cidr__\n SINK_IPADDR=__sink_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_packetgen_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vpacketgen.sh\n chmod +x v_packetgen_init.sh\n chmod +x vpacketgen.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $FW_IPADDR > config/fw_ipaddr.txt\n echo $PROTECTED_NET_CIDR > config/protected_net_cidr.txt\n echo $SINK_IPADDR > config/sink_ipaddr.txt\n echo \"no\" > config/install.txt\n mv vpacketgen.sh /etc/init.d\n sudo update-rc.d vpacketgen.sh defaults\n ./v_packetgen_init.sh\n\n vpg_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: unprotected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: unprotected_private_subnet }, \"ip_address\": { get_param: vpg_private_ip_0 }}]\n\n vpg_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vpg_private_ip_1 }}]\n\n vsn_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vsn_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vsn_private_0_port }\n - port: { get_resource: vsn_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __protected_net_gw__: { get_param: vfw_private_ip_1 }\n __unprotected_net__: { get_param: unprotected_private_net_cidr }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n PROTECTED_NET_GW=__protected_net_gw__\n UNPROTECTED_NET=__unprotected_net__\n UNPROTECTED_NET=$(echo $UNPROTECTED_NET | cut -d\'/\' -f1)\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_sink_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vsink.sh\n chmod +x v_sink_init.sh\n chmod +x vsink.sh\n echo $PROTECTED_NET_GW > config/protected_net_gw.txt\n echo $UNPROTECTED_NET > config/unprotected_net.txt\n echo \"no\" > config/install.txt\n mv vsink.sh /etc/init.d\n sudo update-rc.d vsink.sh defaults\n ./v_sink_init.sh\n\n vsn_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: protected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: protected_private_subnet }, \"ip_address\": { get_param: vsn_private_ip_0 }}]\n\n vsn_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vsn_private_ip_1 }}]\n \n',300,'MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','dcae_collector_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','ecomp_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','ecomp_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','key_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','protected_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','protected_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','public_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','pub_key','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','unprotected_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','unprotected_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_flavor_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_image_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_private_ip_2','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vf_module_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vnf_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vpg_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vpg_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vpg_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vsn_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vsn_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vsn_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','webserver_ip','','string',NULL);
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('2e34774e-715e-4fd5-bd09-7b654622f35i','vfw-service','585822c7-4027-4f84-ba50-e9248606f112','1.0','VFW service','2016-11-14 13:04:07',NULL);
+
+INSERT INTO `vf_module` (`MODEL_UUID`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `DESCRIPTION`, `IS_BASE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `VOL_HEAT_TEMPLATE_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('1e34774e-715e-4fd5-bd08-7b654622f33f.VF_RI1_VFW::module-1::module-1.group','585822c7-4027-4f84-ba50-e9248606f134','1.0','VF_RI1_VFW::module-1',NULL,1,'Artifact-UUID3',NULL,'2016-11-14 13:04:07','685822c7-4027-4f84-ba50-e9248606f132');
+
+INSERT INTO `vf_module_customization` (`MODEL_CUSTOMIZATION_UUID`, `LABEL`, `INITIAL_COUNT`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_COUNT`, `HEAT_ENVIRONMENT_ARTIFACT_UUID`, `VOL_ENVIRONMENT_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VF_MODULE_MODEL_UUID`) VALUES ('5aa23938-a9fe-11e7-8b4b-0242ac120002',NULL,1,0,NULL,NULL,'EnvArtifact-UUID3',NULL,'2017-10-05 18:52:03','1e34774e-715e-4fd5-bd08-7b654622f33f.VF_RI1_VFW::module-1::module-1.group');
+
+INSERT INTO `vnf_res_custom_to_vf_module_custom` (`VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID`, `VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID`, `CREATION_TIMESTAMP`) VALUES ('5a9bd247-a9fe-11e7-8b4b-0242ac120002','5aa23938-a9fe-11e7-8b4b-0242ac120002','2017-10-05 18:52:03');
+
+INSERT INTO `vnf_resource` (`ORCHESTRATION_MODE`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `MODEL_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `TOSCA_NODE_TYPE`, `HEAT_TEMPLATE_ARTIFACT_UUID`) VALUES ('HEAT','VFW service1707MIGRATED','2016-11-14 13:04:07','685822c7-4027-4f84-ba50-e9248606f132',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f113','1.0','VFWResource',NULL,NULL);
+
+INSERT INTO `vnf_resource_customization` (`MODEL_CUSTOMIZATION_UUID`, `MODEL_INSTANCE_NAME`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_MAX_COUNT`, `NF_TYPE`, `NF_ROLE`, `NF_FUNCTION`, `NF_NAMING_CODE`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('5a9bd247-a9fe-11e7-8b4b-0242ac120002','VFWResource-1',NULL,NULL,NULL,NULL,NULL,NULL,NULL,'2017-10-05 18:52:03','685822c7-4027-4f84-ba50-e9248606f132');
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
diff --git a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql
index c550304d9b..9a3138b688 100644
--- a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql
+++ b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql
@@ -2,25 +2,24 @@ USE camundabpmn;
INSERT INTO `act_id_group` (`ID_`, `REV_`, `NAME_`, `TYPE_`) VALUES ('camunda-admin',1,'camunda BPM Administrators','SYSTEM');
-INSERT INTO `act_id_user` (`ID_`, `REV_`, `FIRST_`, `LAST_`, `EMAIL_`, `PWD_`, `SALT_`, `PICTURE_ID_`) VALUES ('admin',1,'admin','user','camundaadmin@onap.org','{SHA-512}p9Y4lC0X70X5ihfqGTNz/NDuWRbAgtnlEfjbYLMHLp0tMl//B7ujc80MLcHWlJ+jIG14uWTI6AqQYp6PykCxZg==','2vEsAj7cap7XEidkrd4BVg==',NULL);
+INSERT INTO `act_id_user` (`ID_`, `REV_`, `FIRST_`, `LAST_`, `EMAIL_`, `PWD_`, `SALT_`, `PICTURE_ID_`) VALUES ('admin',1,'admin','user','camundaadmin@onap.org','{SHA-512}n5jUw7fvXM9sZBcrIkLiAOCqiPHutaqEkbg6IQVQdylVP1im8SczBJf4f2xL7cvWwIAZjkcSSQzgFTsdaJSEiA==','ftTn4gNgMcq07wdSD0lEJQ==',NULL);
INSERT INTO `act_id_membership` (`USER_ID_`, `GROUP_ID_`) VALUES ('admin','camunda-admin');
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68bc7a75-9cdc-11e7-a63d-0242ac120003',1,1,NULL,'admin',1,'admin',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68c13566-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,2,'camunda-admin',2);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68c4dee7-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,0,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68ca5d28-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,1,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68cd9179-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,2,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68d0ecda-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,3,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68d384eb-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,4,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68d5a7cc-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,5,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68d83fdd-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,6,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68dad7ee-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,7,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68dd48ef-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,8,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68e0a450-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,9,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68e31551-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,10,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68e5ad62-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,11,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68e908c3-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,12,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68eb52b4-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,13,'*',2147483647);
-INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('68edeac5-9cdc-11e7-a63d-0242ac120003',1,1,'camunda-admin',NULL,14,'*',2147483647);
-
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49b0e028-a3c6-11e7-b0ec-0242ac120003',1,1,NULL,'admin',1,'admin',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49b525e9-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,2,'camunda-admin',2);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49b8814a-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,0,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49baa42b-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,1,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49bd8a5c-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,2,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49bfd44d-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,3,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49c1f72e-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,4,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49c41a0f-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,5,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49c77570-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,6,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49ca5ba1-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,7,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49cca592-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,8,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49ceef83-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,9,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d11264-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,10,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d38365-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,11,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d5a646-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,12,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d83e57-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,13,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49da3a28-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,14,'*',2147483647);
diff --git a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql
index b8253616da..a6a7dc59c1 100644
--- a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql
+++ b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql
@@ -27,10 +27,6 @@
drop
foreign key FK_iff1ayhb1hrp5jhea3vvikuni;
- alter table VNF_RES_CUSTOM_TO_VF_MODULE_CUSTOM
- drop
- foreign key FK_6tdyg2ib5eguh4k2qgofh4fj7;
-
drop table if exists ALLOTTED_RESOURCE;
drop table if exists ALLOTTED_RESOURCE_CUSTOMIZATION;
@@ -95,7 +91,7 @@
TOSCA_NODE_TYPE varchar(255),
SUBCATEGORY varchar(255),
DESCRIPTION varchar(255),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_UUID)
);
@@ -111,7 +107,7 @@
NF_NAMING_CODE varchar(255),
MIN_INSTANCES integer,
MAX_INSTANCES integer,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_CUSTOMIZATION_UUID)
);
@@ -121,7 +117,7 @@
VERSION varchar(20) not null,
DESCRIPTION varchar(1200),
BODY longtext not null,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
ARTIFACT_CHECKSUM varchar(200) default 'MANUAL RECORD',
primary key (ARTIFACT_UUID)
);
@@ -132,7 +128,7 @@
NAME varchar(255),
VERSION varchar(255),
BODY varchar(255),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
ARTIFACT_CHECKSUM varchar(255),
primary key (ARTIFACT_UUID)
);
@@ -151,7 +147,7 @@
BODY longtext not null,
TIMEOUT_MINUTES integer,
DESCRIPTION varchar(1200),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
ARTIFACT_CHECKSUM varchar(200) default 'MANUAL RECORD' not null,
primary key (ARTIFACT_UUID)
);
@@ -174,7 +170,7 @@
MODEL_VERSION varchar(20),
MODEL_CUSTOMIZATION_ID varchar(40),
MODEL_CUSTOMIZATION_NAME varchar(40),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (id)
);
@@ -187,7 +183,7 @@
ORCHESTRATION_URI varchar(256) not null,
MODEL_PARAM_XSD varchar(2048),
RECIPE_TIMEOUT integer,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (id)
);
@@ -201,7 +197,7 @@
ORCHESTRATION_URI varchar(256) not null,
NETWORK_PARAM_XSD varchar(2048),
RECIPE_TIMEOUT integer,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (id)
);
@@ -217,7 +213,7 @@
HEAT_TEMPLATE_ARTIFACT_UUID varchar(200) not null,
AIC_VERSION_MIN varchar(20) default 2.5 not null,
AIC_VERSION_MAX varchar(20) default 2.5,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_UUID)
);
@@ -229,7 +225,7 @@
NETWORK_TYPE varchar(255),
NETWORK_SCOPE varchar(255),
NETWORK_ROLE varchar(255),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_CUSTOMIZATION_UUID)
);
@@ -239,7 +235,7 @@
MODEL_VERSION varchar(20) not null,
DESCRIPTION varchar(1200),
TOSCA_CSAR_ARTIFACT_UUID varchar(200),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
MODEL_INVARIANT_UUID varchar(200) default 'MANUAL_RECORD' not null,
SERVICE_TYPE varchar(20),
SERVICE_ROLE varchar(20),
@@ -251,7 +247,7 @@
SERVICE_MODEL_UUID varchar(200) not null,
ACTION varchar(40) not null,
ORCHESTRATION_URI varchar(256) not null,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
VERSION_STR varchar(20),
DESCRIPTION varchar(1200),
SERVICE_PARAM_XSD varchar(2048),
@@ -263,21 +259,21 @@
create table SERVICE_TO_ALLOTTED_RESOURCES (
SERVICE_MODEL_UUID varchar(200) not null,
AR_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (SERVICE_MODEL_UUID, AR_MODEL_CUSTOMIZATION_UUID)
);
create table SERVICE_TO_NETWORKS (
SERVICE_MODEL_UUID varchar(200) not null,
NETWORK_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (SERVICE_MODEL_UUID, NETWORK_MODEL_CUSTOMIZATION_UUID)
);
create table SERVICE_TO_RESOURCE_CUSTOMIZATIONS (
MODEL_TYPE varchar(20) not null,
RESOURCE_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
SERVICE_MODEL_UUID varchar(200) not null,
primary key (MODEL_TYPE, RESOURCE_MODEL_CUSTOMIZATION_UUID)
);
@@ -297,7 +293,7 @@
ARTIFACT_CHECKSUM varchar(200) not null,
URL varchar(200) not null,
DESCRIPTION varchar(1200),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (ARTIFACT_UUID)
);
@@ -311,7 +307,7 @@
IS_BASE integer not null,
HEAT_TEMPLATE_ARTIFACT_UUID varchar(200) not null,
VOL_HEAT_TEMPLATE_ARTIFACT_UUID varchar(200),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_UUID)
);
@@ -319,7 +315,7 @@
MODEL_CUSTOMIZATION_UUID varchar(200) not null,
VF_MODULE_MODEL_UUID varchar(200) not null,
VOL_ENVIRONMENT_ARTIFACT_UUID varchar(200),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
HEAT_ENVIRONMENT_ARTIFACT_UUID varchar(200),
MIN_INSTANCES integer,
MAX_INSTANCES integer,
@@ -340,7 +336,7 @@
COMPONENT_TYPE varchar(20) not null,
HEAT_TEMPLATE_ID integer,
HEAT_ENVIRONMENT_ID integer,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (VNF_ID, COMPONENT_TYPE)
);
@@ -356,7 +352,7 @@
ORCHESTRATION_URI varchar(256) not null,
VNF_COMPONENT_PARAM_XSD varchar(2048),
RECIPE_TIMEOUT integer,
- CREATION_TIMESTAMP datetime,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (id)
);
@@ -371,7 +367,7 @@
ORCHESTRATION_URI varchar(256) not null,
VNF_PARAM_XSD varchar(2048),
RECIPE_TIMEOUT integer,
- CREATION_TIMESTAMP datetime,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (id)
);
@@ -386,7 +382,7 @@
AIC_VERSION_MIN varchar(20),
AIC_VERSION_MAX varchar(20),
HEAT_TEMPLATE_ARTIFACT_UUID varchar(200),
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_UUID)
);
@@ -401,15 +397,14 @@
NF_ROLE varchar(200),
NF_NAMING_CODE varchar(200),
VNF_RESOURCE_MODEL_UUID varchar(200) not null,
- CREATION_TIMESTAMP datetime not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (MODEL_CUSTOMIZATION_UUID)
);
create table VNF_RES_CUSTOM_TO_VF_MODULE_CUSTOM (
VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
- CREATION_TIMESTAMP datetime not null,
- VNF_RESOURCE_MODEL_UUID varchar(200) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID, VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID)
);
@@ -465,8 +460,3 @@
add constraint FK_iff1ayhb1hrp5jhea3vvikuni
foreign key (VNF_RESOURCE_MODEL_UUID)
references VNF_RESOURCE (MODEL_UUID);
-
- alter table VNF_RES_CUSTOM_TO_VF_MODULE_CUSTOM
- add constraint FK_6tdyg2ib5eguh4k2qgofh4fj7
- foreign key (VNF_RESOURCE_MODEL_UUID)
- references VNF_RESOURCE_CUSTOMIZATION (MODEL_CUSTOMIZATION_UUID);
diff --git a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql
index c6f070d7d7..83f0b087c6 100644
--- a/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql
+++ b/kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql
@@ -50,7 +50,7 @@
create table SITE_STATUS (
SITE_NAME varchar(255) not null,
STATUS bit,
- CREATION_TIMESTAMP datetime,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
primary key (SITE_NAME)
);
diff --git a/kubernetes/config/docker/init/src/config/mso/mso/mso-docker.json b/kubernetes/config/docker/init/src/config/mso/mso/mso-docker.json
index 1b28858ac9..76a3370ac8 100755
--- a/kubernetes/config/docker/init/src/config/mso/mso/mso-docker.json
+++ b/kubernetes/config/docker/init/src/config/mso/mso/mso-docker.json
@@ -117,8 +117,8 @@
"versionIdL3ToHigherLayerDeleteBonding": "52dbec20-47aa-42e4-936c-331d8e350d44",
"infraCustomerId": "21014aa2-526b-11e6-beb8-9e71128cae77",
"sniroAuth": "test:testpwd",
- "sniroCallback": "https://mso.onap-mso.svc.cluster.local:8080/adapters/rest/SDNCNotify/SNIROResponse",
- "sniroEndpoint": "https://TODO.att.c om:14699/sniro/api/v1/placement",
+ "sniroCallback": "http://mso.onap-mso.svc.cluster.local:8080/adapters/rest/SDNCNotify/SNIROResponse",
+ "sniroEndpoint": "http://sniro-emulator.onap-mock.svc.cluster.local:8080/sniro/api/v2/placement",
"sniroTimeout": "PT30M",
"sniroPoliciesDHV2vvig": "SNIRO.DistanceToLocationPolicy_vhngw,SNIRO.VNFPolicy_vhngatewayprimary1_v1,SNIRO.ResourceInstancePolicy_hngateway,SNIRO.ResourceRegionPolicy_hngateway_v1,SNIRO.VNFPolicy_vhngatewaysecondary1_v1,SNIRO.ZonePolicy_vhngw,SNIRO.PlacementOptimizationPolicy_dhv_v3,SNIRO.VNFPolicy_vhnportal_primary1_v1,SNIRO.ResourceInstancePolicy_vhnportal_v3,SNIRO.ResourceRegionPolicy_vhnportal_v1,SNIRO.VNFPolicy_vhnportalsecondary1_v1,SNIRO.ZonePolicy_vhnportal,SNIRO.DistanceToLocationPolicy_vvig,SNIRO.InventoryGroupPolicy_vvig,SNIRO.VNFPolicy_vvigprimary1_v1,SNIRO.ResourceInstancePolicy_vvig,SNIRO.VNFPolicy_vvigsecondary1_v1",
"sniroPoliciesDHV4vvig": "SNIRO.DistanceToLocationPolicy_vhngw,SNIRO.VNFPolicy_vhngatewayprimary1_v1,SNIRO.ResourceInstancePolicy_hngateway,SNIRO.ResourceRegionPolicy_hngateway_v1,SNIRO.VNFPolicy_vhngatewaysecondary1_v1,SNIRO.ZonePolicy_vhngw,SNIRO.PlacementOptimizationPolicy_dhv_v3,SNIRO.VNFPolicy_vhnportal_primary1_v1,SNIRO.ResourceInstancePolicy_vhnportal_v3,SNIRO.ResourceRegionPolicy_vhnportal_v1,SNIRO.VNFPolicy_vhnportalsecondary1_v1,SNIRO.ZonePolicy_vhnportal,SNIRO.VNFPolicy_vvigprimary2_v1,SNIRO.VNFPolicy_vvigsecondary2_v1,SNIRO.DistanceToLocationPolicy_vvig,SNIRO.InventoryGroupPolicy_vvig,SNIRO.VNFPolicy_vvigprimary1_v1,SNIRO.ResourceInstancePolicy_vvig,SNIRO.VNFPolicy_vvigsecondary1_v1",
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/dbcapp.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/dbcapp.properties
deleted file mode 100755
index 7415363e4e..0000000000
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/dbcapp.properties
+++ /dev/null
@@ -1,8 +0,0 @@
-# Properties for the Data Bus Controller webapp
-dmaap.rest.url.list = http://dcae-collector-dmaapbc:18080/webapi
-# webapp's mechid is sent to DCAE for authorization
-dmaap.mechid.name = m06672@dbcapp.dmaap.dcae.att.com
-# encrypted with CipherUtil
-dmaap.mechid.password = /F1vRhga1Ijw7yRFFj6R5A==
-# Valid access methods are "dao" and "rest"
-profile.access.method = dao
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/fusion.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/fusion.properties
deleted file mode 100755
index d49d602376..0000000000
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/fusion.properties
+++ /dev/null
@@ -1,34 +0,0 @@
-# login settings
-login_method_backdoor = backdoor
-login_method_attribute_name = login_method
-login_method_csp = csp
-login_method_web_junction = web_junction
-
-#login message
-login.error.hrid.empty = Login failed, please contact system administrator.
-login.error.hrid.not-found = User not found, please contact system administrator.
-login.error.user.inactive = Account is disabled, please contact system administrator.
-
-authentication_mechanism = DBAUTH
-
-user_attribute_name = user
-
-# User Session settings
-roles_attribute_name = roles
-role_functions_attribute_name = role_functions
-
-# POST settings
-post_initial_context_factory = com.sun.jndi.ldap.LdapCtxFactory
-post_provider_url = todo_ldap
-post_security_principal = ou=people,o=org,c=us
-post_max_result_size = 499
-
-# menu settings
-menu_query_name = menuData
-application_menu_set_name = APP
-application_menu_attribute_name = applicationMenuData
-business_direct_menu_set_name = BD
-business_direct_menu_attribute_name = businessDirectMenuData
-
-# Role settings
-sys_admin_role_id = 1
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/portal.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/portal.properties
deleted file mode 100755
index 6dbbee8417..0000000000
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/portal.properties
+++ /dev/null
@@ -1,25 +0,0 @@
-# Properties read by ECOMP Framework library, ecompFW.jar
-
-portal.api.impl.class = org.openecomp.portalapp.service.OnBoardingApiServiceImpl
-portal.api.prefix = /api
-max.idle.time = 5
-user.attribute.name = user_attribute
-
-# Legacy property TODO
-ecomp_redirect_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/login.htm
-
-# URL of the ECOMP Portal REST API
-ecomp_rest_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/auxapi
-
-#Use REST API instead of UEB to fetch the functional menu data
-use_rest_for_functional_menu=true
-
-# Don't fetch functional menu via UEB in 1610.
-ueb_listeners_enable = false
-
-# Application key is used by session management
-ueb_app_key = MtRwsF16RdpHZ7eM
-
-# The inbox name property should not be required when the REST API is used,
-# but in 1610 FuncMenuController logs ALARM if it cannot find this key.
-ecomp_portal_inbox_name = ECOMP-PORTAL-INBOX-TEST
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/system.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/system.properties
deleted file mode 100755
index 46048ec520..0000000000
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPDBCAPP/system.properties
+++ /dev/null
@@ -1,63 +0,0 @@
-#Local
-app_display_name = DMaaP Bus Ctlr
-
-db.driver = com.mysql.jdbc.Driver
-db.connectionURL = jdbc:mysql://portaldb.onap-portal:3306/dbca_os
-db.userName = root
-db.password = password
-db.encrypt_flag = false
-db.hib.dialect = org.hibernate.dialect.MySQLDialect
-db.min_pool_size = 5
-db.max_pool_size = 10
-hb.dialect = org.hibernate.dialect.MySQLDialect
-hb.show_sql = false
-hb.db_reconnect = true
-hb.idle_connection_test_period = 3600
-
-# Directory with the Quantum.lic file
-files_path = /opt/app/tomcat/webapps/dmaap-bc-app/WEB-INF/dbcapp
-
-application_user_id = 30000
-post_default_role_id = 16
-clustered = true
-
-#Enable Fusion Mobile capabilities for the application
-mobile_enable = false
-
-# Cache config file is needed on the classpath
-cache_config_file_path = /WEB-INF/classes/cache.ccf
-cache_switch = 199
-cache_load_on_startup = false
-
-user_name = fullName
-decryption_key = AGLDdG4D04BKm2IxIWEr8o==
-
-#element map files
-element_map_file_path = /tmp
-element_map_icon_path = app/fusionapp/icons/
-
-# Quartz is not used by the DBC application
-# log_cron =
-# mylogins_feed_cron =
-# sessiontimeout_feed_cron =
-# my_login_feed_output_dir =
-
-# ECOMP Portal Shared Context REST API URL is not used by the DBC application
-# ecomp_shared_context_rest_url =
-
-contact_us_link = https://todo_contact_us_link
-
-# An Unique 128-bit value defined to identify a specific version
-# of an application deployed on a specific virtual machine.
-# This value must be generated and updated by the application
-# which is using the ECOMP SDK at the time of its deployment.
-# Online Unique UUID generator - https://www.uuidgenerator.net/
-# ID generated for DBC to supply to EELF
-# A bogus value is cached in SCM: 12345678-..
-instance_uuid = 12345678-90ab-cdef-1234-567890abcdef
-
-# R Cloud feature
-guard_notebook_url=https://todo_rcloud_link
-
-# Application base URL is a proper prefix of the on-boarded URL.
-app_base_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPDBCAPP/
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/fusion.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/fusion.properties
index c92a57393e..20285e3677 100755
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/fusion.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/fusion.properties
@@ -1,5 +1,5 @@
# domain settings
-#domain_class_location =
+#domain_class_location =
# validator settings
#default_error_message = Default error message
@@ -31,7 +31,7 @@ home_page = /index.html
authentication_mechanism =DBAUTH
-login.error.hrid.empty = Login failed, please contact system administrator.
+login.error.hrid.empty = Login failed, please contact system administrator.
login.error.hrid.not-found = User not found, please contact system administrator.
login.error.user.inactive = Account is disabled, please contact system administrator.
@@ -40,7 +40,7 @@ login.error.user.inactive = Account is disabled, please contact system administr
#
health_poll_interval_seconds = 5
#
-# If a component is down a log entry will be written that triggers an alert. This parameter specifies how often this alert should be triggered
+# If a component is down a log entry will be written that triggers an alert. This parameter specifies how often this alert should be triggered
# if the component remains down. For example a value of 30, would translate to 30 * 60 seconds = 1800 seconds, or every 30 minutes
#
health_fail_alert_every_x_intervals = 30
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/logback.xml b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/logback.xml
index 0c0d764701..2c319e03a3 100644
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/logback.xml
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/logback.xml
@@ -1,27 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
- ================================================================================
- eCOMP Portal
- ================================================================================
- Copyright (C) 2017 AT&T Intellectual Property
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
+ ============LICENSE_START==========================================
+ ONAP Portal
+ ===================================================================
+ Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ ===================================================================
+
+ Unless otherwise specified, all software contained herein is licensed
+ under the Apache License, Version 2.0 (the “License”);
+ you may not use this software except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
-
+
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
- ================================================================================
+
+ Unless otherwise specified, all documentation contained herein is licensed
+ under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
+ you may not use this documentation except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://creativecommons.org/licenses/by/4.0/
+
+ Unless required by applicable law or agreed to in writing, documentation
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ ============LICENSE_END============================================
+
+ ECOMP is a trademark and service mark of AT&T Intellectual Property.
-->
<!DOCTYPE xml>
<configuration scan="true" scanPeriod="3 seconds" debug="true">
- <!-- Log-back files for the ECOMP Portal "ecompportal" are created in directory
+ <!-- Log-back files for the ECOMP Portal "ecompportal" are created in directory
${catalina.base}/logs/ecompportal; e.g., apache-tomcat-8.0.35/logs/ecompportal/application.log -->
<!--<jmxConfigurator /> -->
@@ -42,35 +60,35 @@
<property name="metricsLogName" value="metrics" />
<property name="auditLogName" value="audit" />
<property name="debugLogName" value="debug" />
- <!-- These loggers are not used in code (yet). <property name="securityLogName"
- value="security" /> <property name="policyLogName" value="policy" /> <property
- name="performanceLogName" value="performance" /> <property name="serverLogName"
+ <!-- These loggers are not used in code (yet). <property name="securityLogName"
+ value="security" /> <property name="policyLogName" value="policy" /> <property
+ name="performanceLogName" value="performance" /> <property name="serverLogName"
value="server" /> -->
<!-- ServerFQDN=Server, -->
<property name="auditLoggerPattern"
value="%X{AuditLogBeginTimestamp}|%X{AuditLogEndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{Timer}|%X{ServerFQDN}|%X{ClientIPAddress}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" />
-
+
<property name="metricsLoggerPattern"
value="%X{MetricsLogBeginTimestamp}|%X{MetricsLogEndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{Timer}|%X{ServerFQDN}|%X{ClientIPAddress}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{TargetVisualEntity}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" />
-
+
<property name="errorLoggerPattern"
value="%date{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestId}|%thread|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{ClassName}|%X{AlertSeverity}|%X{ErrorCode}|%X{ErrorDescription}| %msg%n" />
-
+
<property name="defaultLoggerPattern"
value="%date{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestId}|%thread|%X{ClassName}| %msg%n" />
-
+
<!-- use %class so library logging calls yield their class name -->
<property name="applicationLoggerPattern"
value="%date{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestId}|%thread|%class{36}| %msg%n" />
-
- <!--
+
+ <!--
<property name="defaultPattern"
value="%date{ISO8601}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Timer}| %msg%n" />
<property name="debugLoggerPattern"
value="%date{ISO8601}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Timer}| %msg%n" />
-->
- <!-- <property name="debugLoggerPattern" value="%date{ISO8601}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n"
+ <!-- <property name="debugLoggerPattern" value="%date{ISO8601}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n"
/> -->
<!-- Example evaluator filter applied against console appender -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
@@ -83,7 +101,7 @@
<!-- EELF Appenders -->
<!-- ============================================================================ -->
- <!-- The EELFAppender is used to record events to the general application
+ <!-- The EELFAppender is used to record events to the general application
log -->
@@ -109,65 +127,65 @@
<appender-ref ref="EELF" />
</appender>
- <!-- EELF Security Appender. This appender is used to record security events
- to the security log file. Security events are separate from other loggers
- in EELF so that security log records can be captured and managed in a secure
- way separate from the other logs. This appender is set to never discard any
+ <!-- EELF Security Appender. This appender is used to record security events
+ to the security log file. Security events are separate from other loggers
+ in EELF so that security log records can be captured and managed in a secure
+ way separate from the other logs. This appender is set to never discard any
events. -->
- <!-- <appender name="EELFSecurity" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${securityLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
- <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip </fileNamePattern>
- <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
- </triggeringPolicy> <encoder> <pattern>${defaultPattern}</pattern> </encoder>
- </appender> <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>256</queueSize> <discardingThreshold>0</discardingThreshold> <appender-ref
+ <!-- <appender name="EELFSecurity" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${securityLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip </fileNamePattern>
+ <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy> <encoder> <pattern>${defaultPattern}</pattern> </encoder>
+ </appender> <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize> <discardingThreshold>0</discardingThreshold> <appender-ref
ref="EELFSecurity" /> </appender> -->
- <!-- EELF Performance Appender. This appender is used to record performance
+ <!-- EELF Performance Appender. This appender is used to record performance
records. -->
- <!-- <appender name="EELFPerformance" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${performanceLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
- <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip </fileNamePattern>
- <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
- </triggeringPolicy> <encoder> <outputPatternAsHeader>true</outputPatternAsHeader>
- <pattern>${defaultPattern}</pattern> </encoder> </appender> <appender name="asyncEELFPerformance"
- class="ch.qos.logback.classic.AsyncAppender"> <queueSize>256</queueSize>
+ <!-- <appender name="EELFPerformance" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${performanceLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip </fileNamePattern>
+ <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy> <encoder> <outputPatternAsHeader>true</outputPatternAsHeader>
+ <pattern>${defaultPattern}</pattern> </encoder> </appender> <appender name="asyncEELFPerformance"
+ class="ch.qos.logback.classic.AsyncAppender"> <queueSize>256</queueSize>
<appender-ref ref="EELFPerformance" /> </appender> -->
- <!-- EELF Server Appender. This appender is used to record Server related
- logging events. The Server logger and appender are specializations of the
- EELF application root logger and appender. This can be used to segregate
- Server events from other components, or it can be eliminated to record these
+ <!-- EELF Server Appender. This appender is used to record Server related
+ logging events. The Server logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate
+ Server events from other components, or it can be eliminated to record these
events as part of the application root log. -->
- <!-- <appender name="EELFServer" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${serverLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
- <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip </fileNamePattern>
- <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
- </triggeringPolicy> <encoder> <pattern>${defaultPattern}</pattern> </encoder>
- </appender> <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- <appender name="EELFServer" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${serverLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip </fileNamePattern>
+ <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy> <encoder> <pattern>${defaultPattern}</pattern> </encoder>
+ </appender> <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">
<queueSize>256</queueSize> <appender-ref ref="EELFServer" /> </appender> -->
- <!-- EELF Policy Appender. This appender is used to record Policy engine
- related logging events. The Policy logger and appender are specializations
- of the EELF application root logger and appender. This can be used to segregate
- Policy engine events from other components, or it can be eliminated to record
+ <!-- EELF Policy Appender. This appender is used to record Policy engine
+ related logging events. The Policy logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
these events as part of the application root log. -->
- <!-- <appender name="EELFPolicy" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${policyLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
- <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip </fileNamePattern>
- <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
- </triggeringPolicy> <encoder> <pattern>${defaultPattern}</pattern> </encoder>
- </appender> <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- <appender name="EELFPolicy" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${policyLogName}.log</file> <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip </fileNamePattern>
+ <minIndex>1</minIndex> <maxIndex>9</maxIndex> </rollingPolicy> <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy"> <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy> <encoder> <pattern>${defaultPattern}</pattern> </encoder>
+ </appender> <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">
<queueSize>256</queueSize> <appender-ref ref="EELFPolicy" /> </appender> -->
- <!-- EELF Audit Appender. This appender is used to record audit engine related
- logging events. The audit logger and appender are specializations of the
- EELF application root logger and appender. This can be used to segregate
- Policy engine events from other components, or it can be eliminated to record
+ <!-- EELF Audit Appender. This appender is used to record audit engine related
+ logging events. The audit logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
these events as part of the application root log. -->
<appender name="EELFAudit"
@@ -255,11 +273,11 @@
<appender-ref ref="asyncEELF" />
</logger>
- <!-- <logger name="com.att.eelf.security" level="info" additivity="false">
- <appender-ref ref="asyncEELFSecurity" /> </logger> <logger name="com.att.eelf.perf"
- level="info" additivity="false"> <appender-ref ref="asyncEELFPerformance"
- /> </logger> <logger name="com.att.eelf.server" level="info" additivity="false">
- <appender-ref ref="asyncEELFServer" /> </logger> <logger name="com.att.eelf.policy"
+ <!-- <logger name="com.att.eelf.security" level="info" additivity="false">
+ <appender-ref ref="asyncEELFSecurity" /> </logger> <logger name="com.att.eelf.perf"
+ level="info" additivity="false"> <appender-ref ref="asyncEELFPerformance"
+ /> </logger> <logger name="com.att.eelf.server" level="info" additivity="false">
+ <appender-ref ref="asyncEELFServer" /> </logger> <logger name="com.att.eelf.policy"
level="info" additivity="false"> <appender-ref ref="asyncEELFPolicy" /> </logger> -->
<logger name="com.att.eelf.audit" level="info" additivity="false">
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/portal.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/portal.properties
index dc6ef6ace2..0863ef5fc2 100755
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/portal.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/portal.properties
@@ -1,4 +1,5 @@
-portal.api.impl.class = org.openecomp.portalsdk.core.onboarding.client.OnBoardingApiServiceImpl
+# Not used by portal
+portal.api.impl.class = org.onap.portalsdk.core.onboarding.client.OnBoardingApiServiceImpl.not.used.by.portal
portal.api.prefix = /api
max.idle.time = 5
user.attribute.name = user_attribute
@@ -11,7 +12,7 @@ ecomp_rest_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/aux
ueb_listeners_enable = true
-ueb_app_key = 7GkVcrO6sIDb3ngW
+ueb_app_key = 7GkVcrO6sIDb3ngW
ueb_app_secret = uCYgKjWKK5IxPGNNZzYSSWo9
ueb_app_mailbox_name = ECOMP-PORTAL-INBOX
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/system.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/system.properties
index d6115b1837..2cb38443a6 100755
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/system.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPPORTALAPP/system.properties
@@ -1,8 +1,8 @@
#mysql
-db.driver = com.mysql.jdbc.Driver
-db.connectionURL = jdbc:mysql://portaldb.onap-portal:3306/portal
+db.driver = org.mariadb.jdbc.Driver
+db.connectionURL = jdbc:mariadb:failover://portaldb.onap-portal:3306/portal
db.userName =root
-db.password =password
+db.password =Aa123456
db.hib.dialect = org.hibernate.dialect.MySQLDialect
db.min_pool_size = 5
db.max_pool_size = 10
@@ -44,14 +44,14 @@ log_cron = 0 * * * * ? *
sessiontimeout_feed_cron = 0 0/5 * * * ? *
#Front end URL
-frontend_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/applicationsHome
+frontend_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/applicationsHome
#cookie domain
cookie_domain = openecomp.org
# An Unqiue 128-bit value defined to indentify a specific version of
# ECOMP Portal deployed on a specific virtual machine.
-# This value must be generated and updated at the time of
+# This value must be generated and updated at the time of
# the deployment.
# Online Unique UUID generator - https://www.uuidgenerator.net/
instance_uuid = 90bc9497-10e6-49fe-916b-dcdfaa972383
@@ -95,3 +95,6 @@ external_access_enable = false
#delete auditlog from number of days ago
auditlog_del_day_from = 365
+
+#External system notification URL
+external_system_notification_url= https://jira.onap.org/browse/
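The ECOMPPORTALAPP system.properties hunk above switches the portal from the MySQL JDBC driver to org.mariadb.jdbc.Driver with a jdbc:mariadb:failover URL and the new root password. A minimal smoke-test sketch follows; the throwaway client pod, the mariadb:10.1 image tag, and the namespace are assumptions for illustration only.

    # Hedged sketch: verify the portal database host and credentials referenced
    # by the new connection URL from a temporary client pod.
    kubectl --namespace onap-portal run mariadb-client --rm -i --restart=Never \
      --image=mariadb:10.1 -- \
      mysql -h portaldb.onap-portal -u root -pAa123456 -e 'SHOW DATABASES LIKE "portal";'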
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/fusion.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/fusion.properties
index 8ce8cc95ad..8ee0a95f74 100644
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/fusion.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/fusion.properties
@@ -8,7 +8,7 @@ login_method_csp = csp
login_method_web_junction = web_junction
#login message
-login.error.hrid.empty = Login failed, please contact system administrator.
+login.error.hrid.empty = Login failed, please contact system administrator.
login.error.hrid.not-found = User not found, please contact system administrator.
login.error.user.inactive = Account is disabled, please contact system administrator.
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/portal.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/portal.properties
index 1e51bdf361..b68ab07f7f 100755
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/portal.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/portal.properties
@@ -16,7 +16,8 @@ use_rest_for_functional_menu=true
##########################################################################
# Name of java class that implements the OnBoardingApiService interface.
-portal.api.impl.class = org.openecomp.portalapp.service.OnBoardingApiServiceImpl
+# epsdk 1.3 uses org.onap prefix
+portal.api.impl.class = org.onap.portalapp.service.OnBoardingApiServiceImpl
# CSP Global Log On for single sign on
ecomp_redirect_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/login.htm
@@ -25,7 +26,7 @@ ecomp_redirect_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL
ecomp_rest_url = http://portal.api.simpledemo.openecomp.org:8989/ECOMPPORTAL/auxapi
-# Applications do not need to run a UEB listener in 1610.
+# Applications do not need to run a UEB listener in 1610.
ueb_listeners_enable = false
# UEB Configuration
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/system.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/system.properties
index 964c7c479e..d638abcca3 100755
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/system.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPSDKAPP/system.properties
@@ -23,11 +23,10 @@ decryption_key = AGLDdG4D04BKm2IxIWEr8o==
# The following properties MAY require changes by partner applications.
##########################################################################
-#Mysql
-db.driver = com.mysql.jdbc.Driver
-db.connectionURL = jdbc:mysql://portaldb.onap-portal:3306/ecomp_sdk
+db.driver = org.mariadb.jdbc.Driver
+db.connectionURL = jdbc:mariadb://portaldb.onap-portal:3306/ecomp_sdk
db.userName = root
-db.password = password
+db.password = Aa123456
db.min_pool_size = 5
db.max_pool_size = 10
hb.dialect = org.hibernate.dialect.MySQLDialect
@@ -53,7 +52,7 @@ contact_us_link = https://todo_contact_us_link.com
# An Unique 128-bit value defined to identify a specific version
# of an application deployed on a specific virtual machine.
-# This value must be generated and updated by the application
+# This value must be generated and updated by the application
# which is using the ECOMP SDK at the time of its deployment.
# Online Unique UUID generator - https://www.uuidgenerator.net/
instance_uuid=8da691c9-987d-43ed-a358-00ac2f35685d
diff --git a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPWIDGETMS/application.properties b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPWIDGETMS/application.properties
index 6e9f6c013f..baf571eb45 100644
--- a/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPWIDGETMS/application.properties
+++ b/kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ECOMPWIDGETMS/application.properties
@@ -8,7 +8,7 @@ microservice.widget.location=/tmp
## App DB Properties
spring.datasource.url=jdbc:mysql://portaldb.onap-portal:3306/portal
spring.datasource.username=root
-spring.datasource.password=password
+spring.datasource.password=Aa123456
spring.jpa.properties.hibernate.dialect=org.hibernate.dialect.MySQLDialect
spring.database.driver.classname=org.mariadb.jdbc.Driver
spring.jpa.show-sql=false
diff --git a/kubernetes/config/docker/init/src/config/robot/eteshare/config/vm_properties.py b/kubernetes/config/docker/init/src/config/robot/eteshare/config/vm_properties.py
index d0ef25623c..5601bdd560 100755
--- a/kubernetes/config/docker/init/src/config/robot/eteshare/config/vm_properties.py
+++ b/kubernetes/config/docker/init/src/config/robot/eteshare/config/vm_properties.py
@@ -11,7 +11,7 @@ GLOBAL_INJECTED_DOCKER_VERSION = "1.1-STAGING-latest"
GLOBAL_INJECTED_GERRIT_BRANCH = "master"
GLOBAL_INJECTED_KEYSTONE = "OPENSTACK_KEYSTONE_IP_HERE"
GLOBAL_INJECTED_MR_IP_ADDR = "dmaap.onap-message-router"
-GLOBAL_INJECTED_MSO_IP_ADDR = "mso.onap-mso"
+GLOBAL_INJECTED_SO_IP_ADDR = "mso.onap-mso"
GLOBAL_INJECTED_NETWORK = "OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE"
GLOBAL_INJECTED_NEXUS_DOCKER_REPO = "nexus3.onap.org:10001"
GLOBAL_INJECTED_NEXUS_PASSWORD = "docker"
diff --git a/kubernetes/config/docker/init/src/config/sdnc/conf/aaiclient.properties b/kubernetes/config/docker/init/src/config/sdnc/conf/aaiclient.properties
index fbd74d5499..4f66294c42 100755
--- a/kubernetes/config/docker/init/src/config/sdnc/conf/aaiclient.properties
+++ b/kubernetes/config/docker/init/src/config/sdnc/conf/aaiclient.properties
@@ -26,65 +26,224 @@
#
# Certificate keystore and truststore
#
-org.openecomp.sdnc.sli.aai.ssl.trust=/opt/openecomp/sdnc/data/stores/truststore.openecomp.client.jks
-org.openecomp.sdnc.sli.aai.ssl.trust.psswd=adminadmin
-org.openecomp.sdnc.sli.aai.host.certificate.ignore=true
+org.onap.ccsdk.sli.adaptors.aai.ssl.trust=/opt/onap/sdnc/data/stores/truststore.openecomp.client.jks
+org.onap.ccsdk.sli.adaptors.aai.ssl.trust.psswd=adminadmin
+org.onap.ccsdk.sli.adaptors.aai.host.certificate.ignore=true
-org.openecomp.sdnc.sli.aai.client.name=ONAP
-org.openecomp.sdnc.sli.aai.client.psswd=ONAP
+org.onap.ccsdk.sli.adaptors.aai.client.name=AAI
+org.onap.ccsdk.sli.adaptors.aai.client.psswd=AAI
-org.openecomp.sdnc.sli.aai.application=ONAP
+org.onap.ccsdk.sli.adaptors.aai.application=openECOMP
+#connection.timeout=1000
+#read.timeout=2000
#
# Configuration file for A&AI Client
#
-org.openecomp.sdnc.sli.aai.uri=https://aai-service.onap-aai:8443
-connection.timeout=60000
-read.timeout=60000
+org.onap.ccsdk.sli.adaptors.aai.uri=https://aai-service.onap-aai:8443
+
# query
-org.openecomp.sdnc.sli.aai.path.query=/aai/v11/search/sdn-zone-query
-org.openecomp.sdnc.sli.aai.query.nodes=/aai/v11/search/nodes-query?search-node-type={node-type}&filter={entity-identifier}:EQUALS:{entity-name}
-org.openecomp.sdnc.sli.aai.query.generic=/aai/v11/search/generic-query?key={identifier}:{value}&start-node-type={start-node-type}&include=complex&depth=3
+org.onap.ccsdk.sli.adaptors.aai.path.query=/aai/v11/search/sdn-zone-query
+org.onap.ccsdk.sli.adaptors.aai.query.nodes=/aai/v11/search/nodes-query?search-node-type={node-type}&filter={entity-identifier}:EQUALS:{entity-name}
+org.onap.ccsdk.sli.adaptors.aai.query.generic=/aai/v11/search/generic-query?key={identifier}:{value}&start-node-type={start-node-type}&include=complex&depth=3
# named query
-org.openecomp.sdnc.sli.aai.query.named=/aai/search/named-query
+org.onap.ccsdk.sli.adaptors.aai.query.named=/aai/search/named-query
+
#update
-org.openecomp.sdnc.sli.aai.update=/aai/v11/actions/update
+org.onap.ccsdk.sli.adaptors.aai.update=/aai/v11/actions/update
-# UBB Notify
-org.openecomp.sdnc.sli.aai.path.notify=/aai/v11/actions/notify
-org.openecomp.sdnc.sli.aai.notify.selflink.fqdn=<%= @ubbUri %>/restconf/config/L3SDN-API:services/layer3-service-list/{service-instance-id}
-org.openecomp.sdnc.sli.aai.notify.selflink.avpn=<%= @ubbUri %>/restconf/config/L3AVPN-EVC-API:services/service-list/{service-instance-id}/service-data/avpn-logicalchannel-information
+# vce
+org.onap.ccsdk.sli.adaptors.aai.path.vce =/aai/v11/network/vces/vce/
+org.onap.ccsdk.sli.adaptors.aai.path.vces=/aai/v11/network/vces/
+
+# vpe
+org.onap.ccsdk.sli.adaptors.aai.path.vpe =/aai/v11/network/vpes/vpe/
+org.onap.ccsdk.sli.adaptors.aai.path.vpes=/aai/v11/network/vpes/
+
+# customer
+org.onap.ccsdk.sli.adaptors.aai.path.customer=/aai/v11/business/customers/customer/{customer-id}
+
+# service subscription
+org.onap.ccsdk.sli.adaptors.aai.path.service.subscription=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}
+
+# service instance
+org.onap.ccsdk.sli.adaptors.aai.path.svcinst=/aai/v11/business/customers/customer/{customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
+org.onap.ccsdk.sli.adaptors.aai.path.svcinst.query=/aai/v11/search/generic-query?key=service-instance.service-instance-id:{svc-instance-id}&start-node-type=service-instance&include=service-instance
+org.onap.ccsdk.sli.adaptors.aai.path.service.instance=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances/service-instance/{service-instance-id}
+
+# complex
+org.onap.ccsdk.sli.adaptors.aai.path.complexes=/aai/v11/cloud-infrastructure/complexes
+org.onap.ccsdk.sli.adaptors.aai.path.complex=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}
+
+# tenant
+org.onap.ccsdk.sli.adaptors.aai.path.tenant=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}
+org.onap.ccsdk.sli.adaptors.aai.path.tenant.query=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant?tenant-name={tenant-name}
+
+# vservers
+org.onap.ccsdk.sli.adaptors.aai.path.vservers=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/
+org.onap.ccsdk.sli.adaptors.aai.path.vserver=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}
+
+# vpls-pe
+org.onap.ccsdk.sli.adaptors.aai.path.vpls.pes=/aai/v11/network/vpls-pes/
+org.onap.ccsdk.sli.adaptors.aai.path.vpls.pe =/aai/v11/network/vpls-pes/vpls-pe/
+
+# ctag-pool
+org.onap.ccsdk.sli.adaptors.aai.path.ctag.pools=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools
+org.onap.ccsdk.sli.adaptors.aai.path.ctag.pool=/aai/v11/cloud-infrastructure/complexes/complex/{physical-location-id}/ctag-pools/ctag-pool/{target-pe}/{availability-zone-name}
+
+#
+#-------------- 1510 ----------------------
+#
+
+# pservers
+org.onap.ccsdk.sli.adaptors.aai.path.pservers=/aai/v11/cloud-infrastructure/pservers
+org.onap.ccsdk.sli.adaptors.aai.path.pserver=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}
+
+# generic-vnf
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnfs=/aai/v11/network/generic-vnfs
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}
+
+# dvs-switch
+org.onap.ccsdk.sli.adaptors.aai.path.dvsswitches=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches
+org.onap.ccsdk.sli.adaptors.aai.path.dvsswitch=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/dvs-switches/dvs-switch/{switch-name}
+
+# L3 Networks
+org.onap.ccsdk.sli.adaptors.aai.path.l3networks=/aai/v11/network/l3-networks
+org.onap.ccsdk.sli.adaptors.aai.path.l3network=/aai/v11/network/l3-networks/l3-network/{network-id}
+org.onap.ccsdk.sli.adaptors.aai.path.l3network.query.name=/aai/v11/network/l3-networks/l3-network?network-name={network-name}
# P-Interfaces
-org.openecomp.sdnc.sli.aai.path.pserver.pinterfaces=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces
-org.openecomp.sdnc.sli.aai.path.pserver.pinterface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterfaces=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces
+org.onap.ccsdk.sli.adaptors.aai.path.pserver.pinterface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{interface-name}
+
+# Physical Link
+org.onap.ccsdk.sli.adaptors.aai.path.physical.links=/aai/v11/network/physical-links
+org.onap.ccsdk.sli.adaptors.aai.path.physical.link=/aai/v11/network/physical-links/physical-link/{link-name}
+
+# VPN Bindings
+org.onap.ccsdk.sli.adaptors.aai.path.vpn.bindings=/aai/v11/network/vpn-bindings/
+org.onap.ccsdk.sli.adaptors.aai.path.vpn.binding=/aai/v11/network/vpn-bindings/vpn-binding/{vpn-id}
+
+# VNF IMAGES
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.images=/aai/v11/service-design-and-creation/vnf-images
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.image=/aai/v11/service-design-and-creation/vnf-images/vnf-image/{att-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.vnf.image.query=/aai/v11/service-design-and-creation/vnf-images/vnf-image?application={application_model}&application-vendor={application_vendor}
+
+# UBB Notify
+org.onap.ccsdk.sli.adaptors.aai.path.notify=/aai/v11/actions/notify
+org.onap.ccsdk.sli.adaptors.aai.notify.selflink.fqdn=https://aai-service.onap-aai:8443/restconf/config/L3SDN-API:services/layer3-service-list/{service-instance-id}
+org.onap.ccsdk.sli.adaptors.aai.notify.selflink.avpn=https://aai-service.onap-aai:8543/restconf/config/L3AVPN-EVC-API:services/service-list/{service-instance-id}/service-data/avpn-logicalchannel-information
# Service
-org.openecomp.sdnc.sli.aai.path.service=/aai/v11/service-design-and-creation/services/service/{service-id}
-org.openecomp.sdnc.sli.aai.path.services=/aai/v11/service-design-and-creation/services
+org.onap.ccsdk.sli.adaptors.aai.path.service=/aai/v11/service-design-and-creation/services/service/{service-id}
+org.onap.ccsdk.sli.adaptors.aai.path.services=/aai/v11/service-design-and-creation/services
-# service instance
-org.openecomp.sdnc.sli.aai.path.svcinst=/aai/v11/business/customers/customer/{customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances
-org.openecomp.sdnc.sli.aai.path.svcinst.query=/aai/v11/search/generic-query?key=service-instance.service-instance-id:{svc-instance-id}&start-node-type=service-instance&include=service-instance
-org.openecomp.sdnc.sli.aai.path.service.instance=/aai/v11/business/customers/customer/{global-customer-id}/service-subscriptions/service-subscription/{service-type}/service-instances/service-instance/{service-instance-id}
+
+#
+#-------------- 1604 ----------------------
+#
+
+# VNFC
+org.onap.ccsdk.sli.adaptors.aai.path.vnfc=/aai/v11/network/vnfcs/vnfc/{vnfc-name}
+
+# class-of-service
+org.onap.ccsdk.sli.adaptors.aai.path.class.of.service=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}/classes-of-service/class-of-service/{cos-id}
+
+# site-pair
+org.onap.ccsdk.sli.adaptors.aai.path.site.pair=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}/site-pairs/site-pair/{site-pair-id}
+
+# routing-instance
+org.onap.ccsdk.sli.adaptors.aai.path.routing.instance=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}/routing-instances/routing-instance/{routing-instance-id}
# site-pair-set
-org.openecomp.sdnc.sli.aai.path.site.pair.set=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}
+org.onap.ccsdk.sli.adaptors.aai.path.site.pair.set=/aai/v11/network/site-pair-sets/site-pair-set/{site-pair-set-id}
-# VNF IMAGES QUERY
-org.openecomp.sdnc.sli.aai.path.vnf.image.query=/aai/v11/service-design-and-creation/vnf-images/vnf-image?application={application_model}&application-vendor={application_vendor}
+# license key resource
+org.onap.ccsdk.sli.adaptors.aai.path.license.acquire=/aai/v11/actions/assignment/license-management/assignment-group-uuid/{assignment-group-uuid}
+org.onap.ccsdk.sli.adaptors.aai.path.license=/aai/v11/license-management/license-key-resources/license-key-resource/{att-uuid}
-# vservers
-org.openecomp.sdnc.sli.aai.path.vservers=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/
-org.openecomp.sdnc.sli.aai.path.vserver=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}
+# logical-link
+org.onap.ccsdk.sli.adaptors.aai.path.logical.link =/aai/v11/network/logical-links/logical-link/{link-name}
+
+# virtual-data-center
+org.onap.ccsdk.sli.adaptors.aai.path.virtual.data.center=/aai/v11/cloud-infrastructure/virtual-data-centers/virtual-data-center/{vdc-id}
+
+# wan-connector
+org.onap.ccsdk.sli.adaptors.aai.path.wan.connector=/aai/v11/business/connectors/connector/{resource-instance-id}
+
+# l-interface
+org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface=/aai/v11/cloud-infrastructure/pservers/pserver/{hostname}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+
+# l-interface pnf
+org.onap.ccsdk.sli.adaptors.aai.path.lag.interface.l.interface.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.p.interface.l.interface.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+
+# subinterface
+org.onap.ccsdk.sli.adaptors.aai.path.pnf.lag.interface.subinterface=/aai/v11/network/pnfs/pnf/{pnf-name}/lag-interfaces/lag-interface/{lag-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+org.onap.ccsdk.sli.adaptors.aai.path.pnf.p.interface.l.interface=/aai/v11/network/pnfs/pnf/{pnf-name}/p-interfaces/p-interface/{p-interface.interface-name}/l-interfaces/l-interface/{interface-name}
+
+# vlans
+org.onap.ccsdk.sli.adaptors.aai.path.vlan=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/tenants/tenant/{tenant-id}/vservers/vserver/{vserver-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.vlan=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}
+
+# l3-interface-ipv4-address-list
+org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+
+# l3-interface-ipv6-address-list
+org.onap.ccsdk.sli.adaptors.aai.path.l3.interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+org.onap.ccsdk.sli.adaptors.aai.path.vlan.l3.interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+
+# ipsec-configuration
+org.onap.ccsdk.sli.adaptors.aai.path.ipsec.configuration=/aai/v11/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}
+
+# vig server
+org.onap.ccsdk.sli.adaptors.aai.path.vig.server=/aai/v11/network/ipsec-configurations/ipsec-configuration/{ipsec-configuration-id}/vig-servers/vig-server/{vig-address-type}
+
+# l3-network
+org.onap.ccsdk.sli.adaptors.aai.path.l3.network=/aai/v11/network/l3-networks/l3-network/{network-id}
+
+# subnet
+org.onap.ccsdk.sli.adaptors.aai.path.subnet=/aai/v11/network/l3-networks/l3-network/{network-id}/subnets/subnet/{subnet-id}
+
+# multicast-configuration
+org.onap.ccsdk.sli.adaptors.aai.path.multicast.configuration=/aai/v11/network/multicast-configurations/multicast-configuration/{multicast-configuration-id}
+
+# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.ipv4.address.list
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+
+# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.vlan.ipv4.address.list
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.vlan.ipv4.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/vlans/vlan/{vlan-interface}/l3-interface-ipv4-address-list/{l3-interface-ipv4-address}
+
+# org.onap.ccsdk.sli.adaptors.aai.path.l.interface.ipv6.address.list
+org.onap.ccsdk.sli.adaptors.aai.path.l3-interface.ipv6.address.list=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}/l3-interface-ipv6-address-list/{l3-interface-ipv6-address}
+
+# volume.group
+org.onap.ccsdk.sli.adaptors.aai.path.volume.group=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}/volume-groups/volume-group/{volume-group-id}
+
+#cloud region
+org.onap.ccsdk.sli.adaptors.aai.path.cloud.region=/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/{cloud-owner}/{cloud-region-id}
+
+# vf-module
+org.onap.ccsdk.sli.adaptors.aai.path.vf.module=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/vf-modules/vf-module/{vf-module-id}
+
+# l-interface through generic-vnf
+org.onap.ccsdk.sli.adaptors.aai.path.generic.vnf.linterface=/aai/v11/network/generic-vnfs/generic-vnf/{vnf-id}/l-interfaces/l-interface/{interface-name}
+
+# network-policy
+org.onap.ccsdk.sli.adaptors.aai.path.network.policy=/aai/v11/network/network-policies/network-policy/{network-policy-id}
+
+# pnf
+org.onap.ccsdk.sli.adaptors.aai.path.pnf=/aai/v11/network/pnfs/pnf/{pnf-name}
#
# Formatting
#
-org.openecomp.sdnc.sli.aai.param.format=filter=%s:%s
-org.openecomp.sdnc.sli.aai.param.vnf_type=vnf-type
-org.openecomp.sdnc.sli.aai.param.physical.location.id=physical-location-id
-org.openecomp.sdnc.sli.aai.param.service.type=service-type
+org.onap.ccsdk.sli.adaptors.aai.param.format=filter=%s:%s
+org.onap.ccsdk.sli.adaptors.aai.param.vnf_type=vnf-type
+org.onap.ccsdk.sli.adaptors.aai.param.physical.location.id=physical-location-id
+org.onap.ccsdk.sli.adaptors.aai.param.service.type=service-type
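The aaiclient.properties rewrite above migrates every key from the org.openecomp.sdnc.sli.aai prefix to org.onap.ccsdk.sli.adaptors.aai and adds many new A&AI resource paths. A minimal sketch for migrating the prefix in a locally maintained override file is below; the file path is a placeholder, and the prefix rename alone is not sufficient — several property values (URIs, client name, application) also change in this patch and need manual review.

    # Hedged sketch: bulk-rewrite the old SDNC AAI client prefix to the CCSDK
    # prefix in a local override file, keeping a .bak copy.
    sed -i.bak \
      -e 's/^org\.openecomp\.sdnc\.sli\.aai\./org.onap.ccsdk.sli.adaptors.aai./' \
      /opt/onap/sdnc/data/properties/aaiclient.properties.local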
diff --git a/kubernetes/config/docker/init/src/config/sdnc/conf/admportal.json b/kubernetes/config/docker/init/src/config/sdnc/conf/admportal.json
index fc7f0e482d..f6f59ae797 100755
--- a/kubernetes/config/docker/init/src/config/sdnc/conf/admportal.json
+++ b/kubernetes/config/docker/init/src/config/sdnc/conf/admportal.json
@@ -25,7 +25,7 @@
"docker0": "172.17.0.1",
"virbr0": "192.168.122.1"
},
- "svclogicPropertiesDb01": "/opt/openecomp/sdnc/data/properties/svclogic.properties.sdnctldb01",
+ "svclogicPropertiesDb01": "/opt/onap/sdnc/data/properties/svclogic.properties.sdnctldb01",
"databases": [
"dbhost|sdnctldb01"
],
diff --git a/kubernetes/oneclick/aaf.sh b/kubernetes/oneclick/aaf.sh
deleted file mode 100644
index 9a85300722..0000000000
--- a/kubernetes/oneclick/aaf.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "AAF....\n"
-
-$KUBECTL_CMD/aaf-deployment.yaml
-$KUBECTL_CMD/aaf-cs-deployment.yaml
diff --git a/kubernetes/oneclick/aai.sh b/kubernetes/oneclick/aai.sh
deleted file mode 100755
index 9a8e837093..0000000000
--- a/kubernetes/oneclick/aai.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "AAI....\n"
-$KUBECTL_CMD/hbase-deployment.yaml
-$KUBECTL_CMD/aai-deployment.yaml
-$KUBECTL_CMD/modelloader-deployment.yaml
diff --git a/kubernetes/oneclick/appc.sh b/kubernetes/oneclick/appc.sh
deleted file mode 100755
index 2db7b210b2..0000000000
--- a/kubernetes/oneclick/appc.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "App-c....\n"
-$KUBECTL_CMD/db-deployment.yaml
-$KUBECTL_CMD/appc-deployment.yaml
-$KUBECTL_CMD/dgbuilder-deployment.yaml
diff --git a/kubernetes/oneclick/clamp.sh b/kubernetes/oneclick/clamp.sh
deleted file mode 100644
index 2440ccce55..0000000000
--- a/kubernetes/oneclick/clamp.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "CLAMP....\n"
-
-$KUBECTL_CMD/clamp-maraidb-deployment.yaml
-$KUBECTL_CMD/clamp-deployment.yaml
diff --git a/kubernetes/oneclick/createAll.bash b/kubernetes/oneclick/createAll.bash
index 2b4f48b8ff..72d852ded6 100755
--- a/kubernetes/oneclick/createAll.bash
+++ b/kubernetes/oneclick/createAll.bash
@@ -26,6 +26,11 @@ check_return_code(){
fi
}
+create_service_account() {
+ cmd=`echo kubectl create clusterrolebinding $1-$2-admin-binding --clusterrole=cluster-admin --serviceaccount=$1-$2:default`
+ eval ${cmd}
+ check_return_code $cmd
+}
create_namespace() {
cmd=`echo kubectl create namespace $1-$2`
@@ -164,6 +169,9 @@ for i in ${HELM_APPS[@]}; do
printf "\nCreating namespace **********\n"
create_namespace $NS $i
+ printf "\nCreating service account **********\n"
+ create_service_account $NS $i
+
printf "\nCreating registry secret **********\n"
create_registry_key $NS $i ${NS}-docker-registry-key $ONAP_DOCKER_REGISTRY $DU $DP $ONAP_DOCKER_MAIL
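The createAll.bash hunk above adds create_service_account, which binds each per-application namespace's default service account to the cluster-admin role before the charts are installed; the matching delete_service_account added to deleteAll.bash further below removes the binding on teardown. The expanded command for NS=onap and app=aai, following the script's $1-$2 naming pattern, looks like the sketch below.

    # Hedged sketch: the clusterrolebinding created for the onap-aai namespace,
    # plus a quick check that it exists.
    kubectl create clusterrolebinding onap-aai-admin-binding \
      --clusterrole=cluster-admin \
      --serviceaccount=onap-aai:default
    kubectl get clusterrolebinding onap-aai-admin-binding -o wide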
diff --git a/kubernetes/oneclick/dcae.sh b/kubernetes/oneclick/dcae.sh
deleted file mode 100755
index 5dd1a2f071..0000000000
--- a/kubernetes/oneclick/dcae.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# manual deployment order is mentioned - but we need to specify dependencies in the service
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "dcae....\n"
-$KUBECTL_CMD/message-router/dcae-zookeeper.yaml
-$KUBECTL_CMD/message-router/dcae-kafka.yaml
-$KUBECTL_CMD/message-router/dcae-dmaap.yaml
-
-$KUBECTL_CMD/pgaas/pgaas.yaml
-
-$KUBECTL_CMD/dcae-collector-common-event.yaml
-$KUBECTL_CMD/dcae-collector-dmaapbc.yaml
-$KUBECTL_CMD/dcae-collector-pvs.yaml
-
-$KUBECTL_CMD/cdap/cdap0-dep.yaml
-$KUBECTL_CMD/cdap/cdap1-dep.yaml
-$KUBECTL_CMD/cdap/cdap2-dep.yaml
diff --git a/kubernetes/oneclick/deleteAll.bash b/kubernetes/oneclick/deleteAll.bash
index 4818155e7d..2d5779ea2a 100755
--- a/kubernetes/oneclick/deleteAll.bash
+++ b/kubernetes/oneclick/deleteAll.bash
@@ -12,6 +12,11 @@ delete_namespace() {
printf "Namespace $_NS deleted.\n\n"
}
+delete_service_account() {
+ kubectl delete clusterrolebinding $1-$2-admin-binding
+ printf "Service account $1-$2-admin-binding deleted.\n\n"
+}
+
delete_registry_key() {
kubectl --namespace $1-$2 delete secret ${1}-docker-registry-key
}
@@ -76,6 +81,7 @@ for i in ${HELM_APPS[@]}; do
delete_app_helm $NS $i
delete_namespace $NS $i
+ delete_service_account $NS $i
done
diff --git a/kubernetes/oneclick/message-router.sh b/kubernetes/oneclick/message-router.sh
deleted file mode 100755
index 0369fd1a18..0000000000
--- a/kubernetes/oneclick/message-router.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "Message Router....\n"
-$KUBECTL_CMD/message-router-zookeeper.yaml
-$KUBECTL_CMD/message-router-kafka.yaml
-$KUBECTL_CMD/message-router-dmaap.yaml
diff --git a/kubernetes/oneclick/mso.sh b/kubernetes/oneclick/mso.sh
deleted file mode 100755
index 360f3b80d9..0000000000
--- a/kubernetes/oneclick/mso.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "MSO....\n"
-
-$KUBECTL_CMD/db-deployment.yaml
-$KUBECTL_CMD/mso-deployment.yaml
diff --git a/kubernetes/oneclick/policy.sh b/kubernetes/oneclick/policy.sh
deleted file mode 100755
index 85e58f6468..0000000000
--- a/kubernetes/oneclick/policy.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "Policy....\n"
-
-$KUBECTL_CMD/dep-maria.yaml
-$KUBECTL_CMD/dep-nexus.yaml
-$KUBECTL_CMD/dep-pap.yaml
-$KUBECTL_CMD/dep-pdp.yaml
-$KUBECTL_CMD/dep-brmsgw.yaml
-$KUBECTL_CMD/dep-pypdp.yaml
-$KUBECTL_CMD/dep-drools.yaml
diff --git a/kubernetes/oneclick/portal.sh b/kubernetes/oneclick/portal.sh
deleted file mode 100755
index c1484859b3..0000000000
--- a/kubernetes/oneclick/portal.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "Portal....\n"
-$KUBECTL_CMD/portal-mariadb-deployment.yaml
-$KUBECTL_CMD/portal-apps-deployment.yaml
-$KUBECTL_CMD/portal-widgets-deployment.yaml
-$KUBECTL_CMD/portal-vnc-dep.yaml
diff --git a/kubernetes/oneclick/robot.sh b/kubernetes/oneclick/robot.sh
deleted file mode 100755
index 99a5748f7a..0000000000
--- a/kubernetes/oneclick/robot.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "Robot....\n"
-$KUBECTL_CMD/robot-deployment.yaml
diff --git a/kubernetes/oneclick/sdc.sh b/kubernetes/oneclick/sdc.sh
deleted file mode 100755
index 72a85cbe4b..0000000000
--- a/kubernetes/oneclick/sdc.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "SDC....\n"
-$KUBECTL_CMD/sdc-es.yaml
-$KUBECTL_CMD/sdc-cs.yaml
-$KUBECTL_CMD/sdc-kb.yaml
-$KUBECTL_CMD/sdc-be.yaml
-$KUBECTL_CMD/sdc-fe.yaml
diff --git a/kubernetes/oneclick/sdnc.sh b/kubernetes/oneclick/sdnc.sh
deleted file mode 100755
index d7390ba01b..0000000000
--- a/kubernetes/oneclick/sdnc.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "SDNC....\n"
-$KUBECTL_CMD/db-deployment.yaml
-$KUBECTL_CMD/sdnc-deployment.yaml
-$KUBECTL_CMD/dgbuilder-deployment.yaml
-$KUBECTL_CMD/web-deployment.yaml
diff --git a/kubernetes/oneclick/vfc.sh b/kubernetes/oneclick/vfc.sh
deleted file mode 100755
index 0199a2a31b..0000000000
--- a/kubernetes/oneclick/vfc.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "vfc....\n"
-$KUBECTL_CMD/vfc-catalog-deployment.yaml
-$KUBECTL_CMD/vfc-emsdriver-deployment.yaml
-$KUBECTL_CMD/vfc-gvnfmdriver-deployment.yaml
-$KUBECTL_CMD/vfc-hwvnfmdriver-deployment.yaml
-$KUBECTL_CMD/vfc-jujudriver-deployment.yaml
-$KUBECTL_CMD/vfc-nslcm-deployment.yaml
-$KUBECTL_CMD/vfc-resmgr-deployment.yaml
-$KUBECTL_CMD/vfc-vnflcm-deployment.yaml
-$KUBECTL_CMD/vfc-vnfmgr-deployment.yaml
-$KUBECTL_CMD/vfc-vnfres-deployment.yaml
-$KUBECTL_CMD/vfc-workflow-deployment.yaml
-$KUBECTL_CMD/vfc-ztesdncdriver-deployment.yaml
-$KUBECTL_CMD/vfc-ztevmanagerdriver-deployment.yaml
diff --git a/kubernetes/oneclick/vid.sh b/kubernetes/oneclick/vid.sh
deleted file mode 100755
index b88a3fb4a8..0000000000
--- a/kubernetes/oneclick/vid.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "VID....\n"
-$KUBECTL_CMD/vid-mariadb-deployment.yaml
-$KUBECTL_CMD/vid-server-deployment.yaml
diff --git a/kubernetes/oneclick/vnfsdk.sh b/kubernetes/oneclick/vnfsdk.sh
deleted file mode 100644
index c2853f7e1e..0000000000
--- a/kubernetes/oneclick/vnfsdk.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-KUBECTL_CMD="kubectl --namespace $1-$2 $3 -f ../$2"
-
-printf "VNFSDK....\n"
-
-$KUBECTL_CMD/postgres-deployment.yaml
-$KUBECTL_CMD/refrepo-deployment.yaml
diff --git a/kubernetes/portal/templates/portal-apps-deployment.yaml b/kubernetes/portal/templates/portal-apps-deployment.yaml
index 13cf57093d..c9cb645bae 100755
--- a/kubernetes/portal/templates/portal-apps-deployment.yaml
+++ b/kubernetes/portal/templates/portal-apps-deployment.yaml
@@ -38,7 +38,7 @@ spec:
"name": "portalapps-readiness"
},
{
- "command": ["/bin/bash", "-c", "if [ ! -e /portal_root/boot.txt ]; then mysql -u root -ppassword -h portaldb < /portal-mysql/oom_updates.sql; fi"],
+ "command": ["/bin/bash", "-c", "if [ ! -e /portal_root/boot.txt ]; then mysql -u root -pAa123456 -h portaldb < /portal-mysql/oom_updates.sql; fi"],
"volumeMounts": [
{
"mountPath": "/portal-mysql/oom_updates.sql",
@@ -77,14 +77,6 @@ spec:
name: portal-portal-properties
- mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPPORTAL/WEB-INF/classes/logback.xml"
name: portal-logback
- - mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPDBCAPP/WEB-INF/fusion/conf/fusion.properties"
- name: dbcapp-fusion-properties
- - mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPDBCAPP/WEB-INF/conf/system.properties"
- name: dbcapp-system-properties
- - mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPDBCAPP/WEB-INF/classes/portal.properties"
- name: dbcapp-portal-properties
- - mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPDBCAPP/WEB-INF/dbcapp/dbcapp.properties"
- name: dbcapp-dbcapp-properties
- mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPSDKAPP/WEB-INF/conf/system.properties"
name: sdkapp-system-properties
- mountPath: "{{ .Values.onapPortal.webappsDir }}/ECOMPSDKAPP/WEB-INF/classes/portal.properties"
diff --git a/kubernetes/portal/templates/portal-mariadb-deployment.yaml b/kubernetes/portal/templates/portal-mariadb-deployment.yaml
index df38a2abb3..8dc7ebabf3 100755
--- a/kubernetes/portal/templates/portal-mariadb-deployment.yaml
+++ b/kubernetes/portal/templates/portal-mariadb-deployment.yaml
@@ -14,14 +14,14 @@ spec:
name: portaldb
spec:
containers:
- - image: {{ .Values.image.portaldb}}
+ - image: {{ .Values.image.portaldb}}
imagePullPolicy: {{ .Values.pullPolicy }}
name: "portaldb"
env:
- name: MYSQL_HOST
value: "portaldb.{{ .Values.nsPrefix }}-portal"
- name: MYSQL_ROOT_PASSWORD
- value: password
+ value: Aa123456
volumeMounts:
- mountPath: /etc/localtime
name: localtime
diff --git a/kubernetes/portal/values.yaml b/kubernetes/portal/values.yaml
index 757995ab63..eadc834599 100644
--- a/kubernetes/portal/values.yaml
+++ b/kubernetes/portal/values.yaml
@@ -3,10 +3,10 @@ pullPolicy: Always
nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
- portalapps: nexus3.onap.org:10001/openecomp/portal-apps:1.1-STAGING-latest
- portaldb: nexus3.onap.org:10001/openecomp/portal-db:1.1-STAGING-latest
+ portalapps: nexus3.onap.org:10001/openecomp/portal-apps:1.3-STAGING-latest
+ portaldb: nexus3.onap.org:10001/openecomp/portal-db:1.3-STAGING-latest
mariadbClient: oomk8s/mariadb-client-init:1.0.0
- portalwms: nexus3.onap.org:10001/openecomp/portal-wms:1.1-STAGING-latest
+ portalwms: nexus3.onap.org:10001/openecomp/portal-wms:1.3-STAGING-latest
ubuntuInit: oomk8s/ubuntu-init:1.0.0
ubuntuDesktop: dorowu/ubuntu-desktop-lxde-vnc
filebeat: docker.elastic.co/beats/filebeat:5.5.0
diff --git a/kubernetes/sdc/templates/sdc-be.yaml b/kubernetes/sdc/templates/sdc-be.yaml
index 29f029f9ed..0ba4db50ce 100644
--- a/kubernetes/sdc/templates/sdc-be.yaml
+++ b/kubernetes/sdc/templates/sdc-be.yaml
@@ -86,8 +86,12 @@ spec:
name: sdc-logs
- mountPath: /var/log/onap
name: sdc-logs-2
- - mountPath: /var/lib/jetty/config/catalog-be/logback.xml
+ - mountPath: /tmp/logback.xml
name: sdc-logback
+ lifecycle:
+ postStart:
+ exec:
+ command: ["/bin/sh", "-c", "export LOG=wait_logback.log; touch $LOG; export SRC=/tmp/logback.xml; export DST=/var/lib/jetty/config/catalog-be/; while [ ! -e $DST ]; do echo 'Waiting for $DST...' >> $LOG; sleep 5; done; sleep 2; /bin/cp -f $SRC $DST; echo 'Done' >> $LOG"]
ports:
- containerPort: 8443
- containerPort: 8080
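
The backend's logback.xml is now mounted at /tmp/logback.xml and copied into the Jetty config directory by a postStart hook that waits for the directory to appear; note that the single-quoted 'Waiting for $DST...' string is logged literally, without expanding $DST. An unrolled sketch of the same loop, using the paths from the hook above:

    # postStart copy loop from the hook above, expanded for readability
    LOG=wait_logback.log; touch "$LOG"
    SRC=/tmp/logback.xml
    DST=/var/lib/jetty/config/catalog-be/
    while [ ! -e "$DST" ]; do
      echo "Waiting for $DST..." >> "$LOG"   # double quotes so $DST expands in the log
      sleep 5
    done
    sleep 2
    /bin/cp -f "$SRC" "$DST"
    echo 'Done' >> "$LOG"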
diff --git a/kubernetes/sdnc/templates/all-services.yaml b/kubernetes/sdnc/templates/all-services.yaml
index 7223dfdcd7..687a6bb934 100644
--- a/kubernetes/sdnc/templates/all-services.yaml
+++ b/kubernetes/sdnc/templates/all-services.yaml
@@ -78,10 +78,13 @@ metadata:
]'
spec:
ports:
- - name: "sdnc-port"
+ - name: "sdnc-port-8181"
port: 8282
targetPort: 8181
nodePort: {{ .Values.nodePortPrefix }}02
+ - name: "sdnc-port-8101"
+ port: 8201
+ targetPort: 8101
type: NodePort
selector:
app: sdnc
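
The SDNC NodePort service now exposes a second mapping, port 8201 to container port 8101 (the OpenDaylight Karaf console), alongside the renamed 8282-to-8181 entry; since no explicit nodePort is given for the new port, Kubernetes assigns one from the NodePort range. A quick way to see both mappings on a running deployment, assuming the default "onap" prefix:

    # Show the SDNC service port mappings (namespace name assumes nsPrefix=onap)
    kubectl --namespace onap-sdnc get services | grep 8201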
diff --git a/kubernetes/sdnc/templates/dgbuilder-deployment.yaml b/kubernetes/sdnc/templates/dgbuilder-deployment.yaml
index ba2008ab4a..0e2166e92a 100644
--- a/kubernetes/sdnc/templates/dgbuilder-deployment.yaml
+++ b/kubernetes/sdnc/templates/dgbuilder-deployment.yaml
@@ -45,12 +45,12 @@ spec:
- command:
- /bin/bash
- -c
- - cd /opt/openecomp/sdnc/dgbuilder/ && ./start.sh sdnc1.0 && wait
+ - cd /opt/onap/sdnc/dgbuilder/ && ./start.sh sdnc1.0 && wait
env:
- name: MYSQL_ROOT_PASSWORD
value: openECOMP1.0
- name: SDNC_CONFIG_DIR
- value: /opt/openecomp/sdnc/data/properties
+ value: /opt/onap/sdnc/data/properties
image: {{ .Values.image.dgbuilderSdnc }}
imagePullPolicy: {{ .Values.pullPolicy }}
name: sdnc-dgbuilder-container
diff --git a/kubernetes/sdnc/templates/sdnc-deployment.yaml b/kubernetes/sdnc/templates/sdnc-deployment.yaml
index 8b4d0ebad6..95c282b602 100644
--- a/kubernetes/sdnc/templates/sdnc-deployment.yaml
+++ b/kubernetes/sdnc/templates/sdnc-deployment.yaml
@@ -41,12 +41,12 @@ spec:
spec:
containers:
- command:
- - /opt/openecomp/sdnc/bin/startODL.sh
+ - /opt/onap/sdnc/bin/startODL.sh
env:
- name: MYSQL_ROOT_PASSWORD
value: openECOMP1.0
- name: SDNC_CONFIG_DIR
- value: /opt/openecomp/sdnc/data/properties
+ value: /opt/onap/sdnc/data/properties
image: {{ .Values.image.sdnc }}
imagePullPolicy: {{ .Values.pullPolicy }}
name: sdnc-controller-container
@@ -54,9 +54,9 @@ spec:
- mountPath: /etc/localtime
name: localtime
readOnly: true
- - mountPath: /opt/openecomp/sdnc/data/properties/aaiclient.properties
+ - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
name: sdnc-aaiclient-properties
- - mountPath: /opt/openecomp/sdnc/data/properties/admportal.json
+ - mountPath: /opt/onap/sdnc/data/properties/admportal.json
name: sdnc-admportal-json
- mountPath: /var/log/onap
name: sdnc-logs
@@ -64,6 +64,7 @@ spec:
name: sdnc-log-config
ports:
- containerPort: 8181
+ - containerPort: 8101
readinessProbe:
tcpSocket:
port: 8181
diff --git a/kubernetes/sdnc/templates/web-deployment.yaml b/kubernetes/sdnc/templates/web-deployment.yaml
index 4f7886dc9c..45e2e26df3 100644
--- a/kubernetes/sdnc/templates/web-deployment.yaml
+++ b/kubernetes/sdnc/templates/web-deployment.yaml
@@ -45,12 +45,12 @@ spec:
- command:
- /bin/bash
- -c
- - cd /opt/openecomp/sdnc/admportal/shell && ./start_portal.sh
+ - cd /opt/onap/sdnc/admportal/shell && ./start_portal.sh
env:
- name: MYSQL_ROOT_PASSWORD
value: openECOMP1.0
- name: SDNC_CONFIG_DIR
- value: /opt/openecomp/sdnc/data/properties
+ value: /opt/onap/sdnc/data/properties
image: {{ .Values.image.admportalSdnc }}
imagePullPolicy: {{ .Values.pullPolicy }}
name: sdnc-portal-container
@@ -60,6 +60,10 @@ spec:
- name: localtime
mountPath: /etc/localtime
readOnly: true
+ - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
+ name: sdnc-aaiclient-properties
+ - mountPath: /opt/onap/sdnc/data/properties/admportal.json
+ name: sdnc-admportal-json
readinessProbe:
tcpSocket:
port: 8843
@@ -70,5 +74,11 @@ spec:
- name: localtime
hostPath:
path: /etc/localtime
+ - name: sdnc-aaiclient-properties
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/aaiclient.properties
+ - name: sdnc-admportal-json
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/admportal.json
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
diff --git a/kubernetes/sdnc/values.yaml b/kubernetes/sdnc/values.yaml
index 20d045807e..f7cbecab9f 100644
--- a/kubernetes/sdnc/values.yaml
+++ b/kubernetes/sdnc/values.yaml
@@ -4,7 +4,7 @@ nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
mysqlServer: mysql/mysql-server:5.6
- dgbuilderSdnc: nexus3.onap.org:10001/openecomp/dgbuilder-sdnc-image:1.1-STAGING-latest
- sdnc: nexus3.onap.org:10001/openecomp/sdnc-image:1.1-STAGING-latest
- admportalSdnc: nexus3.onap.org:10001/openecomp/admportal-sdnc-image:1.1-STAGING-latest
+ dgbuilderSdnc: nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:0.1-STAGING-latest
+ sdnc: nexus3.onap.org:10001/onap/sdnc-image:1.2-STAGING-latest
+ admportalSdnc: nexus3.onap.org:10001/onap/admportal-sdnc-image:1.2-STAGING-latest
filebeat: docker.elastic.co/beats/filebeat:5.5.0 \ No newline at end of file
diff --git a/kubernetes/vfc/templates/all-services.yaml b/kubernetes/vfc/templates/all-services.yaml
index 9a6f6f25a9..a54dc00b2c 100755
--- a/kubernetes/vfc/templates/all-services.yaml
+++ b/kubernetes/vfc/templates/all-services.yaml
@@ -5,6 +5,17 @@ metadata:
app: vfc-nslcm
name: vfc-nslcm
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "nslcm",
+ "version": "v1",
+ "url": "/api/nslcm/v1",
+ "protocol": "REST",
+ "port": "{{.Values.nslcm.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-nslcm
@@ -22,6 +33,17 @@ metadata:
app: vfc-resmgr
name: vfc-resmgr
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "resmgr",
+ "version": "v1",
+ "url": "/api/resmgr/v1",
+ "protocol": "REST",
+ "port": "{{.Values.resmgr.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-resmgr
@@ -39,6 +61,17 @@ metadata:
app: vfc-gvnfmdriver
name: vfc-gvnfmdriver
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "gvnfmdriver",
+ "version": "v1",
+ "url": "/api/gvnfmdriver/v1",
+ "protocol": "REST",
+ "port": "{{.Values.gvnfmdriver.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-gvnfmdriver
@@ -56,6 +89,17 @@ metadata:
app: vfc-ztevmanagerdriver
name: vfc-ztevmanagerdriver
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "ztevmanagerdriver",
+ "version": "v1",
+ "url": "/api/ztevmanagerdriver/v1",
+ "protocol": "REST",
+ "port": "{{.Values.ztevmanagerdriver.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-ztevmanagerdriver
@@ -73,6 +117,17 @@ metadata:
app: vfc-hwvnfmdriver
name: vfc-hwvnfmdriver
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "huaweivnfmdriver",
+ "version": "v1",
+ "url": "/api/huaweivnfmdriver/v1",
+ "protocol": "REST",
+ "port": "{{.Values.hwvnfmdriver.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-ztevmanagerdriver
@@ -90,6 +145,17 @@ metadata:
app: vfc-ztesdncdriver
name: vfc-ztesdncdriver
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "ztesdncdriver",
+ "version": "v1",
+ "url": "/api/ztesdncdriver/v1",
+ "protocol": "REST",
+ "port": "{{.Values.ztesdncdriver.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-ztesdncdriver
@@ -107,6 +173,17 @@ metadata:
app: vfc-jujudriver
name: vfc-jujudriver
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "jujuvnfmdriver",
+ "version": "v1",
+ "url": "/api/jujuvnfmdriver/v1",
+ "protocol": "REST",
+ "port": "{{.Values.jujudriver.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-jujudriver
@@ -124,6 +201,17 @@ metadata:
app: vfc-vnflcm
name: vfc-vnflcm
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "vnflcm",
+ "version": "v1",
+ "url": "/api/vnflcm/v1",
+ "protocol": "REST",
+ "port": "{{.Values.vnflcm.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-vnflcm
@@ -141,6 +229,17 @@ metadata:
app: vfc-vnfres
name: vfc-vnfres
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "vnfres",
+ "version": "v1",
+ "url": "/api/vnfres/v1",
+ "protocol": "REST",
+ "port": "{{.Values.vnfres.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-vnfres
@@ -158,6 +257,17 @@ metadata:
app: vfc-vnfmgr
name: vfc-vnfmgr
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "vnfmgr",
+ "version": "v1",
+ "url": "/api/vnfmgr/v1",
+ "protocol": "REST",
+ "port": "{{.Values.vnfmgr.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-vnfmgr
@@ -175,6 +285,17 @@ metadata:
app: vfc-emsdriver
name: vfc-emsdriver
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "emsdriver",
+ "version": "v1",
+ "url": "/api/emsdriver/v1",
+ "protocol": "REST",
+ "port": "{{.Values.emsdriver.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-emsdriver
@@ -191,6 +312,17 @@ metadata:
app: vfc-workflow
name: vfc-workflow
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "workflow",
+ "version": "v1",
+ "url": "/api/workflow/v1",
+ "protocol": "REST",
+ "port": "{{.Values.workflow.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-workflow
@@ -208,6 +340,17 @@ metadata:
app: vfc-catalog
name: vfc-catalog
namespace: "{{ .Values.nsPrefix }}-vfc"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "catalog",
+ "version": "v1",
+ "url": "/api/catalog/v1",
+ "protocol": "REST",
+ "port": "{{.Values.catalog.port}}",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: vfc-catalog
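
Each VF-C service now carries an msb.onap.org/service-info annotation describing its REST endpoint (serviceName, version, url, port), which the Microservices Bus uses to register the service for discovery. One way to read the registration data back from a deployed service, assuming the default "onap" prefix:

    # Dump the MSB registration annotation for the nslcm service (namespace assumes nsPrefix=onap)
    kubectl --namespace onap-vfc get service vfc-nslcm \
      -o jsonpath='{.metadata.annotations.msb\.onap\.org/service-info}'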
diff --git a/onap-blueprint.yaml b/onap-blueprint.yaml
deleted file mode 100644
index 699312bea8..0000000000
--- a/onap-blueprint.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
-  Blueprint deploys all ONAP kubernetes resources defined in YAML files on an existing kubernetes cluster.
- The following pre-setup steps are assumed, but not required:
- - Create Cloudify Example Environment: https://github.com/cloudify-examples/cloudify-environment-setup.
- - Create Kubernetes Cluster: https://github.com/cloudify-examples/simple-kubernetes-blueprint.
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.1rc1/cloudify_kubernetes_plugin-1.2.1rc1-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1rc1/plugin.yaml
- # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-fabric-plugin/1.4.2/cloudify_fabric_plugin-1.4.2-py27-none-linux_x86_64-centos-Core.wgn
- - http://www.getcloudify.org/spec/fabric-plugin/1.4.2/plugin.yaml
- - cloudify/types/onap.yaml
-
-inputs:
- kubernetes_configuration_file_content:
- description: >
- File content of kubernetes master YAML configuration
-
- namespace_prefix:
- type: string
- description: >
-      Kubernetes namespace name prefix which will be used for all ONAP apps
- default: onap
-
-dsl_definitions:
- options: &app_options
- namespace:
- concat: [{ get_input: namespace_prefix }, '-', { get_property: [SELF, name] }]
-
-node_templates:
- kubernetes_master:
- type: cloudify.kubernetes.nodes.Master
- properties:
- configuration:
- file_content: { get_input: kubernetes_configuration_file_content }
-
- onap_environment:
- type: cloudify.onap.kubernetes.Environment
- properties:
- namespace: { get_input: namespace_prefix }
- init_pod: kubernetes/config/pod-config-init.yaml
- options:
- namespace: { get_input: namespace_prefix }
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
-
- mso_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: mso
- values: kubernetes/mso/values.yaml
- resources:
- - kubernetes/mso/templates/mso-deployment.yaml
- - kubernetes/mso/templates/db-deployment.yaml
- services: kubernetes/mso/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- message_router_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: message-router
- values: kubernetes/message-router/values.yaml
- resources:
- - kubernetes/message-router/templates/message-router-zookeeper.yaml
- - kubernetes/message-router/templates/message-router-dmaap.yaml
- - kubernetes/message-router/templates/message-router-kafka.yaml
- services: kubernetes/message-router/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- sdc_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: sdc
- values: kubernetes/sdc/values.yaml
- resources:
- - kubernetes/sdc/templates/sdc-es.yaml
- - kubernetes/sdc/templates/sdc-fe.yaml
- - kubernetes/sdc/templates/sdc-kb.yaml
- - kubernetes/sdc/templates/sdc-cs.yaml
- - kubernetes/sdc/templates/sdc-be.yaml
- services: kubernetes/sdc/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- aai_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: aai
- values: kubernetes/aai/values.yaml
- resources:
- - kubernetes/aai/templates/aai-deployment.yaml
- - kubernetes/aai/templates/modelloader-deployment.yaml
- - kubernetes/aai/templates/hbase-deployment.yaml
- services: kubernetes/aai/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- robot_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: robot
- values: kubernetes/robot/values.yaml
- resources:
- - kubernetes/robot/templates/robot-deployment.yaml
- services: kubernetes/robot/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- vid_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: vid
- values: kubernetes/vid/values.yaml
- resources:
- - kubernetes/templates/vid-mariadb-deployment.yaml
- - kubernetes/templates/vid-server-deployment.yaml
- services: kubernetes/vid/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- sdnc_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: sdnc
- values: kubernetes/sdnc/values.yaml
- resources:
- - kubernetes/sdnc/templates/web-deployment.yaml
- - kubernetes/sdnc/templates/sdnc-deployment.yaml
- - kubernetes/sdnc/templates/dgbuilder-deployment.yaml
- - kubernetes/sdnc/templates/db-deployment.yaml
- services: kubernetes/sdnc/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- portal_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: portal
- values: kubernetes/portal/values.yaml
- resources:
- - kubernetes/portal/templates/portal-widgets-deployment.yaml
- - kubernetes/portal/templates/portal-apps-deployment.yaml
- - kubernetes/portal/templates/portal-mariadb-deployment.yaml
- - kubernetes/portal/templates/portal-vnc-dep.yaml
- services: kubernetes/portal/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- policy_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: policy
- values: kubernetes/policy/values.yaml
- resources:
- - kubernetes/policy/templates/dep-drools.yaml
- - kubernetes/policy/templates/dep-nexus.yaml
- - kubernetes/policy/templates/dep-brmsgw.yaml
- - kubernetes/policy/templates/dep-pdp.yaml
- - kubernetes/policy/templates/dep-pap.yaml
- - kubernetes/policy/templates/dep-maria.yaml
- - kubernetes/policy/templates/dep-pypdp.yaml
- services: kubernetes/policy/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-
- appc_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: appc
- values: kubernetes/appc/values.yaml
- resources:
- - kubernetes/appc/templates/appc-deployment.yaml
- - kubernetes/appc/templates/dgbuilder-deployment.yaml
- - kubernetes/appc/templates/db-deployment.yaml
- services: kubernetes/appc/templates/all-services.yaml
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
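
The removed blueprint built each app's namespace with the *app_options anchor, concatenating namespace_prefix, '-', and the node's name property; the same prefix-app namespaces are what the oneclick scripts create and delete. A trivial check for them on a cluster deployed with the default prefix:

    # Per-app namespaces produced by the prefix + '-' + app-name convention (default prefix assumed)
    kubectl get namespaces | grep '^onap-'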