-rw-r--r--INFO.yaml10
-rw-r--r--TOSCA/VM/MessageRouter/MessageRouter.yaml259
-rw-r--r--TOSCA/VM/MessageRouter/scripts/messagerouter.sh59
-rw-r--r--TOSCA/VM/appc/appc.yaml278
-rw-r--r--TOSCA/VM/appc/scripts/appc.sh75
-rw-r--r--TOSCA/VM/policy/Policy.yaml263
-rw-r--r--TOSCA/VM/policy/scripts/policy.sh62
-rw-r--r--TOSCA/VM/so/scripts/so.sh81
-rw-r--r--TOSCA/VM/so/so.yaml273
-rw-r--r--cloudify-onap/blueprint.yaml188
-rw-r--r--cloudify-onap/docker-custom-readiness/Dockerfile21
-rw-r--r--cloudify-onap/docker-custom-readiness/ready.py85
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py14
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py47
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py14
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py20
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py27
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py62
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py63
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py101
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py230
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py67
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py58
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt1
-rwxr-xr-xcloudify-onap/plugins/onap-installation-plugin/plugin.yaml87
-rw-r--r--cloudify-onap/plugins/onap-installation-plugin/requirements.txt1
-rwxr-xr-xcloudify-onap/plugins/onap-installation-plugin/setup.py40
-rw-r--r--docs/Access_Rancher_server_via_web_browser.jpegbin0 -> 135026 bytes
-rw-r--r--docs/Add_Kubernetes_Environment_to_Rancher.pngbin0 -> 49242 bytes
-rw-r--r--docs/Apply_customization_script_for_the_Rancher_VM.jpegbin0 -> 153028 bytes
-rw-r--r--docs/Click_create.jpegbin0 -> 131952 bytes
-rw-r--r--docs/Click_on_CLI_and_then_click_on_Generate_Config.jpegbin0 -> 91262 bytes
-rw-r--r--docs/Click_on_Close_button.jpegbin0 -> 158513 bytes
-rw-r--r--docs/Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpegbin0 -> 92239 bytes
-rw-r--r--docs/K8s-Assign_Floating_IP_for_external_access.jpegbin0 -> 64999 bytes
-rw-r--r--docs/K8s-Key_Pair.jpegbin0 -> 87469 bytes
-rw-r--r--docs/K8s-Launch_Instance.jpegbin0 -> 53520 bytes
-rw-r--r--docs/K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpegbin0 -> 84468 bytes
-rw-r--r--docs/K8s-Manage_Floating_IP_Associations.jpegbin0 -> 33961 bytes
-rw-r--r--docs/K8s-Networking.jpegbin0 -> 96431 bytes
-rw-r--r--docs/K8s-Security_Group.jpegbin0 -> 73732 bytes
-rw-r--r--docs/K8s-Select_Flavor.jpegbin0 -> 148973 bytes
-rw-r--r--docs/K8s-Select_Ubuntu_16.04_as_base_image.jpegbin0 -> 136070 bytes
-rw-r--r--docs/Rancher-Allocate_Floating_IP.jpegbin0 -> 32404 bytes
-rw-r--r--docs/Rancher-Key_Pair.jpegbin0 -> 90739 bytes
-rw-r--r--docs/Rancher-Launch_Instance.jpegbin0 -> 51541 bytes
-rw-r--r--docs/Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpegbin0 -> 448601 bytes
-rw-r--r--docs/Rancher-Manage_Floating_IP_Associations.jpegbin0 -> 33712 bytes
-rw-r--r--docs/Rancher-Networking.jpegbin0 -> 93256 bytes
-rw-r--r--docs/Rancher-Security_Groups.jpegbin0 -> 77292 bytes
-rw-r--r--docs/Rancher-Select_Flavor.jpegbin0 -> 142727 bytes
-rw-r--r--docs/Rancher-Select_Ubuntu_16.04_as_base_image.jpegbin0 -> 139303 bytes
-rw-r--r--docs/Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpegbin0 -> 60818 bytes
-rw-r--r--docs/Select_Add_Environment.pngbin0 -> 38073 bytes
-rw-r--r--docs/and_click_on_Save_accept_defaults.jpegbin0 -> 77083 bytes
-rw-r--r--docs/index.rst1
-rw-r--r--docs/k8s-topology.jpgbin0 -> 57806 bytes
-rw-r--r--docs/master_nfs_node.sh32
-rw-r--r--docs/oom_cloud_setup_guide.rst49
-rw-r--r--docs/oom_quickstart_guide.rst9
-rw-r--r--docs/oom_setup_kubernetes_rancher.rst478
-rw-r--r--docs/oom_user_guide.rst6
-rw-r--r--docs/openstack-k8s-node.sh46
-rw-r--r--docs/openstack-rancher.sh51
-rw-r--r--docs/otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpgbin0 -> 46511 bytes
-rw-r--r--docs/release-notes.rst154
-rw-r--r--docs/slave_nfs_node.sh25
-rw-r--r--kubernetes/aaf/charts/aaf-cm/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-cm/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-cs/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-cs/values.yaml4
-rw-r--r--kubernetes/aaf/charts/aaf-fs/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-fs/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-gui/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-gui/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-hello/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-hello/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-locate/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-locate/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-oauth/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-oauth/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-service/templates/deployment.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-service/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml2
-rw-r--r--kubernetes/aaf/charts/aaf-sms/values.yaml2
-rw-r--r--kubernetes/aaf/resources/config/local/org.osaaf.aaf.p12bin4140 -> 4180 bytes
-rw-r--r--kubernetes/aaf/resources/config/local/org.osaaf.aaf_new-24e41f2f436018568cbdecdc1edbd605.p12bin4140 -> 0 bytes
-rw-r--r--kubernetes/aaf/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-babel/resources/config/artifact-generator.properties3
-rw-r--r--kubernetes/aai/charts/aai-babel/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-champ/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-data-router/resources/config/schemaIngest.properties31
-rw-r--r--kubernetes/aai/charts/aai-data-router/resources/dynamic/conf/entity-event-policy.xml4
-rw-r--r--kubernetes/aai/charts/aai-data-router/templates/configmap.yaml2
-rw-r--r--kubernetes/aai/charts/aai-data-router/templates/deployment.yaml10
-rw-r--r--kubernetes/aai/charts/aai-data-router/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-elasticsearch/templates/deployment.yaml1
-rw-r--r--kubernetes/aai/charts/aai-elasticsearch/templates/service.yaml2
-rw-r--r--kubernetes/aai/charts/aai-gizmo/resources/config/log/logback.xml384
-rw-r--r--kubernetes/aai/charts/aai-gizmo/resources/config/schemaIngest.properties31
-rw-r--r--kubernetes/aai/charts/aai-gizmo/templates/deployment.yaml5
-rw-r--r--kubernetes/aai/charts/aai-gizmo/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-modelloader/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-resources/resources/config/application.properties2
-rw-r--r--kubernetes/aai/charts/aai-resources/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-search-data/values.yaml2
-rw-r--r--kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml8
-rw-r--r--kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml25
-rw-r--r--kubernetes/aai/charts/aai-sparky-be/templates/service.yaml3
-rw-r--r--kubernetes/aai/charts/aai-sparky-be/values.yaml7
-rw-r--r--kubernetes/aai/charts/aai-traversal/values.yaml2
-rw-r--r--kubernetes/aai/resources/config/aai/aai_keystorebin5163 -> 4929 bytes
-rw-r--r--kubernetes/aai/resources/config/haproxy/haproxy.cfg8
-rw-r--r--kubernetes/aai/values.yaml2
-rw-r--r--kubernetes/appc/charts/appc-cdt/templates/deployment.yaml7
-rw-r--r--kubernetes/appc/charts/appc-cdt/values.yaml2
-rwxr-xr-xkubernetes/appc/resources/config/appc/opt/onap/appc/bin/health_check.sh15
-rwxr-xr-xkubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh15
-rw-r--r--kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaa-app-config.xml120
-rw-r--r--kubernetes/appc/templates/service.yaml7
-rw-r--r--kubernetes/appc/templates/statefulset.yaml13
-rw-r--r--kubernetes/appc/values.yaml12
-rw-r--r--kubernetes/clamp/charts/clamp-dash-es/values.yaml16
-rw-r--r--kubernetes/clamp/charts/clamp-dash-kibana/values.yaml17
-rw-r--r--kubernetes/clamp/charts/clamp-dash-logstash/values.yaml16
-rw-r--r--kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf8
-rw-r--r--kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql7
-rw-r--r--kubernetes/clamp/charts/mariadb/values.yaml16
-rw-r--r--kubernetes/clamp/resources/config/log/filebeat/filebeat.yml (renamed from kubernetes/config/docker/init/src/config/log/filebeat/logback/filebeat.yml)2
-rw-r--r--kubernetes/clamp/resources/config/logback.xml236
-rw-r--r--kubernetes/clamp/templates/configmap.yaml8
-rw-r--r--kubernetes/clamp/templates/deployment.yaml29
-rw-r--r--kubernetes/clamp/values.yaml29
-rw-r--r--kubernetes/cli/values.yaml2
-rw-r--r--kubernetes/common/dgbuilder/resources/config/svclogic.properties2
-rwxr-xr-xkubernetes/common/dgbuilder/resources/scripts/createReleaseDir.sh4
-rw-r--r--kubernetes/common/dgbuilder/resources/scripts/customSettings.js8
-rw-r--r--kubernetes/common/dgbuilder/values.yaml6
-rw-r--r--kubernetes/config/.helmignore25
-rwxr-xr-xkubernetes/config/createConfig.sh47
-rw-r--r--kubernetes/config/docker/init/Dockerfile17
-rwxr-xr-xkubernetes/config/docker/init/config-init.sh127
-rwxr-xr-xkubernetes/config/docker/init/make-tar.sh18
-rw-r--r--kubernetes/config/docker/init/src/config/log/filebeat/log4j/filebeat.yml49
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.kafka_cleanshutdown0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.lock0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.logbin86 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.logbin86 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.logbin5150 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.indexbin48 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.logbin34764 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/recovery-point-offset-checkpoint27
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/replication-offset-checkpoint27
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.index0
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.logbin43 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.1bin67108880 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.103bin67108880 -> 0 bytes
-rw-r--r--kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.125bin67108880 -> 0 bytes
-rw-r--r--kubernetes/config/onap-parameters-sample.yaml29
-rw-r--r--kubernetes/config/templates/pod.yaml52
-rw-r--r--kubernetes/consul/charts/consul-server/templates/service.yaml1
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-cluster-health.sh58
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/sdnc-cluster-health.json14
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json2
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json2
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json2
-rw-r--r--kubernetes/consul/templates/deployment.yaml6
-rw-r--r--kubernetes/consul/values.yaml10
-rw-r--r--kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml20
-rw-r--r--kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml2
-rw-r--r--kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml2
-rwxr-xr-xkubernetes/dcaegen2/charts/dcae-redis/resources/redis/scripts/redis-cluster-config.sh4
-rw-r--r--kubernetes/dcaegen2/charts/dcae-redis/values.yaml2
-rw-r--r--kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml2
-rw-r--r--kubernetes/dmaap/charts/message-router/charts/message-router-kafka/values.yaml2
-rw-r--r--kubernetes/dmaap/charts/message-router/charts/message-router-zookeeper/values.yaml2
-rw-r--r--kubernetes/esr/charts/esr-gui/values.yaml2
-rw-r--r--kubernetes/esr/values.yaml2
-rw-r--r--kubernetes/log/charts/log-logstash/resources/config/logstash.yml1
-rw-r--r--kubernetes/msb/charts/kube2msb/values.yaml3
-rw-r--r--kubernetes/msb/charts/msb-consul/templates/deployment.yaml2
-rw-r--r--kubernetes/msb/charts/msb-consul/templates/service.yaml4
-rw-r--r--kubernetes/msb/charts/msb-consul/values.yaml1
-rw-r--r--kubernetes/msb/charts/msb-discovery/templates/deployment.yaml2
-rw-r--r--kubernetes/msb/charts/msb-discovery/templates/service.yaml4
-rw-r--r--kubernetes/msb/charts/msb-discovery/values.yaml3
-rw-r--r--kubernetes/msb/charts/msb-eag/templates/deployment.yaml2
-rw-r--r--kubernetes/msb/charts/msb-eag/templates/service.yaml8
-rw-r--r--kubernetes/msb/charts/msb-eag/values.yaml3
-rw-r--r--kubernetes/msb/charts/msb-iag/templates/deployment.yaml2
-rw-r--r--kubernetes/msb/charts/msb-iag/templates/service.yaml8
-rw-r--r--kubernetes/msb/charts/msb-iag/values.yaml3
-rw-r--r--kubernetes/multicloud/charts/multicloud-ocata/templates/deployment.yaml2
-rw-r--r--kubernetes/multicloud/charts/multicloud-ocata/templates/service.yaml4
-rw-r--r--kubernetes/multicloud/charts/multicloud-ocata/values.yaml5
-rw-r--r--kubernetes/multicloud/charts/multicloud-vio/templates/deployment.yaml2
-rw-r--r--kubernetes/multicloud/charts/multicloud-vio/templates/service.yaml4
-rw-r--r--kubernetes/multicloud/charts/multicloud-vio/values.yaml5
-rw-r--r--kubernetes/multicloud/charts/multicloud-windriver/templates/deployment.yaml2
-rw-r--r--kubernetes/multicloud/charts/multicloud-windriver/templates/service.yaml4
-rw-r--r--kubernetes/multicloud/charts/multicloud-windriver/values.yaml5
-rw-r--r--kubernetes/multicloud/values.yaml5
-rw-r--r--kubernetes/nbi/templates/deployment.yaml2
-rw-r--r--kubernetes/nbi/values.yaml4
-rw-r--r--kubernetes/onap/values.yaml9
-rwxr-xr-xkubernetes/oof/charts/oof-has/charts/oof-has-data/templates/deployment.yaml8
-rw-r--r--kubernetes/oof/charts/oof-has/charts/oof-has-music/values.yaml2
-rwxr-xr-xkubernetes/oof/charts/oof-has/resources/config/AAF_RootCA.cer31
-rwxr-xr-xkubernetes/oof/charts/oof-has/resources/config/conductor.conf45
-rwxr-xr-xkubernetes/oof/charts/oof-has/values.yaml10
-rwxr-xr-xkubernetes/oof/resources/config/osdf_config.yaml21
-rw-r--r--kubernetes/oof/values.yaml6
-rw-r--r--kubernetes/policy/charts/brmsgw/values.yaml2
-rw-r--r--kubernetes/policy/charts/drools/charts/nexus/values.yaml4
-rw-r--r--kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh11
-rw-r--r--kubernetes/policy/charts/drools/values.yaml8
-rw-r--r--kubernetes/policy/charts/mariadb/values.yaml4
-rw-r--r--kubernetes/policy/charts/pdp/values.yaml4
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/Chart.yaml22
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/requirements.yaml22
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/resources/config/config.json78
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/templates/NOTES.txt37
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/templates/configmap.yaml26
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/templates/service.yaml44
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/templates/statefulset.yaml99
-rw-r--r--kubernetes/policy/charts/policy-apex-pdp/values.yaml68
-rw-r--r--kubernetes/policy/values.yaml6
-rw-r--r--kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/logback.xml7
-rw-r--r--kubernetes/portal/charts/portal-app/values.yaml2
-rw-r--r--kubernetes/portal/charts/portal-mariadb/values.yaml6
-rw-r--r--kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml10
-rw-r--r--kubernetes/portal/charts/portal-sdk/templates/service.yaml2
-rw-r--r--kubernetes/portal/charts/portal-sdk/values.yaml6
-rw-r--r--kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties2
-rw-r--r--kubernetes/portal/charts/portal-widget/values.yaml2
-rw-r--r--kubernetes/portal/docker/init/mariadb-client/Dockerfile2
-rwxr-xr-xkubernetes/robot/resources/config/eteshare/config/vm_properties.py4
-rw-r--r--kubernetes/robot/templates/deployment.yaml11
-rw-r--r--kubernetes/robot/templates/pv.yaml (renamed from kubernetes/vid/charts/mariadb/templates/pv.yaml)0
-rw-r--r--kubernetes/robot/templates/pvc.yaml (renamed from kubernetes/vid/charts/mariadb/templates/pvc.yaml)0
-rw-r--r--kubernetes/robot/values.yaml30
-rw-r--r--kubernetes/sdc/charts/sdc-act/.helmignore (renamed from kubernetes/vid/charts/mariadb/.helmignore)0
-rw-r--r--kubernetes/sdc/charts/sdc-act/Chart.yaml (renamed from kubernetes/config/Chart.yaml)6
-rw-r--r--kubernetes/sdc/charts/sdc-act/resources/config/configuration.yaml12
-rw-r--r--kubernetes/sdc/charts/sdc-act/templates/NOTES.txt33
-rw-r--r--kubernetes/sdc/charts/sdc-act/templates/configmap.yaml (renamed from kubernetes/vid/charts/mariadb/templates/configmap.yaml)4
-rw-r--r--kubernetes/sdc/charts/sdc-act/templates/deployment.yaml (renamed from kubernetes/vid/charts/mariadb/templates/deployment.yaml)87
-rw-r--r--kubernetes/sdc/charts/sdc-act/templates/job.yaml64
-rw-r--r--kubernetes/sdc/charts/sdc-act/templates/service.yaml39
-rw-r--r--kubernetes/sdc/charts/sdc-act/values.yaml (renamed from kubernetes/vid/charts/mariadb/values.yaml)73
-rw-r--r--kubernetes/sdc/charts/sdc-be/values.yaml4
-rw-r--r--kubernetes/sdc/charts/sdc-cs/values.yaml4
-rw-r--r--kubernetes/sdc/charts/sdc-es/values.yaml4
-rw-r--r--kubernetes/sdc/charts/sdc-fe/values.yaml2
-rw-r--r--kubernetes/sdc/charts/sdc-kb/values.yaml2
-rw-r--r--kubernetes/sdc/charts/sdc-onboarding-be/values.yaml4
-rw-r--r--kubernetes/sdc/charts/sdc-wfd/values.yaml2
-rw-r--r--kubernetes/sdnc/Makefile51
-rw-r--r--kubernetes/sdnc/charts/dmaap-listener/values.yaml2
-rw-r--r--kubernetes/sdnc/charts/sdnc-ansible-server/values.yaml4
-rw-r--r--kubernetes/sdnc/charts/sdnc-portal/resources/config/admportal.json2
-rw-r--r--kubernetes/sdnc/charts/sdnc-portal/resources/config/dblib.properties2
-rw-r--r--kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties2
-rw-r--r--kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties.sdnctldb022
-rw-r--r--kubernetes/sdnc/charts/sdnc-portal/values.yaml5
-rw-r--r--kubernetes/sdnc/charts/ueb-listener/values.yaml2
-rw-r--r--kubernetes/sdnc/requirements.yaml6
-rw-r--r--kubernetes/sdnc/resources/config/bin/installSdncDb.sh3
-rwxr-xr-xkubernetes/sdnc/resources/config/bin/startODL.sh25
-rw-r--r--kubernetes/sdnc/resources/config/conf/dblib.properties2
-rw-r--r--kubernetes/sdnc/resources/config/conf/lcm-dg.properties6
-rw-r--r--kubernetes/sdnc/resources/config/conf/svclogic.properties2
-rw-r--r--kubernetes/sdnc/resources/env.yaml19
-rwxr-xr-xkubernetes/sdnc/resources/geo/bin/sdnc.cluster36
-rwxr-xr-xkubernetes/sdnc/resources/geo/bin/sdnc.failover65
-rwxr-xr-xkubernetes/sdnc/resources/geo/bin/sdnc.isPrimaryCluster18
-rwxr-xr-xkubernetes/sdnc/resources/geo/bin/sdnc.makeActive45
-rwxr-xr-x[-rw-r--r--]kubernetes/sdnc/resources/geo/bin/sdnc.monitor (renamed from kubernetes/config/onap-parameters.yaml)42
-rwxr-xr-xkubernetes/sdnc/resources/geo/bin/switchVoting.sh46
-rw-r--r--kubernetes/sdnc/sdnc-prom/Chart.yaml (renamed from kubernetes/config/values.yaml)12
-rw-r--r--kubernetes/sdnc/sdnc-prom/requirements.yaml18
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncActive.sh105
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncStandby.sh58
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/prom.sh31
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/sdnc.cluster61
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/sdnc.dnsswitch22
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/sdnc.failover86
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/sdnc.monitor125
-rwxr-xr-xkubernetes/sdnc/sdnc-prom/resources/bin/switchVoting.sh110
-rw-r--r--kubernetes/sdnc/sdnc-prom/resources/config/config.json19
-rw-r--r--kubernetes/sdnc/sdnc-prom/resources/config/healthchecks.json1
-rw-r--r--kubernetes/sdnc/sdnc-prom/templates/configmap.yaml (renamed from kubernetes/vid/charts/mariadb/templates/secrets.yaml)23
-rw-r--r--kubernetes/sdnc/sdnc-prom/templates/deployment.yaml95
-rw-r--r--kubernetes/sdnc/sdnc-prom/values.yaml102
-rw-r--r--kubernetes/sdnc/templates/configmap.yaml8
-rw-r--r--kubernetes/sdnc/templates/secrets.yaml14
-rw-r--r--kubernetes/sdnc/templates/statefulset.yaml5
-rw-r--r--kubernetes/sdnc/values.yaml9
-rw-r--r--kubernetes/sniro-emulator/values.yaml2
-rw-r--r--kubernetes/so/charts/mariadb/values.yaml6
-rwxr-xr-xkubernetes/so/resources/config/mso/mso-docker.json8
-rw-r--r--kubernetes/so/values.yaml2
-rw-r--r--kubernetes/uui/charts/uui-server/values.yaml6
-rw-r--r--kubernetes/uui/values.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-catalog/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-catalog/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-ems-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-ems-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-generic-vnfm-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-generic-vnfm-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-huawei-vnfm-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-huawei-vnfm-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-juju-vnfm-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-juju-vnfm-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-multivim-proxy/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-multivim-proxy/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-nokia-vnfm-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-nslcm/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-nslcm/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-resmgr/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-resmgr/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-vnflcm/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-vnflcm/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-vnfmgr/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-vnfmgr/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-vnfres/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-vnfres/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-workflow-engine/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-workflow-engine/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-workflow/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-workflow/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-zte-sdnc-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-zte-sdnc-driver/values.yaml5
-rw-r--r--kubernetes/vfc/charts/vfc-zte-vnfm-driver/templates/deployment.yaml2
-rw-r--r--kubernetes/vfc/charts/vfc-zte-vnfm-driver/values.yaml5
-rw-r--r--kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-my.cnf192
-rw-r--r--kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-pre-init.sql28
-rw-r--r--kubernetes/vid/charts/mariadb/templates/NOTES.txt11
-rw-r--r--kubernetes/vid/charts/vid-galera/Chart.yaml11
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/NOTES.txt12
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/configmap.yaml21
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/pv.yaml37
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/pvc.yaml48
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/secrets.yaml14
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/service.yaml20
-rw-r--r--kubernetes/vid/charts/vid-galera/templates/statefulset.yaml120
-rw-r--r--kubernetes/vid/charts/vid-galera/values.yaml118
-rw-r--r--kubernetes/vid/templates/check-job-completion-configmap.yaml83
-rw-r--r--kubernetes/vid/templates/cluster-ready-configmap.yaml89
-rw-r--r--kubernetes/vid/templates/dbcmd-configmap.yaml11
-rw-r--r--kubernetes/vid/templates/deployment.yaml28
-rw-r--r--kubernetes/vid/templates/galera-sql-configmap.yaml21
-rw-r--r--kubernetes/vid/templates/vid-galera-config-job.yaml70
-rw-r--r--kubernetes/vid/values.yaml16
-rw-r--r--kubernetes/vnfsdk/charts/vnfsdk-postgres/Chart.yaml (renamed from kubernetes/vid/charts/mariadb/Chart.yaml)4
-rw-r--r--kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml44
-rw-r--r--kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml (renamed from kubernetes/vid/charts/mariadb/templates/service.yaml)9
-rw-r--r--kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml63
-rw-r--r--kubernetes/vnfsdk/requirements.yaml3
-rw-r--r--kubernetes/vnfsdk/templates/deployment.yaml5
-rw-r--r--kubernetes/vnfsdk/values.yaml17
-rw-r--r--message-router-blueprint.yaml532
408 files changed, 4654 insertions, 4561 deletions
diff --git a/INFO.yaml b/INFO.yaml
index b5b6f9f3f8..d6393366e9 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -3,11 +3,11 @@ project: 'oom'
project_creation_date: '2017-06-15'
lifecycle_state: 'Incubation'
project_lead: &onap_releng_ptl
- name: 'David Sauvageau'
- email: 'david.sauvageau@bell.ca'
- id: 'david.sauvageau'
- company: 'Bell Canada'
- timezone: 'Canada/Montreal'
+ name: 'Mike Elliott'
+ email: 'mike.elliott@amdocs.com'
+ id: 'melliott'
+ company: 'Amdocs'
+ timezone: 'Canada/Ontario'
primary_contact: *onap_releng_ptl
issue_tracking:
type: 'jira'
diff --git a/TOSCA/VM/MessageRouter/MessageRouter.yaml b/TOSCA/VM/MessageRouter/MessageRouter.yaml
deleted file mode 100644
index 4ae5b52167..0000000000
--- a/TOSCA/VM/MessageRouter/MessageRouter.yaml
+++ /dev/null
@@ -1,259 +0,0 @@
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-###########################################################
-# This Blueprint installs ONAP Message Router on Openstack
-###########################################################
-
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
- This blueprint creates an ONAP Message Router
-
-imports:
- - http://www.getcloudify.org/spec/cloudify/4.1/types.yaml
- - http://cloudify.co/spec/openstack-plugin/2.2.0/plugin.yaml
- - http://cloudify.co/spec/fabric-plugin/1.5/plugin.yaml
- - http://www.getcloudify.org/spec/diamond-plugin/1.3.3/plugin.yaml
-
-inputs:
- nexus_repo:
- type: string
- default: https://nexus.onap.org/content/sites/raw
-
- docker_repo:
- type: string
- default: nexus3.onap.org:10001
-
- nexus_username:
- type: string
- default: docker
-
- nexus_password:
- type: string
- default: docker
-
- artifacts_version:
- type: string
- default: 1.1.0-SNAPSHOT
-
- dns_ip:
- type: string
-
- cloud_env:
- type: string
- default: openstack
-
- gerrit_branch:
- type: string
- default: master
-
- external_dns:
- type: string
-
- mr_repo:
- type: string
- default: http://gerrit.onap.org/r/dcae/demo/startup/message-router.git
-
- vm_instancename:
- type: string
-
- image:
- description: |
- An Ubuntu 14.04 base image
- type: string
- default: Ubuntu 14.04 LTS Generic
-
- flavor:
- description: >
- A machine type with more than 2 CPUs, 4096 GB RAM, and 8 GB of disk space.
- You might want to use 4 CPUs, 8192 MB RAM for the master.
- type: string
- default: m1.xlarge
-
- ssh_user:
- description: |
- User for connecting to agent VM
- type: string
- default: ubuntu
-
- security_group:
- description: |
- default
- type: string
- default: open
-
- keypair_name:
- description: |
- OpenStack keypair name
- type: string
-
- key_filename:
- type: string
-
- external_network:
- type: string
-
- app_subnet:
- type: string
-
- openstack_username:
- type: string
- description: username to authenticate to OpenStack
-
- openstack_password:
- type: string
- description: OpenStack tenant password for openstack_username user
-
- openstack_tenant_name:
- type: string
- description: OpenStack tenant for VM deploy
-
- openstack_auth_url:
- type: string
- description: Authentication URL for OpenStack
-
- openstack_region:
- type: string
- description: OpenStack region
-
-dsl_definitions:
- openstack_credentials: &openstack_credentials
- username: { get_input: openstack_username }
- password: { get_input: openstack_password }
- tenant_name: { get_input: openstack_tenant_name }
- auth_url: { get_input: openstack_auth_url }
- region: { get_input: openstack_region }
-
-node_templates:
-
- key_pair:
- type: cloudify.openstack.nodes.KeyPair
- properties:
- private_key_path: { get_input: key_filename }
- use_external_resource: true
- resource_id: { get_input: keypair_name }
- openstack_config: *openstack_credentials
-
- private_net:
- type: cloudify.openstack.nodes.Network
- properties:
- use_external_resource: true
- resource_id: { get_input: app_subnet }
- openstack_config: *openstack_credentials
-
- external_network:
- type: cloudify.openstack.nodes.Network
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: external_network}
-
- app_security_group:
- type: cloudify.openstack.nodes.SecurityGroup
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: security_group }
-
- VM_001_fixed_ip:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_credentials
- relationships:
- - type: cloudify.relationships.contained_in
- target: private_net
- - type: cloudify.openstack.port_connected_to_security_group
- target: app_security_group
-
- VM_001_floating_ip:
- type: cloudify.openstack.nodes.FloatingIP
- properties:
- openstack_config: *openstack_credentials
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- inputs:
- args:
- floating_network_name: { get_input: external_network }
-
- VM_001:
- type: cloudify.openstack.nodes.Server
- properties:
- openstack_config: *openstack_credentials
- install_agent: false
- image: { get_input: image }
- flavor: { get_input: flavor }
- resource_id: { get_input: vm_instancename }
- management_network_name: { get_input: app_subnet }
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
- relationships:
- - type: cloudify.openstack.server_connected_to_floating_ip
- target: VM_001_floating_ip
- - type: cloudify.openstack.server_connected_to_keypair
- target: key_pair
- - type: cloudify.openstack.server_connected_to_port
- target: VM_001_fixed_ip
-
-
- app_001:
- type: cloudify.nodes.SoftwareComponent
- properties:
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_script
- inputs:
- script_path: scripts/messagerouter.sh
- use_sudo: true
- process:
- args:
- [{ get_attribute: [VM_001_floating_ip, floating_ip_address]},
- { get_attribute: [VM_001_fixed_ip, fixed_ip_address]},
- { get_input: nexus_repo },
- { get_input: docker_repo},
- { get_input: nexus_username },
- { get_input: nexus_password },
- { get_input: artifacts_version },
- { get_input: dns_ip},
- { get_input: gerrit_branch },
- openstack,
- { get_input: external_dns},
- { get_input: mr_repo}]
- fabric_env:
- host_string: { get_attribute: [VM_001_floating_ip, floating_ip_address]}
- user: { get_input: ssh_user }
- key_filename: { get_input: key_filename }
- relationships:
- - type: cloudify.relationships.contained_in
- target: VM_001
- - type: cloudify.relationships.depends_on
- target: VM_001_floating_ip
-
-outputs:
- ONAP_Message_Router:
- description: information about Message Router
- value:
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
-
-
-
-
-
-
-
-
diff --git a/TOSCA/VM/MessageRouter/scripts/messagerouter.sh b/TOSCA/VM/MessageRouter/scripts/messagerouter.sh
deleted file mode 100644
index 081cb0bebd..0000000000
--- a/TOSCA/VM/MessageRouter/scripts/messagerouter.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/sh
-
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-#
-PUBIP="$1"
-PVTIP="$2"
-NEXUS_REPO="$3"
-DOCKER_REPO="$4"
-NEXUS_USERNAME="$5"
-NEXUS_PASSWORD="$6"
-ARTIFACTS_VERSION="$7"
-DNS_IP="$8"
-GERRIT_BRANCH="$9"
-CLOUD_ENV="${10}"
-EXTERNAL_DNS="${11}"
-MR_REPO="${12}"
-
-export HOSTNAME=`hostname`
-echo 127.0.1.1 $HOSTNAME >>/etc/hosts
-echo $PVTIP $HOSTNAME >>/etc/hosts
-echo $PUBIP $HOSTNAME >>/etc/hosts
-
-
-mkdir /opt/config
-chmod 777 /opt/config
-echo $PUBIP > /opt/config/public_ip.txt
-echo $NEXUS_REPO > /opt/config/nexus_repo.txt
-echo $DOCKER_REPO > /opt/config/nexus_docker_repo.txt
-echo $NEXUS_USERNAME > /opt/config/nexus_username.txt
-echo $NEXUS_PASSWORD > /opt/config/nexus_password.txt
-echo $ARTIFACTS_VERSION > /opt/config/artifacts_version.txt
-echo $DNS_IP > /opt/config/dns_ip_addr.txt
-echo $GERRIT_BRANCH > /opt/config/gerrit_branch.txt
-echo $CLOUD_ENV > /opt/config/cloud_env.txt
-echo $EXTERNAL_DNS > /opt/config/external_dns.txt
-echo $MR_REPO > /opt/config/remote_repo.txt
-
-touch /opt/mr_install.sh
-chmod 777 /opt/mr_install.sh
-curl -k $NEXUS_REPO/org.onap.demo/boot/$ARTIFACTS_VERSION/mr_install.sh -o /opt/mr_install.sh;
-cd /opt
-chmod +x mr_install.sh
-/opt/mr_install.sh > mr_install.log 2>&1 \ No newline at end of file
diff --git a/TOSCA/VM/appc/appc.yaml b/TOSCA/VM/appc/appc.yaml
deleted file mode 100644
index 5a4cff7f30..0000000000
--- a/TOSCA/VM/appc/appc.yaml
+++ /dev/null
@@ -1,278 +0,0 @@
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-###########################################################
-# This Blueprint installs ONAP appc on Openstack
-###########################################################
-
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
- This blueprint creates an ONAP APP-C
-
-imports:
- - http://www.getcloudify.org/spec/cloudify/4.1/types.yaml
- - http://cloudify.co/spec/openstack-plugin/2.2.0/plugin.yaml
- - http://cloudify.co/spec/fabric-plugin/1.5/plugin.yaml
- - http://www.getcloudify.org/spec/diamond-plugin/1.3.3/plugin.yaml
-
-inputs:
- nexus_repo:
- type: string
- default: https://nexus.onap.org/content/sites/raw
-
- docker_repo:
- type: string
- default: nexus3.onap.org:10001
-
- nexus_username:
- type: string
- default: docker
-
- nexus_password:
- type: string
- default: docker
-
- artifacts_version:
- type: string
- default: 1.1.0-SNAPSHOT
-
- dns_ip:
- type: string
-
- cloud_env:
- type: string
- default: openstack
-
- docker_version:
- type: string
- default: v1.2.0
-
- gerrit_branch:
- type: string
- default: master
-
- external_dns:
- type: string
-
- dmaap_topic:
- type: string
- default: AUTO
-
- appc_repo:
- type: string
- default: http://gerrit.onap.org/r/appc/deployment.git
-
- vm_instancename:
- type: string
-
- image:
- description: |
- An Ubuntu 14.04 base image
- type: string
- default: Ubuntu 14.04 LTS Generic
-
- flavor:
- description: >
- A machine type with more than 2 CPUs, 4096 MB RAM, and 8 GB of disk space.
- You might want to use 4 CPUs, 8192 MB RAM for the master.
- type: string
- default: m1.xlarge
-
- ssh_user:
- description: |
- User for connecting to agent VM
- type: string
- default: ubuntu
-
- security_group:
- description: openstack security_group
- type: string
-
- keypair_name:
- description: |
- OpenStack keypair name
- type: string
-
- key_filename:
- type: string
-
- external_network:
- type: string
-
- app_subnet:
- type: string
-
-
- openstack_username:
- type: string
- description: username to authenticate to OpenStack
-
- openstack_password:
- type: string
- description: OpenStack tenant password for openstack_username user
-
- openstack_tenant_name:
- type: string
- description: OpenStack tenant for VM deploy
-
- openstack_auth_url:
- type: string
- description: Authentication URL for OpenStack
-
- openstack_region:
- type: string
- description: OpenStack region
-
- dgbuilder_docker:
- type: string
- default: v0.1.0
-
-dsl_definitions:
- openstack_credentials: &openstack_credentials
- username: { get_input: openstack_username }
- password: { get_input: openstack_password }
- tenant_name: { get_input: openstack_tenant_name }
- auth_url: { get_input: openstack_auth_url }
- region: { get_input: openstack_region }
-
-node_templates:
-
- key_pair:
- type: cloudify.openstack.nodes.KeyPair
- properties:
- private_key_path: { get_input: key_filename }
- use_external_resource: true
- resource_id: { get_input: keypair_name }
- openstack_config: *openstack_credentials
-
- private_net:
- type: cloudify.openstack.nodes.Network
- properties:
- use_external_resource: true
- resource_id: { get_input: app_subnet }
- openstack_config: *openstack_credentials
-
- external_network:
- type: cloudify.openstack.nodes.Network
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: external_network}
-
- app_security_group:
- type: cloudify.openstack.nodes.SecurityGroup
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: security_group }
-
- VM_001_fixed_ip:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_credentials
- relationships:
- - type: cloudify.relationships.contained_in
- target: private_net
- - type: cloudify.openstack.port_connected_to_security_group
- target: app_security_group
-
- VM_001_floating_ip:
- type: cloudify.openstack.nodes.FloatingIP
- properties:
- openstack_config: *openstack_credentials
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- inputs:
- args:
- floating_network_name: { get_input: external_network }
-
- VM_001:
- type: cloudify.openstack.nodes.Server
- properties:
- openstack_config: *openstack_credentials
- install_agent: false
- image: { get_input: image }
- flavor: { get_input: flavor }
- resource_id: { get_input: vm_instancename }
- management_network_name: { get_input: app_subnet }
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
- relationships:
- - type: cloudify.openstack.server_connected_to_floating_ip
- target: VM_001_floating_ip
- - type: cloudify.openstack.server_connected_to_keypair
- target: key_pair
- - type: cloudify.openstack.server_connected_to_port
- target: VM_001_fixed_ip
-
-
- app_001:
- type: cloudify.nodes.SoftwareComponent
- properties:
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_script
- inputs:
- script_path: scripts/appc.sh
- use_sudo: true
- process:
- args:
- [{ get_attribute: [VM_001_floating_ip, floating_ip_address]},
- { get_attribute: [VM_001_fixed_ip, fixed_ip_address]},
- { get_input: nexus_repo },
- { get_input: docker_repo},
- { get_input: nexus_username },
- { get_input: nexus_password },
- { get_input: artifacts_version },
- { get_input: dns_ip},
- { get_input: gerrit_branch },
- openstack,
- { get_input: external_dns},
- { get_input: dmaap_topic},
- { get_input: openstack_username},
- { get_input: openstack_tenant_name},
- { get_input: openstack_password},
- { get_input: openstack_region},
- { get_input: openstack_auth_url},
- { get_input: appc_repo},
- { get_input: docker_version },
- { get_input: dgbuilder_docker}]
- fabric_env:
- host_string: { get_attribute: [VM_001_floating_ip, floating_ip_address]}
- user: { get_input: ssh_user }
- key_filename: { get_input: key_filename }
- relationships:
- - type: cloudify.relationships.contained_in
- target: VM_001
- - type: cloudify.relationships.depends_on
- target: VM_001_floating_ip
-
-outputs:
- ONAP_Appc:
- description: information about APP-C
- value:
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
-
-
-
-
-
-
-
-
diff --git a/TOSCA/VM/appc/scripts/appc.sh b/TOSCA/VM/appc/scripts/appc.sh
deleted file mode 100644
index 1ceebae12b..0000000000
--- a/TOSCA/VM/appc/scripts/appc.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh
-
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-#
-PUBIP="$1"
-PVTIP="$2"
-NEXUS_REPO="$3"
-DOCKER_REPO="$4"
-NEXUS_USERNAME="$5"
-NEXUS_PASSWORD="$6"
-ARTIFACTS_VERSION="$7"
-DNS_IP="$8"
-GERRIT_BRANCH="$9"
-CLOUD_ENV="${10}"
-EXTERNAL_DNS="${11}"
-DMAAP_TOPIC="${12}"
-OPENSTACK_USERNAME="${13}"
-TENANT_ID="${14}"
-OPENSTACK_API_KEY="${15}"
-OPENSTACK_REGION="${16}"
-KEYSTONE="${17}"
-APPC_REPO="${18}"
-DOCKER_VERSION="${19}"
-DGBUILDER_DOCKER="${20}"
-
-export HOSTNAME=`hostname`
-echo 127.0.1.1 $HOSTNAME >>/etc/hosts
-echo $PVTIP $HOSTNAME >>/etc/hosts
-echo $PUBIP $HOSTNAME >>/etc/hosts
-
-
-mkdir /opt/config
-chmod 777 /opt/config
-echo $PUBIP > /opt/config/public_ip.txt
-echo $NEXUS_REPO > /opt/config/nexus_repo.txt
-echo $DOCKER_REPO > /opt/config/nexus_docker_repo.txt
-echo $NEXUS_USERNAME > /opt/config/nexus_username.txt
-echo $NEXUS_PASSWORD > /opt/config/nexus_password.txt
-echo $DMAAP_TOPIC > /opt/config/dmaap_topic.txt
-echo $ARTIFACTS_VERSION > /opt/config/artifacts_version.txt
-echo $DNS_IP > /opt/config/dns_ip_addr.txt
-echo $DOCKER_VERSION > /opt/config/docker_version.txt
-echo $GERRIT_BRANCH > /opt/config/gerrit_branch.txt
-echo $DGBUILDER_DOCKER > /opt/config/dgbuilder_version.txt
-echo $CLOUD_ENV > /opt/config/cloud_env.txt
-echo $EXTERNAL_DNS > /opt/config/external_dns.txt
-echo $APPC_REPO > /opt/config/remote_repo.txt
-echo $OPENSTACK_USERNAME > /opt/config/openstack_username.txt
-echo $TENANT_ID > /opt/config/tenant_id.txt
-echo $OPENSTACK_API_KEY > /opt/config/openstack_api_key.txt
-echo $OPENSTACK_REGION > /opt/config/openstack_region.txt
-echo $KEYSTONE > /opt/config/keystone.txt
-
-touch /opt/appc_install.sh
-chmod 777 /opt/appc_install.sh
-curl -k $NEXUS_REPO/org.onap.demo/boot/$ARTIFACTS_VERSION/appc_install.sh -o /opt/appc_install.sh
-cd /opt
-chmod +x appc_install.sh
-/opt/appc_install.sh > appc_install.log 2>&1 \ No newline at end of file
diff --git a/TOSCA/VM/policy/Policy.yaml b/TOSCA/VM/policy/Policy.yaml
deleted file mode 100644
index 76ff563084..0000000000
--- a/TOSCA/VM/policy/Policy.yaml
+++ /dev/null
@@ -1,263 +0,0 @@
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-###########################################################
-# This Blueprint installs ONAP policy on Openstack
-###########################################################
-
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
- This blueprint creates an ONAP policy
-
-imports:
- - http://www.getcloudify.org/spec/cloudify/4.1/types.yaml
- - http://cloudify.co/spec/openstack-plugin/2.2.0/plugin.yaml
- - http://cloudify.co/spec/fabric-plugin/1.5/plugin.yaml
- - http://www.getcloudify.org/spec/diamond-plugin/1.3.3/plugin.yaml
-
-inputs:
- nexus_repo:
- type: string
- default: https://nexus.onap.org/content/sites/raw
-
- docker_repo:
- type: string
- default: nexus3.onap.org:10001
-
- nexus_username:
- type: string
- default: docker
-
- nexus_password:
- type: string
- default: docker
-
- artifacts_version:
- type: string
- default: 1.1.0-SNAPSHOT
-
- dns_ip:
- type: string
-
- cloud_env:
- type: string
- default: openstack
-
- docker_version:
- type: string
- default: v1.1.1
-
- gerrit_branch:
- type: string
- default: master
-
- external_dns:
- type: string
-
- policy_repo:
- type: string
- default: http://gerrit.onap.org/r/policy/docker.git
-
- vm_instancename:
- type: string
-
- image:
- description: |
- An Ubuntu 16.04 base image
- type: string
- default: Ubuntu 16.04 LTS Generic
-
- flavor:
- description: >
- A machine type with more than 2 CPUs, 4096 MB RAM, and 8 GB of disk space.
- You might want to use 4 CPUs, 8192 MB RAM for the master.
- type: string
- default: m1.xlarge
-
- ssh_user:
- description: |
- User for connecting to agent VM
- type: string
- default: ubuntu
-
- security_group:
- description: openstack security_group
- type: string
-
- keypair_name:
- description: |
- OpenStack keypair name
- type: string
-
- key_filename:
- type: string
-
- external_network:
- type: string
-
- app_subnet:
- type: string
-
-
- openstack_username:
- type: string
- description: username to authenticate to OpenStack
-
- openstack_password:
- type: string
- description: OpenStack tenant password for openstack_username user
-
- openstack_tenant_name:
- type: string
- description: OpenStack tenant for VM deploy
-
- openstack_auth_url:
- type: string
- description: Authentication URL for OpenStack
-
- openstack_region:
- type: string
- description: OpenStack region
-
-dsl_definitions:
- openstack_credentials: &openstack_credentials
- username: { get_input: openstack_username }
- password: { get_input: openstack_password }
- tenant_name: { get_input: openstack_tenant_name }
- auth_url: { get_input: openstack_auth_url }
- region: { get_input: openstack_region }
-
-node_templates:
-
- key_pair:
- type: cloudify.openstack.nodes.KeyPair
- properties:
- private_key_path: { get_input: key_filename }
- use_external_resource: true
- resource_id: { get_input: keypair_name }
- openstack_config: *openstack_credentials
-
- private_net:
- type: cloudify.openstack.nodes.Network
- properties:
- use_external_resource: true
- resource_id: { get_input: app_subnet }
- openstack_config: *openstack_credentials
-
- external_network:
- type: cloudify.openstack.nodes.Network
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: external_network}
-
- app_security_group:
- type: cloudify.openstack.nodes.SecurityGroup
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: security_group }
-
- VM_001_fixed_ip:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_credentials
- relationships:
- - type: cloudify.relationships.contained_in
- target: private_net
- - type: cloudify.openstack.port_connected_to_security_group
- target: app_security_group
-
- VM_001_floating_ip:
- type: cloudify.openstack.nodes.FloatingIP
- properties:
- openstack_config: *openstack_credentials
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- inputs:
- args:
- floating_network_name: { get_input: external_network }
-
- VM_001:
- type: cloudify.openstack.nodes.Server
- properties:
- openstack_config: *openstack_credentials
- install_agent: false
- image: { get_input: image }
- flavor: { get_input: flavor }
- resource_id: { get_input: vm_instancename }
- management_network_name: { get_input: app_subnet }
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
- relationships:
- - type: cloudify.openstack.server_connected_to_floating_ip
- target: VM_001_floating_ip
- - type: cloudify.openstack.server_connected_to_keypair
- target: key_pair
- - type: cloudify.openstack.server_connected_to_port
- target: VM_001_fixed_ip
-
-
- app_001:
- type: cloudify.nodes.SoftwareComponent
- properties:
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_script
- inputs:
- script_path: scripts/policy.sh
- use_sudo: true
- process:
- args:
- [{ get_attribute: [VM_001_floating_ip, floating_ip_address]},
- { get_attribute: [VM_001_fixed_ip, fixed_ip_address]},
- { get_input: nexus_repo },
- { get_input: docker_repo},
- { get_input: nexus_username },
- { get_input: nexus_password },
- { get_input: artifacts_version },
- { get_input: dns_ip},
- { get_input: docker_version },
- { get_input: gerrit_branch },
- openstack,
- { get_input: external_dns},
- { get_input: policy_repo}]
- fabric_env:
- host_string: { get_attribute: [VM_001_floating_ip, floating_ip_address]}
- user: { get_input: ssh_user }
- key_filename: { get_input: key_filename }
- relationships:
- - type: cloudify.relationships.contained_in
- target: VM_001
- - type: cloudify.relationships.depends_on
- target: VM_001_floating_ip
-
-outputs:
- ONAP_Policy:
-    description: information about Policy
- value:
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
-
-
-
-
-
-
-
-
diff --git a/TOSCA/VM/policy/scripts/policy.sh b/TOSCA/VM/policy/scripts/policy.sh
deleted file mode 100644
index f01420cbdf..0000000000
--- a/TOSCA/VM/policy/scripts/policy.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-#
-PUBIP="$1"
-PVTIP="$2"
-NEXUS_REPO="$3"
-DOCKER_REPO="$4"
-NEXUS_USERNAME="$5"
-NEXUS_PASSWORD="$6"
-ARTIFACTS_VERSION="$7"
-DNS_IP="$8"
-DOCKER_VERSION="$9"
-GERRIT_BRANCH="${10}"
-CLOUD_ENV="${11}"
-EXTERNAL_DNS="${12}"
-POLICY_REPO="${13}"
-
-export HOSTNAME=`hostname`
-echo 127.0.1.1 $HOSTNAME >>/etc/hosts
-echo $PVTIP $HOSTNAME >>/etc/hosts
-echo $PUBIP $HOSTNAME >>/etc/hosts
-
-
-mkdir /opt/config
-chmod 777 /opt/config
-echo $PUBIP > /opt/config/public_ip.txt
-echo $NEXUS_REPO > /opt/config/nexus_repo.txt
-echo $DOCKER_REPO > /opt/config/nexus_docker_repo.txt
-echo $NEXUS_USERNAME > /opt/config/nexus_username.txt
-echo $NEXUS_PASSWORD > /opt/config/nexus_password.txt
-echo $ARTIFACTS_VERSION > /opt/config/artifacts_version.txt
-echo $DNS_IP > /opt/config/dns_ip_addr.txt
-echo $DOCKER_VERSION > /opt/config/docker_version.txt
-echo $GERRIT_BRANCH > /opt/config/gerrit_branch.txt
-echo $CLOUD_ENV > /opt/config/cloud_env.txt
-echo $EXTERNAL_DNS > /opt/config/external_dns.txt
-echo $POLICY_REPO > /opt/config/remote_repo.txt
-touch /opt/policy_install.sh
-chmod 777 /opt/policy_install.sh
-curl -k $NEXUS_REPO/org.onap.demo/boot/$ARTIFACTS_VERSION/policy_install.sh -o /opt/policy_install.sh;
-apt-get update
-apt-get install -y docker.io
-cd /opt
-chmod +x policy_install.sh
-/opt/policy_install.sh > policy_install.log 2>&1
\ No newline at end of file
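
For orientation, the thirteen positional arguments that Policy.yaml passes to scripts/policy.sh map one-to-one onto the /opt/config files the script writes (arguments past the ninth must be referenced as ${10} and up in POSIX sh). A small Python sketch restating that mapping, purely for illustration; the table is read off the script above and nothing new is introduced:

    # Hypothetical helper restating the argument order used by scripts/policy.sh;
    # indices and target files are taken from the script, nothing new is defined.
    POLICY_ARG_LAYOUT = {
        1: ("PUBIP", "/opt/config/public_ip.txt"),
        2: ("PVTIP", None),  # written into /etc/hosts only
        3: ("NEXUS_REPO", "/opt/config/nexus_repo.txt"),
        4: ("DOCKER_REPO", "/opt/config/nexus_docker_repo.txt"),
        5: ("NEXUS_USERNAME", "/opt/config/nexus_username.txt"),
        6: ("NEXUS_PASSWORD", "/opt/config/nexus_password.txt"),
        7: ("ARTIFACTS_VERSION", "/opt/config/artifacts_version.txt"),
        8: ("DNS_IP", "/opt/config/dns_ip_addr.txt"),
        9: ("DOCKER_VERSION", "/opt/config/docker_version.txt"),
        10: ("GERRIT_BRANCH", "/opt/config/gerrit_branch.txt"),
        11: ("CLOUD_ENV", "/opt/config/cloud_env.txt"),
        12: ("EXTERNAL_DNS", "/opt/config/external_dns.txt"),
        13: ("POLICY_REPO", "/opt/config/remote_repo.txt"),
    }

    def describe(index):
        # e.g. describe(12) -> "${12} -> EXTERNAL_DNS (/opt/config/external_dns.txt)"
        name, target = POLICY_ARG_LAYOUT[index]
        return "${%d} -> %s (%s)" % (index, name, target or "/etc/hosts only")
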
diff --git a/TOSCA/VM/so/scripts/so.sh b/TOSCA/VM/so/scripts/so.sh
deleted file mode 100644
index ade760f507..0000000000
--- a/TOSCA/VM/so/scripts/so.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/sh
-
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-#
-PUBIP="$1"
-PVTIP="$2"
-NEXUS_REPO="$3"
-DOCKER_REPO="$4"
-NEXUS_USERNAME="$5"
-NEXUS_PASSWORD="$6"
-ARTIFACTS_VERSION="$7"
-DNS_IP="$8"
-GERRIT_BRANCH="$9"
-CLOUD_ENV="${10}"
-EXTERNAL_DNS="${11}"
-DMAAP_TOPIC="${12}"
-OPENSTACK_USERNAME="${13}"
-TENANT_ID="${14}"
-OPENSTACK_API_KEY="${15}"
-OPENSTACK_REGION="${16}"
-KEYSTONE="${17}"
-SO_REPO="${18}"
-DOCKER_VERSION="${19}"
-
-export HOSTNAME=`hostname`
-echo 127.0.1.1 $HOSTNAME >>/etc/hosts
-echo $PVTIP $HOSTNAME >>/etc/hosts
-echo $PUBIP $HOSTNAME >>/etc/hosts
-
-
-mkdir /opt/config
-chmod 777 /opt/config
-echo $PUBIP > /opt/config/public_ip.txt
-echo $NEXUS_REPO > /opt/config/nexus_repo.txt
-echo $DOCKER_REPO > /opt/config/nexus_docker_repo.txt
-echo $NEXUS_USERNAME > /opt/config/nexus_username.txt
-echo $NEXUS_PASSWORD > /opt/config/nexus_password.txt
-echo $ARTIFACTS_VERSION > /opt/config/artifacts_version.txt
-echo $DNS_IP > /opt/config/dns_ip_addr.txt
-echo $DOCKER_VERSION > /opt/config/docker_version.txt
-echo $GERRIT_BRANCH > /opt/config/gerrit_branch.txt
-echo $CLOUD_ENV > /opt/config/cloud_env.txt
-echo $EXTERNAL_DNS > /opt/config/external_dns.txt
-echo $DMAAP_TOPIC > /opt/config/dmaap_topic.txt
-echo $OPENSTACK_USERNAME > /opt/config/openstack_username.txt
-echo $TENANT_ID > /opt/config/tenant_id.txt
-echo $OPENSTACK_API_KEY > /opt/config/openstack_api_key.txt
-echo $OPENSTACK_REGION > /opt/config/openstack_region.txt
-echo $KEYSTONE > /opt/config/keystone.txt
-echo $SO_REPO > /opt/config/remote_repo.txt
-
-if [ ! -f /opt/rebooted ]; then
- touch /opt/so_install.sh
- chmod 777 /opt/so_install.sh
- curl -k $NEXUS_REPO/org.onap.demo/boot/$ARTIFACTS_VERSION/so_install.sh -o /opt/so_install.sh
- cd /opt
- chmod +x so_install.sh
-
- echo " if [ ! -f /opt/rebooted ]; then" >/opt/install_tmp
- echo " touch /opt/rebooted">>/opt/install_tmp
- echo " reboot" >>/opt/install_tmp
- echo " fi">>/opt/install_tmp
- sed -i -e "/\treboot/r install_tmp" -e "//d" so_install.sh
-fi
-/opt/so_install.sh > so_install.log 2>&1
\ No newline at end of file
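
so.sh guards the one-time download and patching of so_install.sh behind the /opt/rebooted marker; roughly, the sed edit makes the downloaded installer create the marker and reboot only once, so a second pass skips straight to running it. As a rough illustration of that marker-file idempotency pattern only (paths reused from the script, behaviour simplified, not a drop-in replacement), a Python sketch:

    import os
    import subprocess

    MARKER = "/opt/rebooted"          # marker file tested by so.sh
    INSTALLER = "/opt/so_install.sh"  # installer the script downloads and patches

    def run_installer():
        # Simplified: do the one-time setup only when the marker is absent,
        # then run the installer unconditionally, mirroring the guard in so.sh.
        if not os.path.isfile(MARKER):
            # one-time work goes here: download the installer, chmod it,
            # and patch in the reboot hook that creates the marker
            pass
        return subprocess.call(["/bin/sh", INSTALLER])
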
diff --git a/TOSCA/VM/so/so.yaml b/TOSCA/VM/so/so.yaml
deleted file mode 100644
index e4719dff02..0000000000
--- a/TOSCA/VM/so/so.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright (c) 2017 AT&T
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============LICENSE_END============================================
-
-###########################################################
-# This Blueprint installs ONAP SO on Openstack
-###########################################################
-
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
-  This blueprint creates an ONAP Service Orchestrator (SO) VM
-
-imports:
- - http://www.getcloudify.org/spec/cloudify/4.1/types.yaml
- - http://cloudify.co/spec/openstack-plugin/2.2.0/plugin.yaml
- - http://cloudify.co/spec/fabric-plugin/1.5/plugin.yaml
- - http://www.getcloudify.org/spec/diamond-plugin/1.3.3/plugin.yaml
-
-inputs:
- nexus_repo:
- type: string
- default: https://nexus.onap.org/content/sites/raw
-
- docker_repo:
- type: string
- default: nexus3.onap.org:10001
-
- nexus_username:
- type: string
- default: docker
-
- nexus_password:
- type: string
- default: docker
-
- artifacts_version:
- type: string
- default: 1.1.0-SNAPSHOT
-
- dns_ip:
- type: string
-
- cloud_env:
- type: string
- default: openstack
-
- docker_version:
- type: string
- default: v1.1.1
-
- gerrit_branch:
- type: string
- default: master
-
- external_dns:
- type: string
-
- dmaap_topic:
- type: string
- default: AUTO
-
- so_repo:
- type: string
- default: http://gerrit.onap.org/r/so/docker-config.git
-
- vm_instancename:
- type: string
-
- image:
- description: |
-      An Ubuntu 16.04 image to boot the VM from
- type: string
- default: Ubuntu 16.04 LTS Generic
-
- flavor:
- description: >
-      A machine type with more than 2 CPUs, 4096 MB RAM, and 8 GB of disk space.
- You might want to use 4 CPUs, 8192 MB RAM for the master.
- type: string
- default: m1.xlarge
-
- ssh_user:
- description: |
- User for connecting to agent VM
- type: string
- default: ubuntu
-
- security_group:
- description: openstack security_group
- type: string
-
- keypair_name:
- description: |
- OpenStack keypair name
- type: string
-
- key_filename:
- type: string
-
- external_network:
- type: string
-
- app_subnet:
- type: string
-
-
- openstack_username:
- type: string
- description: username to authenticate to OpenStack
-
- openstack_password:
- type: string
- description: OpenStack tenant password for openstack_username user
-
- openstack_tenant_name:
- type: string
- description: OpenStack tenant for VM deploy
-
- openstack_auth_url:
- type: string
- description: Authentication URL for OpenStack
-
- openstack_region:
- type: string
- description: OpenStack region
-
-dsl_definitions:
- openstack_credentials: &openstack_credentials
- username: { get_input: openstack_username }
- password: { get_input: openstack_password }
- tenant_name: { get_input: openstack_tenant_name }
- auth_url: { get_input: openstack_auth_url }
- region: { get_input: openstack_region }
-
-node_templates:
-
- key_pair:
- type: cloudify.openstack.nodes.KeyPair
- properties:
- private_key_path: { get_input: key_filename }
- use_external_resource: true
- resource_id: { get_input: keypair_name }
- openstack_config: *openstack_credentials
-
- private_net:
- type: cloudify.openstack.nodes.Network
- properties:
- use_external_resource: true
- resource_id: { get_input: app_subnet }
- openstack_config: *openstack_credentials
-
- external_network:
- type: cloudify.openstack.nodes.Network
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: external_network}
-
- app_security_group:
- type: cloudify.openstack.nodes.SecurityGroup
- properties:
- openstack_config: *openstack_credentials
- use_external_resource: true
- resource_id: { get_input: security_group }
-
- VM_001_fixed_ip:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_credentials
- relationships:
- - type: cloudify.relationships.contained_in
- target: private_net
- - type: cloudify.openstack.port_connected_to_security_group
- target: app_security_group
-
- VM_001_floating_ip:
- type: cloudify.openstack.nodes.FloatingIP
- properties:
- openstack_config: *openstack_credentials
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- inputs:
- args:
- floating_network_name: { get_input: external_network }
-
- VM_001:
- type: cloudify.openstack.nodes.Server
- properties:
- openstack_config: *openstack_credentials
- install_agent: false
- image: { get_input: image }
- flavor: { get_input: flavor }
- resource_id: { get_input: vm_instancename }
- management_network_name: { get_input: app_subnet }
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
- relationships:
- - type: cloudify.openstack.server_connected_to_floating_ip
- target: VM_001_floating_ip
- - type: cloudify.openstack.server_connected_to_keypair
- target: key_pair
- - type: cloudify.openstack.server_connected_to_port
- target: VM_001_fixed_ip
-
-
- app_001:
- type: cloudify.nodes.SoftwareComponent
- properties:
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_script
- inputs:
- script_path: scripts/so.sh
- use_sudo: true
- process:
- args:
- [{ get_attribute: [VM_001_floating_ip, floating_ip_address]},
- { get_attribute: [VM_001_fixed_ip, fixed_ip_address]},
- { get_input: nexus_repo },
- { get_input: docker_repo},
- { get_input: nexus_username },
- { get_input: nexus_password },
- { get_input: artifacts_version },
- { get_input: dns_ip},
- { get_input: gerrit_branch },
- openstack,
- { get_input: external_dns},
- { get_input: dmaap_topic},
- { get_input: openstack_username},
- { get_input: openstack_tenant_name},
- { get_input: openstack_password},
- { get_input: openstack_region},
- { get_input: openstack_auth_url},
- { get_input: so_repo},
- { get_input: docker_version }]
- fabric_env:
- host_string: { get_attribute: [VM_001_floating_ip, floating_ip_address]}
- user: { get_input: ssh_user }
- key_filename: { get_input: key_filename }
- relationships:
- - type: cloudify.relationships.contained_in
- target: VM_001
- - type: cloudify.relationships.depends_on
- target: VM_001_floating_ip
-
-outputs:
- ONAP_Service_Orchestrator:
-    description: information about the Service Orchestrator
- value:
- ip: { get_attribute: [VM_001_floating_ip, floating_ip_address] }
-
-
-
-
-
-
-
-
diff --git a/cloudify-onap/blueprint.yaml b/cloudify-onap/blueprint.yaml
deleted file mode 100644
index 96d297dbd3..0000000000
--- a/cloudify-onap/blueprint.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
-  This blueprint deploys all ONAP Kubernetes resources defined in YAML files on an existing Kubernetes cluster.
- The following pre-setup steps are assumed, but not required:
- - Create Cloudify Example Environment: https://github.com/cloudify-examples/cloudify-environment-setup.
- - Create Kubernetes Cluster: https://github.com/cloudify-examples/simple-kubernetes-blueprint.
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.1rc1/cloudify_kubernetes_plugin-1.2.1rc1-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1/plugin.yaml
-# - https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip
- - plugins/onap-installation-plugin/plugin.yaml
-
-inputs:
- kubernetes_configuration_file_content:
- description: >
- File content of kubernetes master YAML configuration
- default:
- apiVersion: v1
- clusters:
- - cluster:
- certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01UQXdNakV6TVRNeU4xb1hEVEkzTURrek1ERXpNVE15TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTDN4CnRqRllVd25GVmxnNFZhd3BNaFB1N2hDOXVNSEJmUU9JclIrU1B4TElaMUVjTERXdTBod1pWWloxSUF4K0NrQlcKU1BmVDdXSE5zNld4RHAyRUpoVlp2TEFBVW9nem5aSlJnU0Z2RW1NZWg4cXhTaSsrQmtNNmxvTytTTVE0eFJlVApFbTZpd3JrdFZNUXVocDhkVG01MGpIUjMyelBTQklZRGpQejYwWEpzNXp2MEJzL0VlSHRDS0RxV2ZoZWpYZjBECmhQNi9DSU45UjBwNk9UZTdSYUV6dUtlblRtTml0dzNJTkg4b3BhYlY2TW8ycEFvbVRRUG5pZTVZZ2EzNGNndXEKTXpYSmVQUE1CQSs4UDNmS3BFcDJQU3UzWUF3SGg4VWo0MlRDTjMxQ1BBM3F4dUxGejBwOGw5ckJ5WVh6amRybQpwNFZIV1FGOTh3eXBFUW0xNFdVQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFBS3Q2a2E2R04rWksveEx5Nk1tcWpSQTZpa2QKMjYwS3g0ak9FWUtrNWtTemlPOE5Iazhza3JNNzZSVksxdUwxZkFzU0N5SnYyOUJNb3NGQkhISDltOGNBbnFFcgowRGVvdDhHY0d6OWh6ZTlOcGhGNkFNNEhCZTFDbDhkME9vdm91K3dsYk1VTDN4M1hNUHZmMDRKajZsMUJMdEx2Cjh2Nk5Eb0lWdkppS1FLM0tuK3BHUVBZd2x1UEFoSEZQZUdlZ1gvTnBlNXJjMVZGRUtLUkRQRVFRS0xrcE5TZ2IKNzgwZ2ZMTkxJUTJsQnczZHRjazcyelkyK0dlWlp3MEM2ckk5QUhBekg3ZXpZY0pvT2VmVjArZE9zV2lJNVB4UgowZnNYSlNEOVR4SDVVMlRSQ25KZ214M1ZLTGl5OTZyOGsrOTdkWlAvN3h2Q3dGSU9wc2I2UGcxdjNxZz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- server: https://192.168.120.5:6443
- name: kubernetes
- contexts:
- - context:
- cluster: kubernetes
- user: kubernetes-admin
- name: kubernetes-admin@kubernetes
- current-context: kubernetes-admin@kubernetes
- kind: Config
- preferences: {}
- users:
- - name: kubernetes-admin
- user:
- client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRUFMaVFKV0d2ZmN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4TnpFd01ESXhNekV6TWpkYUZ3MHhPREV3TURJeE16RXpNamxhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVOWFFJSlNzSmdtWndXMTEKcEtRZTV5TkQ1THBUdDY5dENKeEpoWXZPbElUOTRwVmdtVnBLNjhlMUNhQ2YxYkt0N2hCMUFOSVJHam8xMTYxNgpGUVRkaE5ZN0tnZFFUNGsrMC9mV0hBZzdCYnhKSDFCdEVvOGorTFk0dDRMNUVCS1l2dTN4Z21NS2E3SGMxZiszCkRQdWtndDZUZm1JazZEM0JkTHQyays2ZDhMclAzYmtkVmQwS1o2OWtnRnBuSWxtNkdrQWhrZWRleE5PQ21vRE8KU0hHMEJvaWphSjRuNUUrWGtNUEpHM3c3aUhNeE5RQ1Fob1lVOC84MGZHWUd5Y2M5TklLSlRFWmVjVGxOeCtPVApyd1F4aWRWa0FNK3I2K085ZlVMSmV1VDlhbXhzOEJNbVNwQko2TU0xelZaTDA5RGp3QytKSFRmY25IMTRBTTJ5CnN3c1hSUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCNkRNanRFQkMvSTlkK1VqYWNXM0R5K0pZd21hYWdOUHJycApyaWV0OExlMHFuOS9GUkRpcnpTZldUNXFSNzg3dEpqRWdIaHlvZzFQWE1ZY3l6ajhRS0hyTXJXZUJMck5EQ08wCkVFTGMzSnNyUkdiK0MxWk1MQ3k1VmRVd2Q4ZnYzNm5vRlZOelVneitBTG9WSmVUUEdBYUxTbjA5OWo0RXhxUXoKK3FYTDltZEpQc1FwVVR4OTN3Z1ppcjJRMzcxdXovU21UYkxsOEhjMUtkbC9MODFXM0xYY3JrbFAvbFBkUG9yVgpjUmFBYm94NjBTMmxFTE16dUZHRThaR0lPYlFseDJUdzAxcENvckQ0NHVqOHU1ZmRiWkxhRHZuSHZDODlvSjhNCml5VnU5dkxSajhzbmZvb0ZLMjVqOGI1SzNGMGlLcWRzaktiSWVPS3dKWWkwdGNPWC9GYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
- client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdU5YUUlKU3NKZ21ad1cxMXBLUWU1eU5ENUxwVHQ2OXRDSnhKaFl2T2xJVDk0cFZnCm1WcEs2OGUxQ2FDZjFiS3Q3aEIxQU5JUkdqbzExNjE2RlFUZGhOWTdLZ2RRVDRrKzAvZldIQWc3QmJ4SkgxQnQKRW84aitMWTR0NEw1RUJLWXZ1M3hnbU1LYTdIYzFmKzNEUHVrZ3Q2VGZtSWs2RDNCZEx0MmsrNmQ4THJQM2JrZApWZDBLWjY5a2dGcG5JbG02R2tBaGtlZGV4Tk9DbW9ET1NIRzBCb2lqYUo0bjVFK1hrTVBKRzN3N2lITXhOUUNRCmhvWVU4LzgwZkdZR3ljYzlOSUtKVEVaZWNUbE54K09UcndReGlkVmtBTStyNitPOWZVTEpldVQ5YW14czhCTW0KU3BCSjZNTTF6VlpMMDlEandDK0pIVGZjbkgxNEFNMnlzd3NYUlFJREFRQUJBb0lCQVFDTjVnL2ZRMlFaMjVyLwp4Vkhnc09MTW5lVThqdE5BbkZ1L2xZUllFeW5vR1dWejh5dnk3eisvWFZVZDdBTnBJc0NtbUZuSzVIcWV3UHVlCjlaNVl6cUJsbjdaU0lNVkdHbDV5aXIwRVRrbEpzUEFPSzBkaFRTVFhoc1ZxRlJHeWhpUmZ4L3AyR0NUaTR1eGcKR2UzaytMZW1oTXVFNWtIaFRhL2NOeUxrbFBjSjNJTHl5bnNOSmtwSlI4M0FBb1NxVkVObHhmQVFLT3VUVnNqOApRTXF4ekNMblU1RVFYc0pCamVNa0YrVllFdVRUZENUUm8vWXluR3g0K2ptR3IxTGNRRExjbllpTGtuS2YvTjNRCmpBV2QwUGVZS2p6V0M3aGs3a0JKdEl5MjcyVEtudS9nN3FIQktSVVNSUnBnWHFsTGJMZDVpZTdzTXJEdnMyd3AKMjhxK01XbE5Bb0dCQU5CSzVDc2JGNSsxZkx3VldPaWUxRlpTSVAzMnQvRWJvODJkS1VIZWxGVldEQXV3cXRUTQpsT2psUnRZVkxRODV1WWtxMExBRkl2TjNlNDlmcWQ3aUk3cEE4WE5uZnovWVozRGFEREZ0cVRuZU84VmlOTHkvCjd0SUh4bWVwWHJpay9GUXFNZjNmNnNiYzBTcDgvSXdXWlZ4Y2hmOVVFV3pOYmZpNDZoV3BjTjlUQW9HQkFPTXIKZzhrMHFlU01teTc5Z3pwMTVvT0VMWW9VdkhLZTJibk8wbnA1aldhbVprdGR1blRNMGg2dmdLQTF5UnNnejFjNApncmtZUkp5V1JhRElYb0YzOVdvMWJrbHZvaWN5NURqWUtBK0hKWUVUT0pmOC9odzJ1WngzTk54bm5UZXpJSVA5CnVDZHlJSDhRNFU1VkloeWVXTDV5WlR6WGs3YlhyRVllZjB3cW81UUhBb0dBZlhiZkFXZ01UdE5QRXl2NHBnWWwKK09qM25vSmlRZ1ZZSVo0dEg5UW1uVkI3YVA4OXAzaWpxYldSZVpxL3paQUR4aS9ZREc3TG9zT3gxYWJWOTc5WApyZlU0ZXo4NFV3alRKaEx4alVSMHpycWlYajdOYlhSZk1ud2tjb1IrM2RIamUvNytwZUdlMWJKays2YlZxTHhFCnllR2hoUzdxWUJOTnpxZnp1S0Nic0trQ2dZRUF6N2g4ZXNEekVJOFpYekJrakZJK2c4ZWJOSVdkZzFtSlVRT3oKSmxaN1czK1FUaDNZWHZEaXVUaFZieWwvU0pVSndvRmoxd2cveE1jTHgrY3ZzMGNUV3hpY1RmNEwwYmdSUTRFegpzRzh0ZGdjeldwYjFiS3NGc2ZLMm5Vc0pVV016dWoySDVGblJLUjh3UmNaR3VOQjU2VHNGSTBza1FLNlpVa3lVCnJmclhOSVVDZ1lFQW83NGp6NnBJbDgwOU51dERZeHVON3J4RzYrTVMwRkVRa2tTSGdtZTlvN0Y0QjNWRHJ6WE4Ka1g3dC95cm1ieHBjK0R2VmUzWm1hWHE1QXJzdjRVbW5Za1ArNCs0L2REcWdmbjd3ZmVKUkpzU1Uzd2V0YWJnRwpDQW5xMFpLR3RJWWhud1h5cWF6elBsUUFteFIxWUdDczIxL05kUVJvWDdsOFdyaUJmbkpVM3hZPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
-
- namespace_prefix:
- type: string
- description: >
-      Kubernetes namespace name prefix which will be used for all ONAP apps
- default: onap
-
-dsl_definitions:
- options: &app_options
- namespace:
- concat: [{ get_input: namespace_prefix }, '-', { get_property: [SELF, name] }]
-
-node_templates:
- kubernetes_master:
- type: cloudify.kubernetes.nodes.Master
- properties:
- configuration:
- file_content: { get_input: kubernetes_configuration_file_content }
-
- onap_environment:
- type: cloudify.onap.kubernetes.Environment
- properties:
- namespace: { get_input: namespace_prefix }
- init_pod: kubernetes/config
- options:
- namespace: { get_input: namespace_prefix }
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
-
-# mso_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: mso
-# path: kubernetes/mso
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
- message_router_app:
- type: cloudify.onap.kubernetes.App
- properties:
- name: message-router
- path: kubernetes/message-router
- options: *app_options
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: kubernetes_master
- - type: cloudify.relationships.depends_on
- target: onap_environment
-#
-# sdc_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: sdc
-# path: kubernetes/sdc
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# aai_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: aai
-# path: kubernetes/aai
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# robot_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: robot
-# path: kubernetes/robot
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# vid_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: vid
-# path: kubernetes/vid
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# sdnc_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: sdnc
-# path: kubernetes/sdnc
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# portal_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: portal
-# path: kubernetes/portal
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# policy_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: policy
-# path: kubernetes/policy
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
-#
-# appc_app:
-# type: cloudify.onap.kubernetes.App
-# properties:
-# name: appc
-# path: kubernetes/appc
-# options: *app_options
-# relationships:
-# - type: cloudify.kubernetes.relationships.managed_by_master
-# target: kubernetes_master
-# - type: cloudify.relationships.depends_on
-# target: onap_environment
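
The app_options anchor above derives each app's Kubernetes namespace by concatenating the namespace_prefix input, a dash, and the node's name property. A one-line Python sketch of that naming rule (function name chosen for illustration):

    def app_namespace(namespace_prefix, app_name):
        # Reproduce the blueprint's concat intrinsic: '<prefix>-<app name>'.
        return "{0}-{1}".format(namespace_prefix, app_name)

    # e.g. app_namespace("onap", "message-router") -> "onap-message-router"
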
diff --git a/cloudify-onap/docker-custom-readiness/Dockerfile b/cloudify-onap/docker-custom-readiness/Dockerfile
deleted file mode 100644
index d42456d336..0000000000
--- a/cloudify-onap/docker-custom-readiness/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM ubuntu:16.04
-
-ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
-# Setup Corporate proxy
-ENV https_proxy ${HTTPS_PROXY}
-ENV http_proxy ${HTTP_PROXY}
-
-# Additional packages
-RUN apt-get update
-RUN apt-get install -y apt-utils git wget curl dnsutils python python-pip jq net-tools coreutils vim
-
-RUN pip install requests pyyaml kubernetes==1.0.2
-
-
-ENV CERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
-ENV TOKEN="/var/run/secrets/kubernetes.io/serviceaccount/token"
-
-COPY ready.py /root/ready.py
-RUN chmod a+x /root/ready.py
-#ENTRYPOINT /root/ready.py
-
diff --git a/cloudify-onap/docker-custom-readiness/ready.py b/cloudify-onap/docker-custom-readiness/ready.py
deleted file mode 100644
index 22b24d345d..0000000000
--- a/cloudify-onap/docker-custom-readiness/ready.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/python
-#from kubernetes import client, config
-import kubernetes
-import time, argparse, logging, sys, os, base64
-import yaml
-
-#setup logging
-log = logging.getLogger(__name__)
-handler = logging.StreamHandler(sys.stdout)
-handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
-handler.setLevel(logging.DEBUG)
-log.addHandler(handler)
-log.setLevel(logging.DEBUG)
-
-
-def is_ready(container_name):
- log.info( "Checking if " + container_name + " is ready")
-
- kubernetes.config.kube_config.KubeConfigLoader(config_dict=get_k8s_config_env()).load_and_set()
- client = kubernetes.client
- namespace = get_namespace_env()
- v1 = client.CoreV1Api()
-
- ready = False
-
- try:
- response = v1.list_namespaced_pod(namespace=namespace, watch=False)
- for i in response.items:
- for s in i.status.container_statuses:
- if s.name == container_name:
- log.debug ( "response %s" % response )
- ready = s.ready
- if not ready:
- log.info( container_name + " is not ready.")
- else:
- log.info( container_name + " is ready!")
- else:
- continue
- return ready
- except Exception as e:
- log.error("Exception when calling list_namespaced_pod: %s\n" % e)
-
-
-def get_k8s_config_env():
- try:
-        k8s_config_env = os.environ["K8S_CONFIG_B64"]
- decoded = base64.b64decode(k8s_config_env)
- return yaml.load(decoded)
- except KeyError as ke:
- raise Exception("K8S_CONFIG_B64 variable is not set.")
-
-
-def get_namespace_env():
- try:
-        namespace_env = os.environ["NAMESPACE"]
- return namespace_env
- except KeyError as ke:
- raise Exception("NAMESPACE variable is not set.")
-
-
-def main(args):
-
- # args are a list of container names
- for container_name in args:
-        # 10 min, TODO: make configurable
- timeout = time.time() + 60 * 10
- while True:
- ready = is_ready(container_name)
- if ready is True:
- break
- elif time.time() > timeout:
- log.warning( "timed out waiting for '" + container_name + "' to be ready")
- exit(1)
- else:
- time.sleep(5)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='Process some names.')
- parser.add_argument('--container-name', action='append', required=True, help='A container name')
- args = parser.parse_args()
- arg_dict = vars(args)
-
- for arg in arg_dict.itervalues():
- main(arg)
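
ready.py reads the kubeconfig from the K8S_CONFIG_B64 environment variable (base64-encoded YAML), the namespace from NAMESPACE, and one or more --container-name flags. Assuming the same Python 2 environment the Dockerfile above installs, a sketch of how a caller might prepare that environment (the kubeconfig path is an example):

    import base64
    import os
    import subprocess

    def run_readiness_check(kubeconfig_path, namespace, container_names):
        # Read the kubeconfig and base64-encode it, as ready.py expects in K8S_CONFIG_B64.
        with open(kubeconfig_path, "rb") as f:
            encoded = base64.b64encode(f.read())
        env = dict(os.environ, K8S_CONFIG_B64=encoded, NAMESPACE=namespace)
        # Each container to wait for is passed as its own --container-name flag.
        cmd = ["python", "/root/ready.py"]
        for name in container_names:
            cmd += ["--container-name", name]
        return subprocess.call(cmd, env=env)
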
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py
deleted file mode 100644
index 19a30ba43d..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py
deleted file mode 100644
index fc8af2eea4..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/app.py
+++ /dev/null
@@ -1,47 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-
-from cloudify.decorators import operation
-
-from common import namespace
-from common import resources_services
-from common import workarounds
-
-
-@operation
-def configure_docker_secret(**kwargs):
- workarounds.configure_secret()
-
-
-@operation
-def create_namespace(**kwargs):
- namespace.do_create_namespace()
-
-
-@operation
-def create_resources_services(**kwargs):
-    resources_services.create_resources()
-
-
-@operation
-def delete_namespace(**kwargs):
- namespace.do_delete_namespace()
-
-
-@operation
-def delete_resources_services(**kwargs):
-    resources_services.delete_resources()
-
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py
deleted file mode 100644
index 19a30ba43d..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py
deleted file mode 100644
index 493a44f16f..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/constants.py
+++ /dev/null
@@ -1,20 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-HELM_URL = 'https://kubernetes-helm.storage.googleapis.com/helm-canary-linux-amd64.tar.gz'
-OOM_GIT_URL = 'https://gerrit.onap.org/r/oom.git'
-
-RT_HELM_CLI_PATH = "helm_cli_path"
-RT_APPS_ROOT_PATH = "app_root_path"
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py
deleted file mode 100644
index 48d49e0403..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/deployment_result.py
+++ /dev/null
@@ -1,27 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-
-
-def save_deployment_result(key):
- result = ctx.instance.runtime_properties['kubernetes']
- ctx.instance.runtime_properties[key] = result
- ctx.instance.runtime_properties['kubernetes'] = {}
-
-
-def set_deployment_result(key):
- result = ctx.instance.runtime_properties.pop(key)
-    ctx.instance.runtime_properties['kubernetes'] = result
\ No newline at end of file
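
The two helpers above exist because the kubernetes plugin always reads and writes its state under the single 'kubernetes' runtime property; saving parks each result under a per-resource key, and setting restores it before a delete. A minimal sketch of the same shuffle, with a plain dict standing in for ctx.instance.runtime_properties:

    def save_deployment_result(runtime_properties, key):
        # Park the plugin's last result under a per-resource key and clear the shared slot.
        runtime_properties[key] = runtime_properties.get('kubernetes', {})
        runtime_properties['kubernetes'] = {}

    def set_deployment_result(runtime_properties, key):
        # Restore a parked result into the shared slot so the plugin can delete that resource.
        runtime_properties['kubernetes'] = runtime_properties.pop(key)
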
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py
deleted file mode 100644
index 4404f6f832..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/helm.py
+++ /dev/null
@@ -1,62 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import urllib
-import tarfile
-import os
-import tempfile
-from git import Repo
-
-def get_helm_path(url):
- tarball = _fetch_helm(url)
- helm_dir = _get_tmp_file_name()
- _untar_helm_archive(tarball, helm_dir)
- helm_binary_path = _find_file('helm', helm_dir)
- return helm_binary_path
-
-
-def get_apps_root_path(git_url):
- dst_repo_path = _get_tmp_file_name()
- Repo.clone_from(git_url, dst_repo_path)
- apps_root = format(dst_repo_path)
- return apps_root
-
-def _fetch_helm(url):
- dst_tar_path = _get_tmp_file_name()
-
- file = urllib.URLopener()
- file.retrieve(url, dst_tar_path)
-
- return dst_tar_path
-
-def _untar_helm_archive(tar_path, helm_dir):
- helm_tar = tarfile.open(tar_path)
- helm_tar.extractall(helm_dir)
- helm_tar.close()
-
-
-def _find_file(filename, base_path):
- for root, dirs, files in os.walk(base_path):
- for name in files:
- if name == filename:
- return os.path.abspath(os.path.join(root, name))
-
- raise Exception('Cannot find helm binary')
-
-
-def _get_tmp_file_name():
- return '{}/{}'.format(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
-
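
_get_tmp_file_name above relies on tempfile's private helpers (_get_default_tempdir, _get_candidate_names), which work on CPython but are not public API. A hedged alternative sketch using only documented calls (note the behavioural difference: the directory is actually created before the path is returned):

    import tempfile

    def make_scratch_dir(prefix="onap-helm-"):
        # Unlike _get_tmp_file_name, the returned directory already exists on disk.
        return tempfile.mkdtemp(prefix=prefix)
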
-
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py
deleted file mode 100644
index 1376818b7b..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/init_pod.py
+++ /dev/null
@@ -1,63 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-import yaml
-
-import constants
-import resources_services
-
-SERVICES_FILE_PARTS_SEPARATOR = '---'
-
-
-def do_create_init_pod():
- ctx.logger.info('Creating init pod')
-
- yaml_config = resources_services.render_chart(
- ctx.node.properties["init_pod"],
- _retrieve_root_path(),
- _retrieve_helm_cli_path()
- )
- yaml_content_part = yaml_config.split(SERVICES_FILE_PARTS_SEPARATOR)[2]
- enhanced_yaml = _add_openstack_envs(yaml_content_part)
-
- resources_services.create_resource(enhanced_yaml)
-
- ctx.logger.info('Init pod created successfully')
-
-
-def do_delete_init_pod():
- ctx.logger.info('Deleting init pod')
-
- ctx.logger.info('Init pod deleted successfully')
-
-def _add_openstack_envs(yaml_content):
- input_dict = yaml.load(yaml_content)
-
- container_dict = input_dict['spec']['containers'][0]
-    container_dict.pop('envFrom', None)
-
- openstack_envs = ctx.node.properties["openstack_envs"]
-    for name, value in openstack_envs.items():
-        ctx.logger.debug("adding env var {}={}".format(name, value))
-        container_dict['env'].append({'name': name, 'value': value})
-
- return input_dict
-
-def _retrieve_root_path():
- return ctx.instance.runtime_properties.get(constants.RT_APPS_ROOT_PATH, None)
-
-def _retrieve_helm_cli_path():
-    return ctx.instance.runtime_properties.get(constants.RT_HELM_CLI_PATH, None)
\ No newline at end of file
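
The container 'env' list that _add_openstack_envs extends must contain Kubernetes EnvVar mappings, i.e. dicts with 'name' and 'value' keys. A tiny sketch of that conversion (the example input is illustrative only):

    def to_env_entries(openstack_envs):
        # Convert a flat {name: value} mapping into Kubernetes-style env entries.
        return [{'name': name, 'value': value} for name, value in openstack_envs.items()]

    # e.g. to_env_entries({'OPENSTACK_REGION': 'RegionOne'})
    #      -> [{'name': 'OPENSTACK_REGION', 'value': 'RegionOne'}]
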
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py
deleted file mode 100644
index d1336768ac..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/namespace.py
+++ /dev/null
@@ -1,101 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import cloudify_kubernetes.tasks as kubernetes_plugin
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-import deployment_result
-
-
-def do_create_namespace():
- namespace = _retrieve_namespace()
- ctx.logger.info('Creating namespace: {0}'.format(namespace))
-
- namespace_resource_template = _prepare_namespace_resource_template(
- namespace
- )
-
- ctx.logger.debug(
- 'Kubernetes object which will be deployed: {0}'
- .format(namespace_resource_template)
- )
-
- kubernetes_plugin.custom_resource_create(**namespace_resource_template)
- deployment_result.save_deployment_result('namespace')
- ctx.logger.info('Namespace created successfully')
-
-
-def do_delete_namespace():
- namespace = _retrieve_namespace()
- ctx.logger.info('Deleting namespace: {0}'.format(namespace))
-
- namespace_resource_template = _prepare_namespace_resource_template(
- namespace
- )
-
- ctx.logger.debug(
- 'Kubernetes object which will be deleted: {0}'
- .format(namespace_resource_template)
- )
-
- deployment_result.set_deployment_result('namespace')
- kubernetes_plugin.custom_resource_delete(**namespace_resource_template)
- ctx.logger.info('Namespace deleted successfully')
-
-
-
-def _retrieve_namespace():
-
- default_namespace = ctx.node.properties.get('options', {}).get('namespace')
- namespace = ctx.node.properties.get('namespace', default_namespace)
-
- if not namespace:
- raise NonRecoverableError(
- 'Namespace is not defined (node={})'.format(ctx.node.name)
- )
-
- return namespace
-
-
-def _prepare_namespace_resource_template(name):
- return {
- 'definition': {
- 'apiVersion': 'v1',
- 'kind': 'Namespace',
- 'metadata': {
- 'name': name,
- 'labels': {
- 'name': name
- },
- },
- },
- 'api_mapping': {
- 'create': {
- 'api': 'CoreV1Api',
- 'method': 'create_namespace',
- 'payload': 'V1Namespace'
- },
- 'read': {
- 'api': 'CoreV1Api',
- 'method': 'read_namespace',
- },
- 'delete': {
- 'api': 'CoreV1Api',
- 'method': 'delete_namespace',
- 'payload': 'V1DeleteOptions'
- }
- }
- }
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py
deleted file mode 100644
index 268068f00c..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/resources_services.py
+++ /dev/null
@@ -1,230 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import subprocess
-
-import cloudify_kubernetes.tasks as kubernetes_plugin
-import yaml
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-import constants
-import deployment_result
-import time
-import ast
-import json
-import base64
-
-SERVICES_FILE_PARTS_SEPARATOR = '---'
-
-
-def create_resources():
- ctx.logger.info('Creating resources')
- apps_path = _retrieve_root_path()
-
- if not apps_path:
- ctx.logger.warn(
- 'Apps dir is not defined. Skipping!'
- )
-
- return
-
- helm_app = ctx.node.properties.get('path', None)
-
- yaml_file = prepare_content(helm_app)
-
- yaml_content_parts = yaml_file.split(SERVICES_FILE_PARTS_SEPARATOR)
-
- for yaml_content_part in yaml_content_parts:
- if yaml_content_part:
- yaml_content = _apply_readiness_workaround(yaml_content_part)
- if yaml_content:
- create_resource(yaml_content)
-
-    ctx.logger.info('Resources created successfully')
-
-def delete_resources():
-
- ctx.logger.info('Deleting resources')
- apps_path = _retrieve_root_path()
-
- if not apps_path:
- ctx.logger.warn(
- 'Apps dir is not defined. Skipping!'
- )
- return
-
- helm_app = ctx.node.properties.get('path', None)
-
- yaml_file = prepare_content(helm_app)
-
- yaml_content_parts = yaml_file.split(SERVICES_FILE_PARTS_SEPARATOR)
-
- for yaml_content_part in yaml_content_parts:
- if yaml_content_part:
- yaml_content = _apply_readiness_workaround(yaml_content_part)
- if yaml_content:
- delete_resource(yaml_content)
-
- ctx.logger.info('Resources deleted successfully')
-
-
-def prepare_content(resource):
- helm_path = _retrieve_helm_cli_path()
- yaml_file = render_chart(resource, _retrieve_root_path(), helm_path)
-
- return yaml_file
-
-
-def create_resource(yaml_content_dict):
- ctx.logger.debug("Loading yaml: {}".format(yaml_content_dict))
-
- if yaml_content_dict.get('kind', '') == 'PersistentVolumeClaim':
- ctx.logger.debug("PersistentVolumeClaim custom handling")
- kubernetes_plugin.custom_resource_create(definition=yaml_content_dict, api_mapping=_get_persistent_volume_mapping_claim_api())
- else:
- kubernetes_plugin.resource_create(definition=yaml_content_dict)
-
- deployment_result.save_deployment_result('resource_{0}'.format(yaml_content_dict['metadata']['name']))
-
-def delete_resource(yaml_content_dict):
- ctx.logger.debug("Loading yaml: {}".format(yaml_content_dict))
-
- deployment_result.save_deployment_result('resource_{0}'.format(yaml_content_dict['metadata']['name']))
- if yaml_content_dict.get('kind', '') == 'PersistentVolumeClaim':
- ctx.logger.debug("PersistentVolumeClaim custom handling")
- kubernetes_plugin.custom_resource_delete(definition=yaml_content_dict, api_mapping=_get_persistent_volume_mapping_claim_api())
- else:
- kubernetes_plugin.resource_delete(definition=yaml_content_dict)
-
-
-def render_chart(app, app_root_path, helm_cli_path):
- app_chart_path = "{}/{}/".format(app_root_path, app)
- ctx.logger.debug('App chart path = {}'.format(app_chart_path))
- return _exec_helm_template(helm_cli_path, app_chart_path)
-
-
-def _exec_helm_template(helm_path, chart):
- cmd = '{0} template {1}'.format(helm_path, chart)
- ctx.logger.debug('Executing helm template cmd: {}'.format(cmd))
- rendered = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE).stdout.read().decode()
-
- return rendered
-
-def _get_persistent_volume_mapping_claim_api():
- api_mapping = {
- 'create' : {
- 'api': 'CoreV1Api',
- 'method': 'create_namespaced_persistent_volume_claim',
- 'payload': 'V1PersistentVolumeClaim'
- },
- 'read' : {
- 'api': 'CoreV1Api',
- 'method': 'read_namespaced_persistent_volume_claim',
- },
- 'delete': {
- 'api': 'CoreV1Api',
- 'method': 'delete_namespaced_persistent_volume_claim',
- 'payload': 'V1DeleteOptions'
- }
- }
-
- return api_mapping
-
-
-def _apply_readiness_workaround(yaml_file):
- b64_env = _get_k8s_b64_env()
-
- input_dict = yaml.load(yaml_file)
-
- try:
- init_containers = input_dict['spec']['template']['metadata']['annotations'][
- 'pod.beta.kubernetes.io/init-containers']
-            init_cont_list = json.loads(init_containers)
-
- new_init_cont_list = list()
- new_cont = None
- for init_cont in init_cont_list:
- if "oomk8s/readiness-check" in init_cont['image']:
- init_cont['image'] = "clfy/oomk8s-cfy-readiness-check:1.0.1"
- #init_cont['imagePullPolicy'] = "IfNotPresent"
- init_cont['env'].append(b64_env)
- new_cont = init_cont
- new_init_cont_list.append(json.dumps(init_cont))
-
- new_payload = ",".join(new_init_cont_list)
-
- if new_cont:
- input_dict['spec']['template']['metadata']['annotations'].pop('pod.beta.kubernetes.io/init-containers')
- input_dict['spec']['template']['metadata']['annotations']['pod.beta.kubernetes.io/init-containers'] = '[{}]'.format(new_payload)
-
-
- except KeyError as ke:
- ctx.logger.debug('Readiness section is not found.')
-
- return input_dict
-
-
-def _get_k8s_b64():
- target_relationship = _retrieve_managed_by_master()
-
- k8s_config = target_relationship.node.properties.get('configuration').get('file_content')
-
- if not k8s_config:
- raise Exception("Cannot find kubernetes config")
-
- k8s_config_plain = yaml.dump(k8s_config, allow_unicode=True)
-
- k8s_config_b64 = base64.b64encode(k8s_config_plain)
-
- return k8s_config_b64
-
-
-def _get_k8s_b64_env():
- env = dict()
- env['name'] = 'K8S_CONFIG_B64'
- env['value'] = _get_k8s_b64()
- return env
-
-
-def _retrieve_root_path():
- target_relationship = _retrieve_depends_on()
-
- apps_root_path = target_relationship.instance.runtime_properties.get(constants.RT_APPS_ROOT_PATH, None)
-
- ctx.logger.debug("Retrived apps root path = {}".format(apps_root_path))
-
- return apps_root_path
-
-def _retrieve_helm_cli_path():
- target_relationship = _retrieve_depends_on()
-
- helm_cli_path = target_relationship.instance.runtime_properties.get(constants.RT_HELM_CLI_PATH, None)
-
- ctx.logger.debug("Retrived helm clis path = {}".format(helm_cli_path))
-
- return helm_cli_path
-
-def _retrieve_depends_on():
- result = None
- for relationship in ctx.instance.relationships:
- if relationship.type == 'cloudify.relationships.depends_on':
- return relationship.target
-
-def _retrieve_managed_by_master():
- result = None
- for relationship in ctx.instance.relationships:
- if relationship.type == 'cloudify.kubernetes.relationships.managed_by_master':
- return relationship.target
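
create_resources splits the multi-document text produced by 'helm template' on the '---' separator and processes each document on its own; PyYAML's safe_load_all does the same split-and-parse in one call. A sketch under that assumption (the manifest string is only an example):

    import yaml

    def parse_rendered_chart(rendered_text):
        # Split 'helm template' output into parsed manifests, dropping empty documents.
        return [doc for doc in yaml.safe_load_all(rendered_text) if doc]

    # e.g. parse_rendered_chart("---\nkind: Namespace\nmetadata:\n  name: onap\n---\n")
    #      -> [{'kind': 'Namespace', 'metadata': {'name': 'onap'}}]
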
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py
deleted file mode 100644
index fe3e892c5b..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/common/workarounds.py
+++ /dev/null
@@ -1,67 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-from fabric import api as fabric_api
-
-def _retrieve_namespace():
- namespace = ctx.node.properties.get(
- 'namespace',
- ctx.node.properties
- .get('options', {})
- .get('namespace', None)
- )
-
- if not namespace:
- raise NonRecoverableError(
- 'Namespace is not defined (node={})'.format(ctx.node.name)
- )
-
- return namespace
-
-
-def configure_secret():
- namespace = _retrieve_namespace()
- ctx.logger.info(
- 'Configuring docker secrets for namespace: {0}'.format(namespace)
- )
-
- command = 'kubectl create secret ' \
- 'docker-registry onap-docker-registry-key ' \
- '--docker-server=nexus3.onap.org:10001 ' \
- '--docker-username=docker ' \
- '--docker-password=docker ' \
- '--docker-email=email@email.com ' \
- '--namespace={0}'.format(namespace)
-
- ctx.logger.info('Command "{0}" will be executed'.format(command))
-
- with fabric_api.settings(
- **ctx.node.properties.get('ssh_credentials')):
- fabric_api.run(command)
-
- ctx.logger.info('Docker secrets configured successfully')
-
-
-def _get_fabric_env():
- result = dict()
-
- result['host_string'] = ctx.node.properties.get('ssh_credentials')['host_string']
- result['user'] = ctx.node.properties.get('ssh_credentials')['user']
- result['key'] = ctx.node.properties.get('ssh_credentials')['key']
-
- return result
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py
deleted file mode 100644
index 741f28ec32..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/environment.py
+++ /dev/null
@@ -1,58 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-
-from cloudify import ctx
-from cloudify.decorators import operation
-
-from common import constants
-from common import helm
-from common import init_pod, namespace
-
-
-@operation
-def create_init_pod(**kwargs):
- init_pod.do_create_init_pod()
- pass
-
-
-@operation
-def create_namespace(**kwargs):
- namespace.do_create_namespace()
-
-
-@operation
-def delete_init_pod(**kwargs):
- init_pod.do_delete_init_pod()
-
-
-@operation
-def delete_namespace(**kwargs):
- namespace.do_delete_namespace()
-
-
-@operation
-def setup_helm_templates(**kwargs):
- helm_url = constants.HELM_URL
- ctx.instance.runtime_properties[constants.RT_HELM_CLI_PATH] = helm.get_helm_path(helm_url)
- ctx.logger.debug('Helm cli path = {}'.format(ctx.instance.runtime_properties[constants.RT_HELM_CLI_PATH]))
-
- oom_git_url = constants.OOM_GIT_URL
- ctx.instance.runtime_properties[constants.RT_APPS_ROOT_PATH] = helm.get_apps_root_path(oom_git_url)
- ctx.logger.debug('Apps root path = {}'.format(ctx.instance.runtime_properties[constants.RT_APPS_ROOT_PATH]))
-
-
-
-
diff --git a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt b/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt
deleted file mode 100644
index 9d8d880932..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/k8s_installer/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip
\ No newline at end of file
diff --git a/cloudify-onap/plugins/onap-installation-plugin/plugin.yaml b/cloudify-onap/plugins/onap-installation-plugin/plugin.yaml
deleted file mode 100755
index f88b50bc78..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/plugin.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-plugins:
- onap:
- executor: central_deployment_agent
- package_name: onap-installation-plugin
- package_version: '1.0.0'
- source: onap-installation-plugin
- # install_arguments: '--process-dependency-links'
-
-node_types:
- cloudify.onap.kubernetes.Environment:
- derived_from: cloudify.nodes.Root
- properties:
- namespace:
- type: string
- init_pod:
- type: string
- description: >
- Path to init pod chart
- openstack_envs:
- description: >
- ONAP parameters defined at the onap-parameters.yaml file
- default: {}
- options:
- description: >
- For compatibility with kubernetes plugin.
- To be removed in the future.
- default: {}
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- implementation: onap.k8s_installer.environment.create_namespace
- executor: central_deployment_agent
- configure:
- implementation: onap.k8s_installer.environment.setup_helm_templates
- executor: central_deployment_agent
- start:
- implementation: onap.k8s_installer.environment.create_init_pod
- executor: central_deployment_agent
- stop:
- implementation: onap.k8s_installer.environment.delete_init_pod
- executor: central_deployment_agent
- delete:
- implementation: onap.k8s_installer.environment.delete_namespace
- executor: central_deployment_agent
-
- cloudify.onap.kubernetes.App:
- derived_from: cloudify.nodes.Root
- properties:
- name:
- type: string
- description: >
- Name of ONAP app
- path:
- type: string
- description: >
- Paths (relative, blueprint prespective) to app chart directory
- required: false
- options:
- description: >
- For compatibility with kubernetes plugin.
- To be removed in the future.
- default: {}
- ssh_credentials:
- description: >
- Workaround for docker secret settings
- via fabric plugin
- default:
- host_string: { get_secret: kubernetes_master_ip }
- user: { get_secret: agent_user }
- key: { get_secret: agent_key_private }
- interfaces:
- cloudify.interfaces.lifecycle:
- create:
- implementation: onap.k8s_installer.app.create_namespace
- executor: central_deployment_agent
- configure:
- implementation: onap.k8s_installer.app.configure_docker_secret
- executor: central_deployment_agent
- start:
- implementation: onap.k8s_installer.app.create_resources_services
- executor: central_deployment_agent
- stop:
- implementation: onap.k8s_installer.app.delete_resources_services
- executor: central_deployment_agent
- delete:
- implementation: onap.k8s_installer.app.delete_namespace
- executor: central_deployment_agent
diff --git a/cloudify-onap/plugins/onap-installation-plugin/requirements.txt b/cloudify-onap/plugins/onap-installation-plugin/requirements.txt
deleted file mode 100644
index 9d8d880932..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip \ No newline at end of file
diff --git a/cloudify-onap/plugins/onap-installation-plugin/setup.py b/cloudify-onap/plugins/onap-installation-plugin/setup.py
deleted file mode 100755
index d413daa4ac..0000000000
--- a/cloudify-onap/plugins/onap-installation-plugin/setup.py
+++ /dev/null
@@ -1,40 +0,0 @@
-########
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-
-from setuptools import setup
-
-try:
- import cloudify_kubernetes
-except ImportError:
- import pip
- pip.main(['install', 'https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1.zip'])
-
-setup(
- name='onap-installation-plugin',
- version='1.0.0',
- author='',
- author_email='',
- packages=['k8s_installer', 'k8s_installer.common'],
- install_requires=[
- 'cloudify-plugins-common>=3.3.1',
- 'cloudify-kubernetes-plugin==1.2.1',
- #'/tmp/k8spl/cloudify-kubernetes-plugin'
- 'pyyaml',
- 'gitpython',
- 'paramiko==1.18.3',
- 'fabric==1.13.1'
- ]
-)
diff --git a/docs/Access_Rancher_server_via_web_browser.jpeg b/docs/Access_Rancher_server_via_web_browser.jpeg
new file mode 100644
index 0000000000..103fc2731f
--- /dev/null
+++ b/docs/Access_Rancher_server_via_web_browser.jpeg
Binary files differ
diff --git a/docs/Add_Kubernetes_Environment_to_Rancher.png b/docs/Add_Kubernetes_Environment_to_Rancher.png
new file mode 100644
index 0000000000..0d60f156fb
--- /dev/null
+++ b/docs/Add_Kubernetes_Environment_to_Rancher.png
Binary files differ
diff --git a/docs/Apply_customization_script_for_the_Rancher_VM.jpeg b/docs/Apply_customization_script_for_the_Rancher_VM.jpeg
new file mode 100644
index 0000000000..832dda3404
--- /dev/null
+++ b/docs/Apply_customization_script_for_the_Rancher_VM.jpeg
Binary files differ
diff --git a/docs/Click_create.jpeg b/docs/Click_create.jpeg
new file mode 100644
index 0000000000..fad185674c
--- /dev/null
+++ b/docs/Click_create.jpeg
Binary files differ
diff --git a/docs/Click_on_CLI_and_then_click_on_Generate_Config.jpeg b/docs/Click_on_CLI_and_then_click_on_Generate_Config.jpeg
new file mode 100644
index 0000000000..fec6a35433
--- /dev/null
+++ b/docs/Click_on_CLI_and_then_click_on_Generate_Config.jpeg
Binary files differ
diff --git a/docs/Click_on_Close_button.jpeg b/docs/Click_on_Close_button.jpeg
new file mode 100644
index 0000000000..cb02480923
--- /dev/null
+++ b/docs/Click_on_Close_button.jpeg
Binary files differ
diff --git a/docs/Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg b/docs/Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg
new file mode 100644
index 0000000000..bb0f60c4eb
--- /dev/null
+++ b/docs/Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg
Binary files differ
diff --git a/docs/K8s-Assign_Floating_IP_for_external_access.jpeg b/docs/K8s-Assign_Floating_IP_for_external_access.jpeg
new file mode 100644
index 0000000000..fe84d11707
--- /dev/null
+++ b/docs/K8s-Assign_Floating_IP_for_external_access.jpeg
Binary files differ
diff --git a/docs/K8s-Key_Pair.jpeg b/docs/K8s-Key_Pair.jpeg
new file mode 100644
index 0000000000..1e335d36a6
--- /dev/null
+++ b/docs/K8s-Key_Pair.jpeg
Binary files differ
diff --git a/docs/K8s-Launch_Instance.jpeg b/docs/K8s-Launch_Instance.jpeg
new file mode 100644
index 0000000000..7e2122c429
--- /dev/null
+++ b/docs/K8s-Launch_Instance.jpeg
Binary files differ
diff --git a/docs/K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg b/docs/K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg
new file mode 100644
index 0000000000..8d154d4a2d
--- /dev/null
+++ b/docs/K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg
Binary files differ
diff --git a/docs/K8s-Manage_Floating_IP_Associations.jpeg b/docs/K8s-Manage_Floating_IP_Associations.jpeg
new file mode 100644
index 0000000000..f15e5e34c7
--- /dev/null
+++ b/docs/K8s-Manage_Floating_IP_Associations.jpeg
Binary files differ
diff --git a/docs/K8s-Networking.jpeg b/docs/K8s-Networking.jpeg
new file mode 100644
index 0000000000..ee4cafca54
--- /dev/null
+++ b/docs/K8s-Networking.jpeg
Binary files differ
diff --git a/docs/K8s-Security_Group.jpeg b/docs/K8s-Security_Group.jpeg
new file mode 100644
index 0000000000..5e852185b2
--- /dev/null
+++ b/docs/K8s-Security_Group.jpeg
Binary files differ
diff --git a/docs/K8s-Select_Flavor.jpeg b/docs/K8s-Select_Flavor.jpeg
new file mode 100644
index 0000000000..c3cf4b2c83
--- /dev/null
+++ b/docs/K8s-Select_Flavor.jpeg
Binary files differ
diff --git a/docs/K8s-Select_Ubuntu_16.04_as_base_image.jpeg b/docs/K8s-Select_Ubuntu_16.04_as_base_image.jpeg
new file mode 100644
index 0000000000..e9a03e782c
--- /dev/null
+++ b/docs/K8s-Select_Ubuntu_16.04_as_base_image.jpeg
Binary files differ
diff --git a/docs/Rancher-Allocate_Floating_IP.jpeg b/docs/Rancher-Allocate_Floating_IP.jpeg
new file mode 100644
index 0000000000..9d6ff3ff95
--- /dev/null
+++ b/docs/Rancher-Allocate_Floating_IP.jpeg
Binary files differ
diff --git a/docs/Rancher-Key_Pair.jpeg b/docs/Rancher-Key_Pair.jpeg
new file mode 100644
index 0000000000..1bcb6b6eb7
--- /dev/null
+++ b/docs/Rancher-Key_Pair.jpeg
Binary files differ
diff --git a/docs/Rancher-Launch_Instance.jpeg b/docs/Rancher-Launch_Instance.jpeg
new file mode 100644
index 0000000000..38943699e6
--- /dev/null
+++ b/docs/Rancher-Launch_Instance.jpeg
Binary files differ
diff --git a/docs/Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg b/docs/Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg
new file mode 100644
index 0000000000..2b3fde3fb7
--- /dev/null
+++ b/docs/Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg
Binary files differ
diff --git a/docs/Rancher-Manage_Floating_IP_Associations.jpeg b/docs/Rancher-Manage_Floating_IP_Associations.jpeg
new file mode 100644
index 0000000000..ae062c1d39
--- /dev/null
+++ b/docs/Rancher-Manage_Floating_IP_Associations.jpeg
Binary files differ
diff --git a/docs/Rancher-Networking.jpeg b/docs/Rancher-Networking.jpeg
new file mode 100644
index 0000000000..16ef380ef5
--- /dev/null
+++ b/docs/Rancher-Networking.jpeg
Binary files differ
diff --git a/docs/Rancher-Security_Groups.jpeg b/docs/Rancher-Security_Groups.jpeg
new file mode 100644
index 0000000000..c8ff2e8d76
--- /dev/null
+++ b/docs/Rancher-Security_Groups.jpeg
Binary files differ
diff --git a/docs/Rancher-Select_Flavor.jpeg b/docs/Rancher-Select_Flavor.jpeg
new file mode 100644
index 0000000000..a3f35b53ce
--- /dev/null
+++ b/docs/Rancher-Select_Flavor.jpeg
Binary files differ
diff --git a/docs/Rancher-Select_Ubuntu_16.04_as_base_image.jpeg b/docs/Rancher-Select_Ubuntu_16.04_as_base_image.jpeg
new file mode 100644
index 0000000000..b80fae4a5c
--- /dev/null
+++ b/docs/Rancher-Select_Ubuntu_16.04_as_base_image.jpeg
Binary files differ
diff --git a/docs/Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg b/docs/Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg
new file mode 100644
index 0000000000..6d62b99f55
--- /dev/null
+++ b/docs/Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg
Binary files differ
diff --git a/docs/Select_Add_Environment.png b/docs/Select_Add_Environment.png
new file mode 100644
index 0000000000..48fffa41bd
--- /dev/null
+++ b/docs/Select_Add_Environment.png
Binary files differ
diff --git a/docs/and_click_on_Save_accept_defaults.jpeg b/docs/and_click_on_Save_accept_defaults.jpeg
new file mode 100644
index 0000000000..9b57927aac
--- /dev/null
+++ b/docs/and_click_on_Save_accept_defaults.jpeg
Binary files differ
diff --git a/docs/index.rst b/docs/index.rst
index 712802d619..340b43be67 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -14,3 +14,4 @@ OOM Documentation Repository
oom_developer_guide.rst
oom_cloud_setup_guide.rst
release-notes.rst
+ oom_setup_kubernetes_rancher.rst
diff --git a/docs/k8s-topology.jpg b/docs/k8s-topology.jpg
new file mode 100644
index 0000000000..eba8e72b68
--- /dev/null
+++ b/docs/k8s-topology.jpg
Binary files differ
diff --git a/docs/master_nfs_node.sh b/docs/master_nfs_node.sh
new file mode 100644
index 0000000000..4a7a8dbc12
--- /dev/null
+++ b/docs/master_nfs_node.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+usage () {
+ echo "Usage:"
+ echo " ./$(basename $0) node1_ip node2_ip ... nodeN_ip"
+ exit 1
+}
+
+if [ "$#" -lt 1 ]; then
+ echo "Missing NFS slave nodes"
+ usage
+fi
+
+# Install NFS kernel server
+sudo apt-get update
+sudo apt-get install -y nfs-kernel-server
+
+#Create /dockerdata-nfs and set permissions
+sudo mkdir -p /dockerdata-nfs
+sudo chmod 777 -R /dockerdata-nfs
+sudo chown nobody:nogroup /dockerdata-nfs/
+
+#Update the /etc/exports
+NFS_EXP=""
+for i in "$@"; do
+ NFS_EXP+="$i(rw,sync,no_root_squash,no_subtree_check) "
+done
+echo "/dockerdata-nfs "$NFS_EXP | sudo tee -a /etc/exports
+
+#Restart the NFS service
+sudo exportfs -a
+sudo systemctl restart nfs-kernel-server
diff --git a/docs/oom_cloud_setup_guide.rst b/docs/oom_cloud_setup_guide.rst
index 7582a4a03d..73509669e8 100644
--- a/docs/oom_cloud_setup_guide.rst
+++ b/docs/oom_cloud_setup_guide.rst
@@ -12,6 +12,7 @@
.. _Setting Up Kubernetes with Rancher: https://wiki.onap.org/display/DW/ONAP+on+Kubernetes+on+Rancher
.. _Setting Up Kubernetes with Kubeadm: https://wiki.onap.org/display/DW/Deploying+Kubernetes+Cluster+with+kubeadm
.. _Setting Up Kubernetes with Cloudify: https://wiki.onap.org/display/DW/ONAP+on+Kubernetes+on+Cloudify
+.. _ONAP on Kubernetes Wiki: https://wiki.onap.org/display/DW/ONAP+on+Kubernetes
.. figure:: oomLogoV2-medium.png
:align: right
@@ -63,26 +64,38 @@ least three if not six nodes to ensure there is no single point of failure.
===== ===== ====== ====================
RAM HD vCores Ports
===== ===== ====== ====================
- 128GB 160GB 16 0.0.0.0/0 (all open)
+ 128GB 160GB 32 0.0.0.0/0 (all open)
===== ===== ====== ====================
+.. note::
+  Kubernetes supports a maximum of 110 pods per node, which means at least two
+  nodes are needed to deploy all of ONAP, although at least three are
+  recommended (for example, 4 x 32GB nodes with 8 vCores each). Subsets of
+  ONAP may still be deployed on a single node. A quick way to check the
+  per-node pod capacity of an existing cluster is shown below.
+
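+As an illustrative check (not required for the installation), the pod capacity
+advertised by each node of an existing cluster can be listed with kubectl::
+
+   kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.allocatable.pods}{"\n"}{end}'
+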
Cloud Installation
==================
-#. OOM supports deployment on major public clouds. The following guides
- provide instructions on how to deploy ONAP on these clouds:
-
- - `Microsoft Azure`_,
- - `Amazon AWS`_,
- - `Google GCE`_,
- - `VMware VIO`_,
- - IBM, and
- - `Openstack`_.
-
-#. Alternatively, OOM can be deployed on a private set of physical hosts or VMs
- (or even a combination of the two). The following guides describe how to
- create a Kubernetes cluster with popular tools:
-
- - `Setting up Kubernetes with Rancher`_ (recommended)
- - `Setting up Kubernetes with Kubeadm`_
- - `Setting up Kubernetes with Cloudify`_
+.. #. OOM supports deployment on major public clouds. The following guides
+.. provide instructions on how to deploy ONAP on these clouds:
+..
+.. - `Microsoft Azure`_,
+.. - `Amazon AWS`_,
+.. - `Google GCE`_,
+.. - `VMware VIO`_,
+.. - IBM, and
+.. - `Openstack`_.
+..
+.. #. Alternatively, OOM can be deployed on a private set of physical hosts or VMs
+.. (or even a combination of the two). The following guides describe how to
+.. create a Kubernetes cluster with popular tools:
+..
+.. - `Setting up Kubernetes with Rancher`_ (recommended)
+.. - `Setting up Kubernetes with Kubeadm`_
+.. - `Setting up Kubernetes with Cloudify`_
+
+OOM can be deployed on a private set of physical hosts or VMs (or even a
+combination of the two). The following guide describes the recommended method
+to set up a Kubernetes cluster: :ref:`onap-on-kubernetes-with-rancher`.
+
+There are alternative deployment methods described on the `ONAP on Kubernetes Wiki`_.
diff --git a/docs/oom_quickstart_guide.rst b/docs/oom_quickstart_guide.rst
index eb5d373721..f85d33bb47 100644
--- a/docs/oom_quickstart_guide.rst
+++ b/docs/oom_quickstart_guide.rst
@@ -16,7 +16,7 @@ available), follow the following instructions to deploy ONAP.
**Step 1.** Clone the OOM repository from ONAP gerrit::
- > git clone http://gerrit.onap.org/r/oom
+ > git clone -b beijing http://gerrit.onap.org/r/oom
> cd oom/kubernetes
@@ -130,6 +130,13 @@ may want to selectively enable or disable ONAP components by changing the
vnfsdk:
enabled: true
+.. note::
+   To generate openStackEncryptedPasswordHere:
+
+ root@olc-rancher:~# cd so/resources/config/mso/
+
+ root@olc-rancher:~/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p
+
**Step 3.** To setup a local Helm server to server up the ONAP charts::
> helm serve &
diff --git a/docs/oom_setup_kubernetes_rancher.rst b/docs/oom_setup_kubernetes_rancher.rst
new file mode 100644
index 0000000000..8e55bf1b66
--- /dev/null
+++ b/docs/oom_setup_kubernetes_rancher.rst
@@ -0,0 +1,478 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2018 Amdocs, Bell Canada
+
+.. Links
+.. _HELM Best Practices Guide: https://docs.helm.sh/chart_best_practices/#requirements
+.. _kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
+.. _Kubernetes documentation for emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
+.. _Docker DevOps: https://wiki.onap.org/display/DW/Docker+DevOps#DockerDevOps-DockerBuild
+.. _http://cd.onap.info:30223/mso/logging/debug: http://cd.onap.info:30223/mso/logging/debug
+.. _Onboarding and Distributing a Vendor Software Product: https://wiki.onap.org/pages/viewpage.action?pageId=1018474
+.. _README.md: https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/README.md
+
+.. figure:: oomLogoV2-medium.png
+ :align: right
+
+.. _onap-on-kubernetes-with-rancher:
+
+ONAP on Kubernetes with Rancher
+###############################
+
+The following instructions will step you through the installation of Kubernetes
+on an OpenStack environment with Rancher. The development lab used for this
+installation is the ONAP Windriver lab.
+
+This guide does not cover all of the steps required to set up your OpenStack
+environment (e.g. OAM networks and security groups), but there is a wealth of
+OpenStack information on the web.
+
+Rancher Installation
+====================
+
+The following instructions describe how to create an OpenStack VM running
+Rancher. This node will not be used to host ONAP itself; it will be used
+exclusively by Rancher.
+
+Launch new VM instance to host the Rancher Server
+-------------------------------------------------
+
+.. image:: Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg
+
+Select Ubuntu 16.04 as base image
+---------------------------------
+Select "No" on "Create New Volume"
+
+.. image:: Rancher-Select_Ubuntu_16.04_as_base_image.jpeg
+
+Select Flavor
+-------------
+Known issues exist if the flavor is too small for Rancher. Please select a
+flavor with at least 4 vCPUs and 8GB of RAM.
+
+.. image:: Rancher-Select_Flavor.jpeg
+
+Networking
+----------
+
+.. image:: Rancher-Networking.jpeg
+
+Security Groups
+---------------
+
+.. image:: Rancher-Security_Groups.jpeg
+
+Key Pair
+--------
+Use an existing key pair (e.g. onap_key), import one, or create a new one to
+assign.
+
+.. image:: Rancher-Key_Pair.jpeg
+
+Apply customization script for the Rancher VM
+---------------------------------------------
+
+Click :download:`openstack-rancher.sh <openstack-rancher.sh>` to download the script.
+
+.. literalinclude:: openstack-rancher.sh
+ :language: bash
+
+This customization script will:
+
+* set up root access to the VM (comment out if you wish to disable this
+  capability and restrict access to ssh keys only)
+* install docker
+* install rancher
+* install kubectl
+* install helm
+* install nfs server
+
+.. note::
+  The Beijing release of OOM only supports Helm 2.8.2, not the 2.7.2 shown in
+  the screen capture below. The supported versions of all the software
+  components are listed in the :ref:`cloud-setup-guide-label`.
+
+.. image:: Apply_customization_script_for_the_Rancher_VM.jpeg
+
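+Once the VM is up, a quick way to confirm that the customization script
+completed is to log in and check the installed tools (a sanity-check sketch;
+exact output depends on your image and network)::
+
+   docker --version
+   sudo docker ps --filter name=rancher_server    # Rancher server container
+   kubectl version --client --short
+   helm version --client --short                  # client only; tiller comes later
+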
+Launch Instance
+---------------
+
+.. image:: Rancher-Launch_Instance.jpeg
+
+Assign Floating IP for external access
+--------------------------------------
+
+.. image:: Rancher-Allocate_Floating_IP.jpeg
+
+.. image:: Rancher-Manage_Floating_IP_Associations.jpeg
+
+.. image:: Rancher-Launch_Instance.jpeg
+
+Kubernetes Installation
+=======================
+
+Launch new VM instance(s) to create a Kubernetes single host or cluster
+-----------------------------------------------------------------------
+
+To create a cluster:
+
+.. note::
+  #. do not append a '-1' suffix to the instance name (e.g. sb4-k8s)
+  #. increase the count to the number of Kubernetes worker nodes you want (e.g. 3)
+
+.. image:: K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg
+
+Select Ubuntu 16.04 as base image
+---------------------------------
+Select "No" on "Create New Volume"
+
+.. image:: K8s-Select_Ubuntu_16.04_as_base_image.jpeg
+
+Select Flavor
+-------------
+The size of a Kubernetes host depends on the size of the ONAP deployment that
+will be installed.
+
+As of the Beijing release, a minimum of 3 x 32GB hosts is needed to run a
+full ONAP deployment (all components).
+
+If only a small subset of ONAP components is being deployed for testing
+purposes, then a single 16GB or 32GB host should suffice.
+
+.. image:: K8s-Select_Flavor.jpeg
+
+Networking
+-----------
+
+.. image:: K8s-Networking.jpeg
+
+Security Group
+---------------
+
+.. image:: K8s-Security_Group.jpeg
+
+Key Pair
+--------
+Use an existing key pair (e.g. onap_key), import one, or create a new one to
+assign.
+
+.. image:: K8s-Key_Pair.jpeg
+
+Apply customization script for Kubernetes VM(s)
+-----------------------------------------------
+
+Click :download:`openstack-k8s-node.sh <openstack-k8s-node.sh>` to
+download the script.
+
+.. literalinclude:: openstack-k8s-node.sh
+ :language: bash
+
+This customization script will:
+
+* set up root access to the VM (comment out if you wish to disable this
+  capability and restrict access to ssh keys only)
+* install docker
+* install kubectl
+* install helm
+* install nfs common (see the NFS configuration section below)
+
+.. note::
+ Ensure you are using the correct versions as described in the
+ :ref:`cloud-setup-guide-label`
+
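+Once a Kubernetes VM is up, the registry and NFS client setup performed by the
+script can be sanity checked as follows (a sketch only; output will vary per
+environment)::
+
+   sudo docker info | grep -A 2 -i "insecure registries"
+   dpkg -l nfs-common
+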
+Launch Instance
+---------------
+
+.. image:: K8s-Launch_Instance.jpeg
+
+Assign Floating IP for external access
+--------------------------------------
+
+.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
+
+.. image:: K8s-Manage_Floating_IP_Associations.jpeg
+
+.. image:: K8s-Launch_Instance.jpeg
+
+Setting up an NFS share for Multinode Kubernetes Clusters
+=========================================================
+The figure below illustrates a possible topology of a multinode Kubernetes
+cluster.
+
+.. image:: k8s-topology.jpg
+
+One node, the Master Node, runs Rancher and Helm clients and connects to all
+the Kubernetes nodes in the cluster. Kubernetes nodes, in turn, run Rancher,
+Kubernetes and Tiller (Helm) agents, which receive, execute, and respond to
+commands issued by the Master Node (e.g. kubectl or helm operations). Note that
+the Master Node can be either a remote machine that the user can log in to or a
+local machine (e.g. laptop, desktop) that has access to the Kubernetes cluster.
+
+Deploying applications to a Kubernetes cluster requires Kubernetes nodes to
+share a common, distributed filesystem. One node in the cluster plays the role
+of NFS Master (not to be confused with the Master Node that runs Rancher and Helm
+clients, which is located outside the cluster), while all the other cluster
+nodes play the role of NFS slaves. In the figure above, the left-most cluster
+node plays the role of NFS Master (indicated by the crown symbol). To properly
+set up an NFS share on Master and Slave nodes, the user can run the scripts
+below.
+
+Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the script.
+
+.. literalinclude:: master_nfs_node.sh
+ :language: bash
+
+Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script.
+
+.. literalinclude:: slave_nfs_node.sh
+ :language: bash
+
+The master_nfs_node.sh script runs in the NFS Master node and needs the list of
+NFS Slave nodes as input, e.g.::
+
+ > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+
+The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of
+the NFS Master node as input, e.g.::
+
+ > sudo ./slave_nfs_node.sh master_node_ip
+
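+To sanity check the share (an optional check, assuming both scripts completed
+without error; replace <master_node_ip> with the address of the NFS Master
+node), run the following from any NFS Slave node::
+
+   showmount -e <master_node_ip>
+   df -h /dockerdata-nfs
+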
+Configuration (Rancher and Kubernetes)
+======================================
+
+Access Rancher server via web browser
+-------------------------------------
+(e.g. http://10.12.6.16:8080/env/1a5/apps/stacks)
+
+.. image:: Access_Rancher_server_via_web_browser.jpeg
+
+Add Kubernetes Environment to Rancher
+-------------------------------------
+
+1. Select “Manage Environments”
+
+.. image:: Add_Kubernetes_Environment_to_Rancher.png
+
+2. Select “Add Environment”
+
+.. image:: Select_Add_Environment.png
+
+3. Add a unique name for your new Rancher environment
+
+4. Select the Kubernetes template
+
+5. Click "create"
+
+.. image:: Click_create.jpeg
+
+6. Select the newly named environment (e.g. SB4) from the dropdown list (top left).
+
+Rancher is now waiting for a Kubernetes Host to be added.
+
+.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
+
+Add Kubernetes Host
+-------------------
+
+1. If this is the first (or only) host being added, click on the "Add a host" link
+
+.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
+
+and click on "Save" (accept defaults).
+
+.. image:: and_click_on_Save_accept_defaults.jpeg
+
+otherwise select INFRASTRUCTURE → Hosts and click on "Add Host".
+
+.. image:: otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg
+
+2. Enter the management IP for the k8s VM (e.g. 10.0.0.4) that was just created.
+
+3. Click on “Copy to Clipboard” button
+
+4. Click on “Close” button
+
+.. image:: Click_on_Close_button.jpeg
+
+Without the 10.0.0.4 IP, the CATTLE_AGENT IP will be derived on the host, but
+it may not be a routable IP.
+
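+As an illustrative check (not part of the official procedure), you can see
+which source address the agent would otherwise derive by asking the host which
+IP it uses to reach the Rancher server::
+
+   ip route get 10.12.6.16
+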
+Configure Kubernetes Host
+-------------------------
+
+1. Log in to the new Kubernetes Host::
+
+ > ssh -i ~/oom-key.pem ubuntu@10.12.5.1
+ The authenticity of host '10.12.5.172 (10.12.5.172)' can't be established.
+ ECDSA key fingerprint is SHA256:tqxayN58nCJKOJcWrEZzImkc0qKQHDDfUTHqk4WMcEI.
+ Are you sure you want to continue connecting (yes/no)? yes
+ Warning: Permanently added '10.12.5.172' (ECDSA) to the list of known hosts.
+ Welcome to Ubuntu 16.04.2 LTS (GNU/Linux 4.4.0-64-generic x86_64)
+
+ * Documentation: https://help.ubuntu.com
+ * Management: https://landscape.canonical.com
+ * Support: https://ubuntu.com/advantage
+
+ Get cloud support with Ubuntu Advantage Cloud Guest:
+ http://www.ubuntu.com/business/services/cloud
+
+ 180 packages can be updated.
+ 100 updates are security updates.
+
+ The programs included with the Ubuntu system are free software;
+ the exact distribution terms for each program are described in the
+ individual files in /usr/share/doc/*/copyright.
+
+ Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
+ applicable law.
+
+ To run a command as administrator (user "root"), use "sudo <command>".
+ See "man sudo_root" for details.
+
+ ubuntu@sb4-k8s-1:~$
+
+
+2. Paste Clipboard content and hit enter to install Rancher Agent::
+
+   ubuntu@sb4-k8s-1:~$ sudo docker run -e CATTLE_AGENT_IP="10.0.0.4" --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.9 http://10.12.6.16:8080/v1/scripts/5D757C68BD0A2125602A:1514678400000:yKW9xHGJDLvq6drz2eDzR2mjato
+ Unable to find image 'rancher/agent:v1.2.9' locally
+   v1.2.9: Pulling from rancher/agent
+ b3e1c725a85f: Pull complete
+ 6071086409fc: Pull complete
+ d0ac3b234321: Pull complete
+ 87f567b5cf58: Pull complete
+ a63e24b217c4: Pull complete
+ d0a3f58caef0: Pull complete
+ 16914729cfd3: Pull complete
+ dc5c21984c5b: Pull complete
+ d7e8f9784b20: Pull complete
+   Digest: sha256:c21255ac4d94ffbc7b523f870f20ea5189b68fa3d642800adb4774aab4748e66
+ Status: Downloaded newer image for rancher/agent:v1.2.9
+
+ INFO: Running Agent Registration Process, CATTLE_URL=http://10.12.6.16:8080/v1
+ INFO: Attempting to connect to: http://10.12.6.16:8080/v1
+ INFO: http://10.12.6.16:8080/v1 is accessible
+ INFO: Inspecting host capabilities
+ INFO: Boot2Docker: false
+ INFO: Host writable: true
+ INFO: Token: xxxxxxxx
+ INFO: Running registration
+ INFO: Printing Environment
+ INFO: ENV: CATTLE_ACCESS_KEY=98B35AC484FBF820E0AD
+ INFO: ENV: CATTLE_AGENT_IP=10.0.9.4
+ INFO: ENV: CATTLE_HOME=/var/lib/cattle
+ INFO: ENV: CATTLE_REGISTRATION_ACCESS_KEY=registrationToken
+ INFO: ENV: CATTLE_REGISTRATION_SECRET_KEY=xxxxxxx
+ INFO: ENV: CATTLE_SECRET_KEY=xxxxxxx
+ INFO: ENV: CATTLE_URL=http://10.12.6.16:8080/v1
+ INFO: ENV: DETECTED_CATTLE_AGENT_IP=10.12.5.172
+ INFO: ENV: RANCHER_AGENT_IMAGE=rancher/agent:v1.2.9
+ INFO: Launched Rancher Agent: c27ee0f3dc4c783b0db647ea1f73c35b3843a4b8d60b96375b1a05aa77d83136
+ ubuntu@sb4-k8s-1:~$
+
+3. Return to Rancher environment (e.g. SB4) and wait for services to complete
+ (~ 10-15 mins)
+
+.. image:: Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg
+
+Configure kubectl and helm
+==========================
+In this example we are configuring kubectl and helm that have been installed
+(as a convenience) onto the Rancher and Kubernetes hosts. Typically you would
+install them both on your PC and connect to the cluster remotely; the
+procedure would remain the same.
+
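+If you prefer to work from your own machine instead, a minimal install sketch
+(assuming a Linux amd64 workstation and the same versions used by the
+customization scripts) is::
+
+   # kubectl 1.8.10
+   curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.10/bin/linux/amd64/kubectl
+   chmod +x ./kubectl && sudo mv ./kubectl /usr/local/bin/kubectl
+
+   # helm 2.8.2 client
+   wget http://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz
+   tar -zxvf helm-v2.8.2-linux-amd64.tar.gz
+   sudo mv linux-amd64/helm /usr/local/bin/helm
+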
+1. Click on CLI and then click on “Generate Config”
+
+.. image:: Click_on_CLI_and_then_click_on_Generate_Config.jpeg
+
+2. Click on “Copy to Clipboard”. Wait until you see a "token"; do not copy
+   the user+password, as the server is not ready at that point.
+
+.. image:: Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg
+
+3. Create a .kube directory in the user's home directory (if one does not exist)::
+
+   ubuntu@sb4-k8s-1:~$ mkdir .kube
+   ubuntu@sb4-k8s-1:~$ vi .kube/config
+
+4. Paste contents of Clipboard into a file called “config” and save the file::
+
+ apiVersion: v1
+   kind: Config
+ clusters:
+ - cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://10.12.6.16:8080/r/projects/1a7/kubernetes:6443"
+ name: "SB4"
+ contexts:
+ - context:
+ cluster: "SB4"
+ user: "SB4"
+ name: "SB4"
+ current-context: "SB4"
+ users:
+ - name: "SB4"
+ user:
+ token: "QmFzaWMgTlRBd01qZzBOemc)TkRrMk1UWkNOMFpDTlVFNlExcHdSa1JhVZreE5XSm1TRGhWU2t0Vk1sQjVhalZaY0dWaFVtZGFVMHQzWW1WWVJtVmpSQT09"
+
+5. Validate that kubectl is able to connect to the kubernetes cluster::
+
+ ubuntu@sb4-k8s-1:~$ kubectl config get-contexts
+ CURRENT NAME CLUSTER AUTHINFO NAMESPACE
+ * SB4 SB4 SB4
+   ubuntu@sb4-k8s-1:~$
+
+and show running pods::
+
+   ubuntu@sb4-k8s-1:~$ kubectl get pods --all-namespaces -o=wide
+   NAMESPACE     NAME                                   READY   STATUS    RESTARTS   AGE   IP              NODE
+   kube-system   heapster-7Gb8cd7b5-q7p42               1/1     Running   0          13m   10.42.213.49    sb4-k8s-1
+   kube-system   kube-dns-5d7bM87c9-c6f67               3/3     Running   0          13m   10.42.181.110   sb4-k8s-1
+   kube-system   kubernetes-dashboard-f9577fffd-kswjg   1/1     Running   0          13m   10.42.105.113   sb4-k8s-1
+   kube-system   monitoring-grafana-997796fcf-vg9h9     1/1     Running   0          13m   10.42.141.58    sb4-k8s-1
+   kube-system   monitoring-influxdb-56chd96b-hk66b     1/1     Running   0          13m   10.42.246.90    sb4-k8s-1
+   kube-system   tiller-deploy-cc96d4f6b-v29k9          1/1     Running   0          13m   10.42.147.248   sb4-k8s-1
+   ubuntu@sb4-k8s-1:~$
+
+6. Validate helm is running at the right version. If not, an error like this
+ will be displayed::
+
+ ubuntu@sb4-k8s-1:~$ helm list
+   Error: incompatible versions client[v2.8.2] server[v2.6.1]
+ ubuntu@sb4-k8s-1:~$
+
+7. Upgrade the server-side component of helm (tiller) via ``helm init --upgrade``::
+
+ ubuntu@sb4-k8s-1:~$ helm init --upgrade
+ Creating /home/ubuntu/.helm
+ Creating /home/ubuntu/.helm/repository
+ Creating /home/ubuntu/.helm/repository/cache
+ Creating /home/ubuntu/.helm/repository/local
+ Creating /home/ubuntu/.helm/plugins
+ Creating /home/ubuntu/.helm/starters
+ Creating /home/ubuntu/.helm/cache/archive
+ Creating /home/ubuntu/.helm/repository/repositories.yaml
+ Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
+ Adding local repo with URL: http://127.0.0.1:8879/charts
+ $HELM_HOME has been configured at /home/ubuntu/.helm.
+
+ Tiller (the Helm server-side component) has been upgraded to the current version.
+ Happy Helming!
+ ubuntu@sb4-k8s-1:~$
+
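+After the upgrade it is worth re-checking that the helm client and tiller
+versions now match (a quick check; the exact version strings depend on your
+installation)::
+
+   helm version --short
+   helm list
+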
+ONAP Deployment via OOM
+=======================
+Now that Kubernetes and Helm are installed and configured, you can prepare to
+deploy ONAP. Follow the instructions in the README.md_ or look at the official
+documentation to get started:
+
+- :ref:`quick-start-label` - deploy ONAP on an existing cloud
+- :ref:`user-guide-label` - a guide for operators of an ONAP instance
+
+
diff --git a/docs/oom_user_guide.rst b/docs/oom_user_guide.rst
index fbd3d9ea97..f79de5950f 100644
--- a/docs/oom_user_guide.rst
+++ b/docs/oom_user_guide.rst
@@ -75,8 +75,8 @@ Install Helm
~~~~~~~~~~~~
Helm is used by OOM for package and configuration management. To install Helm, enter the following::
- > wget http://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-linux-amd64.tar.gz
- > tar -zxvf helm-v2.7.2-linux-amd64.tar.gz
+ > wget http://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz
+ > tar -zxvf helm-v2.8.2-linux-amd64.tar.gz
> sudo mv linux-amd64/helm /usr/local/bin/helm
Verify the Helm version with::
@@ -108,7 +108,7 @@ stable which should be removed to avoid confusion::
To prepare your system for an installation of ONAP, you'll need to::
- > git clone http://gerrit.onap.org/r/oom
+ > git clone -b beijing http://gerrit.onap.org/r/oom
> cd oom/kubernetes
diff --git a/docs/openstack-k8s-node.sh b/docs/openstack-k8s-node.sh
new file mode 100644
index 0000000000..7d49ad5033
--- /dev/null
+++ b/docs/openstack-k8s-node.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+DOCKER_VERSION=17.03
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+# set up root access - default login: oom/oom - comment out to restrict access to ssh key only
+sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+service sshd restart
+echo -e "oom\noom" | passwd root
+
+apt-get update
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
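+# configure the docker daemon to trust the (http) ONAP nexus3 registry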
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+sudo chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+sudo mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/helm
+
+# install nfs
+sudo apt-get install nfs-common -y
+
+
+exit 0
diff --git a/docs/openstack-rancher.sh b/docs/openstack-rancher.sh
new file mode 100644
index 0000000000..d51abe4d68
--- /dev/null
+++ b/docs/openstack-rancher.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+DOCKER_VERSION=17.03
+RANCHER_VERSION=1.6.14
+KUBECTL_VERSION=1.8.10
+HELM_VERSION=2.8.2
+
+# set up root access - default login: oom/oom - comment out to restrict access to ssh key only
+sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
+sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+service sshd restart
+echo -e "oom\noom" | passwd root
+
+apt-get update
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
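+# start the Rancher server container, then install kubectl and helm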
+sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher_server rancher/server:v$RANCHER_VERSION
+sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+sudo chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+sudo mkdir ~/.kube
+wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/helm
+
+# nfs server
+sudo apt-get install nfs-kernel-server -y
+
+sudo mkdir -p /nfs_share
+sudo chown nobody:nogroup /nfs_share/
+
+
+exit 0
diff --git a/docs/otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg b/docs/otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg
new file mode 100644
index 0000000000..18af758a0d
--- /dev/null
+++ b/docs/otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg
Binary files differ
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index 2573c2ca02..3cb547cce3 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -8,66 +8,10 @@
Release Notes
=============
-Version: 1.1.0
---------------
-
-:Release Date: 2017-11-16
-
-**New Features**
-
-The Amsterdam release is the first release of the ONAP Operations Manager (OOM).
-
-The main goal of the Amsterdam release was to:
-
- - Support Flexible Platform Deployment via Kubernetes of fully containerized OMAP components - on any type of environment.
- - Support State Management of ONAP platform components.
- - Support full production ONAP deployment and any variation of component level deployment for development.
- - Platform Operations Orchestration / Control Loop Actions.
- - Platform centralized logging with ELK stack.
-
-**Bug Fixes**
-
- The full list of implemented user stories and epics is available on `JIRA <https://jira.onap.org/secure/RapidBoard.jspa?rapidView=41&view=planning.nodetail&epics=visible>`_
- This is the first release of OOM, the defects fixed in this release were raised during the course of the release.
- Anything not closed is captured below under Known Issues. If you want to review the defects fixed in the Amsterdam release, refer to Jira link above.
-
-**Known Issues**
- - `OOM-6 <https://jira.onap.org/browse/OOM-6>`_ Automated platform deployment on Docker/Kubernetes
-
- VFC, AAF, MSB minor issues.
-
- Workaround: Manual configuration changes - however the reference vFirewall use case does not currently require these components.
-
- - `OOM-10 <https://jira.onap.org/browse/OOM-10>`_ Platform configuration management.
-
- OOM ONAP Configuration Management - Handling of Secrets.
-
- Workaround: Automated workaround to be able to pull from protected docker repositories.
-
-
-**Security Issues**
- N/A
-
-
-**Upgrade Notes**
-
- N/A
-
-**Deprecation Notes**
-
- N/A
-
-**Other**
-
-
-
-===========
-
-
Version 2.0.0 Beijing Release
-----------------------------
-:Release Date: 2018-05-24
+:Release Date: 2018-06-07
Epic
****
@@ -75,7 +19,7 @@ Epic
* [`OOM-6 <https://jira.onap.org/browse/OOM-6>`_] - Automated platform deployment on Docker/Kubernetes
* [`OOM-7 <https://jira.onap.org/browse/OOM-7>`_] - Platform monitoring and auto-healing
* [`OOM-8 <https://jira.onap.org/browse/OOM-8>`_] - Automated platform scalability
-* [`OOM-9 <https://jira.onap.org/browse/OOM-9>`_] - Platform upgradability&rollbacks
+* [`OOM-9 <https://jira.onap.org/browse/OOM-9>`_] - Platform upgradability & rollbacks
* [`OOM-10 <https://jira.onap.org/browse/OOM-10>`_] - Platform configuration management
* [`OOM-46 <https://jira.onap.org/browse/OOM-46>`_] - Platform infrastructure deployment with TOSCA
* [`OOM-109 <https://jira.onap.org/browse/OOM-109>`_] - Platform Centralized Logging
@@ -237,7 +181,6 @@ Task
* [`OOM-624 <https://jira.onap.org/browse/OOM-624>`_] - CII security badging: cleartext password for keystone and docker repo creds
* [`OOM-726 <https://jira.onap.org/browse/OOM-726>`_] - Mirror AAI docker version changes into OOM from AAI-791
* [`OOM-772 <https://jira.onap.org/browse/OOM-772>`_] - Remove old DCAE from Release
-* [`OOM-793 <https://jira.onap.org/browse/OOM-793>`_] - Document OOM-722 health/ete script changes for onap-discuss/wiki/rtd/integration team
* [`OOM-801 <https://jira.onap.org/browse/OOM-801>`_] - Policy docker images rename - key off new name in POLICY-674
* [`OOM-810 <https://jira.onap.org/browse/OOM-810>`_] - Improve emsdriver code
* [`OOM-819 <https://jira.onap.org/browse/OOM-819>`_] - expose log/logstash 5044 as nodeport for external log producers outside of the kubernetes cluster
@@ -247,6 +190,7 @@ Task
* [`OOM-975 <https://jira.onap.org/browse/OOM-975>`_] - Notes are missing in multicloud
* [`OOM-1031 <https://jira.onap.org/browse/OOM-1031>`_] - Config Changes for consul to make vid, so, log health checks pass
* [`OOM-1032 <https://jira.onap.org/browse/OOM-1032>`_] - Making consul Stateful
+* [`OOM-1122 <https://jira.onap.org/browse/OOM-1122>`_] - Update APPC OOM chart to use Beijing release artifacts
Bug
***
@@ -324,7 +268,7 @@ Bug
* [`OOM-874 <https://jira.onap.org/browse/OOM-874>`_] - Inconsistent repository references in ONAP charts
* [`OOM-875 <https://jira.onap.org/browse/OOM-875>`_] - Cannot retrieve robot logs
* [`OOM-876 <https://jira.onap.org/browse/OOM-876>`_] - Some containers ignore the repository setting
-* [`OOM-878 <https://jira.onap.org/browse/OOM-878>`_] - MySQL slave nodes don't deploy when mysql.replicaCount>1
+* [`OOM-878 <https://jira.onap.org/browse/OOM-878>`_] - MySQL slave nodes don't deploy when mysql.replicaCount > 1
* [`OOM-881 <https://jira.onap.org/browse/OOM-881>`_] - SDN-C Portal pod fails to come up
* [`OOM-882 <https://jira.onap.org/browse/OOM-882>`_] - Some SDNC service names should be prefixed with the helm release name
* [`OOM-884 <https://jira.onap.org/browse/OOM-884>`_] - VID-VID mariadb pv is pointing to a wrong location
@@ -369,7 +313,6 @@ Bug
* [`OOM-964 <https://jira.onap.org/browse/OOM-964>`_] - SDC Healthcheck failure on sdc-be and sdc-kb containers down
* [`OOM-968 <https://jira.onap.org/browse/OOM-968>`_] - warning on default deployment values.yaml
* [`OOM-969 <https://jira.onap.org/browse/OOM-969>`_] - oomk8s images have no Dockerfile's
-* [`OOM-970 <https://jira.onap.org/browse/OOM-970>`_] - Can't configure mysql password for sdnctl user
* [`OOM-971 <https://jira.onap.org/browse/OOM-971>`_] - Common service name template should allow for chart name override
* [`OOM-974 <https://jira.onap.org/browse/OOM-974>`_] - Cassandra bootstrap is done incorrectly
* [`OOM-977 <https://jira.onap.org/browse/OOM-977>`_] - The esr-gui annotations should include a "path" param when register to MSB
@@ -403,7 +346,6 @@ Bug
* [`OOM-1039 <https://jira.onap.org/browse/OOM-1039>`_] - Service distribution to SO fails
* [`OOM-1041 <https://jira.onap.org/browse/OOM-1041>`_] - aai-service was renamed, but old references remain
* [`OOM-1042 <https://jira.onap.org/browse/OOM-1042>`_] - portalapps service was renamed, but old references remain
-* [`OOM-1044 <https://jira.onap.org/browse/OOM-1044>`_] - Fix image/table warning during deploy - since helm install switch a month ago - non-affecting - but check the yaml
* [`OOM-1045 <https://jira.onap.org/browse/OOM-1045>`_] - top level values.yaml missing entry for dmaap chart
* [`OOM-1049 <https://jira.onap.org/browse/OOM-1049>`_] - SDNC_UEB_LISTENER db
* [`OOM-1050 <https://jira.onap.org/browse/OOM-1050>`_] - Impossible to deploy consul using cache docker registry
@@ -415,12 +357,33 @@ Bug
* [`OOM-1064 <https://jira.onap.org/browse/OOM-1064>`_] - Improve docker registry secret management
* [`OOM-1066 <https://jira.onap.org/browse/OOM-1066>`_] - Updating TOSCA blueprint to sync up with helm configuration changes (add dmaap and oof/delete message-router)
* [`OOM-1068 <https://jira.onap.org/browse/OOM-1068>`_] - Update SO with new AAI cert
-* [`OOM-1070 <https://jira.onap.org/browse/OOM-1070>`_] - SO logs partially going to /var/log/ecomp/MSO
* [`OOM-1076 <https://jira.onap.org/browse/OOM-1076>`_] - some charts still using readiness check image from amsterdam 1.x
* [`OOM-1077 <https://jira.onap.org/browse/OOM-1077>`_] - AAI resources and traversal deployment failure on non-rancher envs
* [`OOM-1079 <https://jira.onap.org/browse/OOM-1079>`_] - Robot charts dont allow over ride of pub_key, dcae_collector_ip and dcae_collector_port
-* [`OOM-1081 <https://jira.onap.org/browse/OOM-1081>`_] - Remove component'mock'from TOSCA deployment
+* [`OOM-1081 <https://jira.onap.org/browse/OOM-1081>`_] - Remove component 'mock' from TOSCA deployment
* [`OOM-1082 <https://jira.onap.org/browse/OOM-1082>`_] - Wrong pv location of dcae postgres
+* [`OOM-1085 <https://jira.onap.org/browse/OOM-1085>`_] - appc hostname is incorrect in url
+* [`OOM-1086 <https://jira.onap.org/browse/OOM-1086>`_] - clamp deployment changes /dockerdata-nfs/ReleaseName dir permissions
+* [`OOM-1088 <https://jira.onap.org/browse/OOM-1088>`_] - APPC returns error for vCPE restart message from Policy
+* [`OOM-1089 <https://jira.onap.org/browse/OOM-1089>`_] - DCAE pods are not getting purged
+* [`OOM-1093 <https://jira.onap.org/browse/OOM-1093>`_] - Line wrapping issue in redis-cluster-config.sh script
+* [`OOM-1094 <https://jira.onap.org/browse/OOM-1094>`_] - Fix postgres startup
+* [`OOM-1095 <https://jira.onap.org/browse/OOM-1095>`_] - common makefile builds out of order
+* [`OOM-1096 <https://jira.onap.org/browse/OOM-1096>`_] - node port conflict SDNC (Geo enabled) & other charts
+* [`OOM-1097 <https://jira.onap.org/browse/OOM-1097>`_] - Nbi needs dep-nbi - crash on make all
+* [`OOM-1099 <https://jira.onap.org/browse/OOM-1099>`_] - Add External Interface NBI project into OOM TOSCA
+* [`OOM-1102 <https://jira.onap.org/browse/OOM-1102>`_] - Incorrect AAI services
+* [`OOM-1103 <https://jira.onap.org/browse/OOM-1103>`_] - Cannot disable NBI
+* [`OOM-1104 <https://jira.onap.org/browse/OOM-1104>`_] - Policy DROOLS configuration across container restarts
+* [`OOM-1110 <https://jira.onap.org/browse/OOM-1110>`_] - Clamp issue when connecting Policy
+* [`OOM-1111 <https://jira.onap.org/browse/OOM-1111>`_] - Please revert to using VNFSDK Postgres container
+* [`OOM-1114 <https://jira.onap.org/browse/OOM-1114>`_] - APPC is broken in latest helm chart
+* [`OOM-1115 <https://jira.onap.org/browse/OOM-1115>`_] - SDNC DGBuilder cant operate on DGs in database - need NodePort
+* [`OOM-1116 <https://jira.onap.org/browse/OOM-1116>`_] - Correct values needed by NBI chart
+* [`OOM-1124 <https://jira.onap.org/browse/OOM-1124>`_] - Update OOM APPC chart to enhance AAF support
+* [`OOM-1126 <https://jira.onap.org/browse/OOM-1126>`_] - Incorrect Port mapping between CDT Application and APPC main application
+* [`OOM-1127 <https://jira.onap.org/browse/OOM-1127>`_] - SO fails healthcheck
+* [`OOM-1128 <https://jira.onap.org/browse/OOM-1128>`_] - AAF CS fails to start in OpenLab
Sub-task
********
@@ -433,16 +396,75 @@ Sub-task
* [`OOM-655 <https://jira.onap.org/browse/OOM-655>`_] - Create alternate prepull script which provides more user feedback and logging
* [`OOM-753 <https://jira.onap.org/browse/OOM-753>`_] - Create Helm Sub-Chart for SO's embedded mariadb
* [`OOM-754 <https://jira.onap.org/browse/OOM-754>`_] - Create Helm Chart for SO
-* [`OOM-763 <https://jira.onap.org/browse/OOM-763>`_] - Work with Robot team to minimize/optimize configuration requirements
* [`OOM-774 <https://jira.onap.org/browse/OOM-774>`_] - Create Helm Sub-Chart for APPC's embedded mySQL database
* [`OOM-775 <https://jira.onap.org/browse/OOM-775>`_] - Create Helm Chart for APPC
* [`OOM-778 <https://jira.onap.org/browse/OOM-778>`_] - Replace NFS Provisioner with configurable PV storage solution
* [`OOM-825 <https://jira.onap.org/browse/OOM-825>`_] - Apache 2 License updation for All sqls and .js file
-* [`OOM-833 <https://jira.onap.org/browse/OOM-833>`_] - Apache 2 license addition for all configuration
* [`OOM-849 <https://jira.onap.org/browse/OOM-849>`_] - Policy Nexus component needs persistent volume for /sonatype-work
* [`OOM-991 <https://jira.onap.org/browse/OOM-991>`_] - Adjust SDC-BE init job timing from 10 to 30s to avoid restarts on single node systems
* [`OOM-1036 <https://jira.onap.org/browse/OOM-1036>`_] - update helm from 2.7.2 to 2.8.2 wiki/rtd
* [`OOM-1063 <https://jira.onap.org/browse/OOM-1063>`_] - Document Portal LoadBalancer Ingress IP Settings
+**Security Notes**
+
+OOM code has been formally scanned during build time using NexusIQ and no critical vulnerabilities were found.
+
+Quick Links:
+ - `OOM project page <https://wiki.onap.org/display/DW/ONAP+Operations+Manager+Project>`_
+
+ - `Passing Badge information for OOM <https://bestpractices.coreinfrastructure.org/en/projects/1631>`_
+
+Version: 1.1.0
+--------------
+
+:Release Date: 2017-11-16
+
+**New Features**
+
+The Amsterdam release is the first release of the ONAP Operations Manager (OOM).
+
+The main goal of the Amsterdam release was to:
+
+ - Support Flexible Platform Deployment via Kubernetes of fully containerized ONAP components - on any type of environment.
+ - Support State Management of ONAP platform components.
+ - Support full production ONAP deployment and any variation of component level deployment for development.
+ - Platform Operations Orchestration / Control Loop Actions.
+ - Platform centralized logging with ELK stack.
+
+**Bug Fixes**
+
+ The full list of implemented user stories and epics is available on `JIRA <https://jira.onap.org/secure/RapidBoard.jspa?rapidView=41&view=planning.nodetail&epics=visible>`_
+ This is the first release of OOM; the defects fixed in this release were raised during the course of the release.
+ Anything not closed is captured below under Known Issues. If you want to review the defects fixed in the Amsterdam release, refer to the JIRA link above.
+
+**Known Issues**
+ - `OOM-6 <https://jira.onap.org/browse/OOM-6>`_ Automated platform deployment on Docker/Kubernetes
+
+ VFC, AAF, MSB minor issues.
+
+ Workaround: Manual configuration changes - however the reference vFirewall use case does not currently require these components.
+
+ - `OOM-10 <https://jira.onap.org/browse/OOM-10>`_ Platform configuration management.
+
+ OOM ONAP Configuration Management - Handling of Secrets.
+
+ Workaround: Automated workaround to be able to pull from protected docker repositories.
+
+
+**Security Issues**
+ N/A
+
+
+**Upgrade Notes**
+
+ N/A
+
+**Deprecation Notes**
+
+ N/A
+
+**Other**
+
+ N/A
End of Release Notes
diff --git a/docs/slave_nfs_node.sh b/docs/slave_nfs_node.sh
new file mode 100644
index 0000000000..5cb164ccac
--- /dev/null
+++ b/docs/slave_nfs_node.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+usage () {
+ echo "Usage:"
+ echo " ./$(basename $0) nfs_master_ip"
+ exit 1
+}
+
+if [ "$#" -ne 1 ]; then
+  echo "Missing NFS master node"
+ usage
+fi
+
+MASTER_IP=$1
+
+#Install NFS common
+sudo apt-get update
+sudo apt-get install -y nfs-common
+
+#Create NFS directory
+sudo mkdir -p /dockerdata-nfs
+
+#Mount the remote NFS directory to the local one
+sudo mount $MASTER_IP:/dockerdata-nfs /dockerdata-nfs/
+echo "$MASTER_IP:/dockerdata-nfs /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" | sudo tee -a /etc/fstab
diff --git a/kubernetes/aaf/charts/aaf-cm/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-cm/templates/deployment.yaml
index 652e2ac040..50c617e543 100644
--- a/kubernetes/aaf/charts/aaf-cm/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-cm/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/cm/bin/cm >> /opt/app/osaaf/logs/cm/stdout`date -I` 2>> /opt/app/osaaf/logs/cm/stderr`date -I`"]
+ command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/cm/bin/cm"]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-cm/values.yaml b/kubernetes/aaf/charts/aaf-cm/values.yaml
index b6f70344bc..24fe23e330 100644
--- a/kubernetes/aaf/charts/aaf-cm/values.yaml
+++ b/kubernetes/aaf/charts/aaf-cm/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_cm:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_cm:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-cs/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-cs/templates/deployment.yaml
index 4253d2fb71..893c5acbde 100644
--- a/kubernetes/aaf/charts/aaf-cs/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-cs/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
- /bin/sh
- -c
- >
- /bin/sleep 30;
+ /bin/sleep {{ .Values.readiness.initialDelaySeconds }};
cd /data/;
cqlsh -u root -p root -f keyspace.cql ;
cqlsh -u root -p root -f init.cql ;
diff --git a/kubernetes/aaf/charts/aaf-cs/values.yaml b/kubernetes/aaf/charts/aaf-cs/values.yaml
index 6d5ed6c2ad..2131e851ad 100644
--- a/kubernetes/aaf/charts/aaf-cs/values.yaml
+++ b/kubernetes/aaf/charts/aaf-cs/values.yaml
@@ -42,14 +42,14 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 180
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 180
periodSeconds: 10
service:
diff --git a/kubernetes/aaf/charts/aaf-fs/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-fs/templates/deployment.yaml
index 0bbd6a5a36..b100bc4338 100644
--- a/kubernetes/aaf/charts/aaf-fs/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-fs/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c","echo hello >> /opt/app/osaaf/logs/fs/stdout`date -I`;ln -s /opt/app/osaaf/data /data;/opt/app/aaf/fs/bin/fs >> /opt/app/osaaf/logs/fs/stdout`date -I` 2>> /opt/app/osaaf/logs/fs/stderr`date -I`"]
+ command: ["/bin/bash","-c","ln -s /opt/app/osaaf/data /data;/opt/app/aaf/fs/bin/fs "]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-fs/values.yaml b/kubernetes/aaf/charts/aaf-fs/values.yaml
index b8a2663cc8..1613a4fda5 100644
--- a/kubernetes/aaf/charts/aaf-fs/values.yaml
+++ b/kubernetes/aaf/charts/aaf-fs/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_fs:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_fs:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-gui/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-gui/templates/deployment.yaml
index ee503caceb..1aa632b620 100644
--- a/kubernetes/aaf/charts/aaf-gui/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-gui/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/gui/bin/gui >> /opt/app/osaaf/logs/gui/stdout`date -I` 2>> /opt/app/osaaf/logs/gui/stderr`date -I`"]
+ command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/gui/bin/gui "]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-gui/values.yaml b/kubernetes/aaf/charts/aaf-gui/values.yaml
index 9aa79fd40c..f87ec5c69d 100644
--- a/kubernetes/aaf/charts/aaf-gui/values.yaml
+++ b/kubernetes/aaf/charts/aaf-gui/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_gui:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_gui:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-hello/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-hello/templates/deployment.yaml
index b47d878f25..c9c82231b5 100644
--- a/kubernetes/aaf/charts/aaf-hello/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-hello/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/hello/bin/hello >> /opt/app/osaaf/logs/hello/stdout`date -I` 2>> /opt/app/osaaf/logs/hello/stderr`date -I`"]
+ command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/hello/bin/hello "]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-hello/values.yaml b/kubernetes/aaf/charts/aaf-hello/values.yaml
index c05ab56047..ca19298774 100644
--- a/kubernetes/aaf/charts/aaf-hello/values.yaml
+++ b/kubernetes/aaf/charts/aaf-hello/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_hello:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_hello:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-locate/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-locate/templates/deployment.yaml
index de5a46ec5b..2adb92decd 100644
--- a/kubernetes/aaf/charts/aaf-locate/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-locate/templates/deployment.yaml
@@ -66,7 +66,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/locate/bin/locate >> /opt/app/osaaf/logs/locate/stdout`date -I` 2>> /opt/app/osaaf/logs/locate/stderr`date -I`"]
+ command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/locate/bin/locate "]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-locate/values.yaml b/kubernetes/aaf/charts/aaf-locate/values.yaml
index e9b42cbf3d..675a4ad9a1 100644
--- a/kubernetes/aaf/charts/aaf-locate/values.yaml
+++ b/kubernetes/aaf/charts/aaf-locate/values.yaml
@@ -29,7 +29,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_locate:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_locate:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-oauth/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-oauth/templates/deployment.yaml
index 00d6ee0e59..f2361858fe 100644
--- a/kubernetes/aaf/charts/aaf-oauth/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-oauth/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/oauth/bin/oauth >> /opt/app/osaaf/logs/oauth/stdout`date -I` 2>> /opt/app/osaaf/logs/oauth/stderr`date -I`"]
+ command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/oauth/bin/oauth "]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-oauth/values.yaml b/kubernetes/aaf/charts/aaf-oauth/values.yaml
index 344d13838e..293505b88e 100644
--- a/kubernetes/aaf/charts/aaf-oauth/values.yaml
+++ b/kubernetes/aaf/charts/aaf-oauth/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_oauth:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_oauth:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-service/templates/deployment.yaml b/kubernetes/aaf/charts/aaf-service/templates/deployment.yaml
index 9fd6eba534..cff8fb2e03 100644
--- a/kubernetes/aaf/charts/aaf-service/templates/deployment.yaml
+++ b/kubernetes/aaf/charts/aaf-service/templates/deployment.yaml
@@ -68,7 +68,7 @@ spec:
name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
- command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/service/bin/service >> /opt/app/osaaf/logs/service/stdout`date -I` 2>> /opt/app/osaaf/logs/service/stderr`date -I`"]
+ command: ["/bin/bash","-c"," ln -s /opt/app/osaaf/data /data;/opt/app/aaf/service/bin/service "]
volumeMounts:
- mountPath: /opt/app/osaaf
name: aaf-persistent-vol
diff --git a/kubernetes/aaf/charts/aaf-service/values.yaml b/kubernetes/aaf/charts/aaf-service/values.yaml
index 361aa47f07..d5356d7e2b 100644
--- a/kubernetes/aaf/charts/aaf-service/values.yaml
+++ b/kubernetes/aaf/charts/aaf-service/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_service:2.1.0-SNAPSHOT
+image: onap/aaf/aaf_service:2.1.1
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
index 768f89fb7e..c5fd489f31 100644
--- a/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
@@ -28,7 +28,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/smsquorumclient:latest
+image: onap/aaf/smsquorumclient:2.0.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/charts/aaf-sms/values.yaml b/kubernetes/aaf/charts/aaf-sms/values.yaml
index df2b6ab640..b75038c973 100644
--- a/kubernetes/aaf/charts/aaf-sms/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/values.yaml
@@ -28,7 +28,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aaf/sms:latest
+image: onap/aaf/sms:2.0.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/aaf/resources/config/local/org.osaaf.aaf.p12 b/kubernetes/aaf/resources/config/local/org.osaaf.aaf.p12
index f40a7556da..ac1dece85c 100644
--- a/kubernetes/aaf/resources/config/local/org.osaaf.aaf.p12
+++ b/kubernetes/aaf/resources/config/local/org.osaaf.aaf.p12
Binary files differ
diff --git a/kubernetes/aaf/resources/config/local/org.osaaf.aaf_new-24e41f2f436018568cbdecdc1edbd605.p12 b/kubernetes/aaf/resources/config/local/org.osaaf.aaf_new-24e41f2f436018568cbdecdc1edbd605.p12
deleted file mode 100644
index f40a7556da..0000000000
--- a/kubernetes/aaf/resources/config/local/org.osaaf.aaf_new-24e41f2f436018568cbdecdc1edbd605.p12
+++ /dev/null
Binary files differ
diff --git a/kubernetes/aaf/values.yaml b/kubernetes/aaf/values.yaml
index 19250d8f69..9a51839241 100644
--- a/kubernetes/aaf/values.yaml
+++ b/kubernetes/aaf/values.yaml
@@ -38,7 +38,7 @@ config:
csServiceName: aaf-cass
# gerrit branch where the latest aaf/auth/sample/public code exists
gerritProject: http://gerrit.onap.org/r/aaf/authz.git
- gerritBranch: master
+ gerritBranch: 2.0.0-ONAP
# default number of instances
replicaCount: 1
diff --git a/kubernetes/aai/charts/aai-babel/resources/config/artifact-generator.properties b/kubernetes/aai/charts/aai-babel/resources/config/artifact-generator.properties
index 1d7e5fa01a..74897d69a4 100644
--- a/kubernetes/aai/charts/aai-babel/resources/config/artifact-generator.properties
+++ b/kubernetes/aai/charts/aai-babel/resources/config/artifact-generator.properties
@@ -22,6 +22,9 @@ AAI.model-invariant-id.cloud-region=425b2158-e51d-4509-9945-dad4556474a3
#complex widget details
AAI.model-invariant-id.complex=af91c2f7-35fc-43cf-a13d-443f385b2353
AAI.model-version-id.complex=3a8ab1ee-9220-4fe8-b89c-9251d160ddc2
+#configuration widget details
+AAI.model-invariant-id.configuration=166c050d-f69d-4305-943e-0bc58c3a26cf
+AAI.model-version-id.configuration=5a175add-57e4-4a5d-8b02-c36f1d69c52b
#connector widget details
AAI.model-version-id.connector=22104c9f-29fd-462f-be07-96cd6b46dd33
AAI.model-invariant-id.connector=4c01c948-7607-4d66-8a6c-99c2c2717936
diff --git a/kubernetes/aai/charts/aai-babel/values.yaml b/kubernetes/aai/charts/aai-babel/values.yaml
index 7118d1669b..2239c9a11d 100644
--- a/kubernetes/aai/charts/aai-babel/values.yaml
+++ b/kubernetes/aai/charts/aai-babel/values.yaml
@@ -24,7 +24,7 @@ global:
#################################################################
# application image
-image: onap/babel:1.2-STAGING-latest
+image: onap/babel:1.2.0
# application configuration
config:
diff --git a/kubernetes/aai/charts/aai-champ/values.yaml b/kubernetes/aai/charts/aai-champ/values.yaml
index 13cc9e77c0..78afd419ca 100644
--- a/kubernetes/aai/charts/aai-champ/values.yaml
+++ b/kubernetes/aai/charts/aai-champ/values.yaml
@@ -24,7 +24,7 @@ global:
#################################################################
# application image
-image: onap/champ:1.2-STAGING-latest
+image: onap/champ:1.2.3
# application configuration
config:
diff --git a/kubernetes/aai/charts/aai-data-router/resources/config/schemaIngest.properties b/kubernetes/aai/charts/aai-data-router/resources/config/schemaIngest.properties
new file mode 100644
index 0000000000..9174d2f5a2
--- /dev/null
+++ b/kubernetes/aai/charts/aai-data-router/resources/config/schemaIngest.properties
@@ -0,0 +1,31 @@
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright © 2017-2018 Amdocs
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+# Properties for the SchemaLocationsBean
+
+# The AAI Schema jar will be unpacked to bundleconfig/etc
+schemaConfig=NA
+# OXM files named aai_oxm_v*.xml are unpacked here:
+nodeDir=/opt/app/data-router/bundleconfig/etc/oxm
+# DB Edge Rules are unpacked here:
+edgeDir=
+# DB Edge Property files are copied here:
+edgePropsDir=
diff --git a/kubernetes/aai/charts/aai-data-router/resources/dynamic/conf/entity-event-policy.xml b/kubernetes/aai/charts/aai-data-router/resources/dynamic/conf/entity-event-policy.xml
index e995053f12..5db9092951 100644
--- a/kubernetes/aai/charts/aai-data-router/resources/dynamic/conf/entity-event-policy.xml
+++ b/kubernetes/aai/charts/aai-data-router/resources/dynamic/conf/entity-event-policy.xml
@@ -5,8 +5,8 @@
http://www.springframework.org/schema/beans/spring-beans.xsd">
<bean id="eepConfig" class="org.onap.aai.datarouter.policy.EntityEventPolicyConfig" >
- <property name="sourceDomain" value="default" />
- <property name="searchBaseUrl" value="https://search-data-service:9509" />
+ <property name="sourceDomain" value="dev" />
+ <property name="searchBaseUrl" value="https://{{.Values.global.searchData.serviceName}}.{{.Release.Namespace}}:9509" />
<property name="searchEndpoint" value="services/search-data-service/v1/search/indexes/" />
<property name="searchEndpointDocuments" value = "documents" />
<property name="searchEntitySearchIndex" value="entity-search-index" />
diff --git a/kubernetes/aai/charts/aai-data-router/templates/configmap.yaml b/kubernetes/aai/charts/aai-data-router/templates/configmap.yaml
index 9652712aa6..badb53fb97 100644
--- a/kubernetes/aai/charts/aai-data-router/templates/configmap.yaml
+++ b/kubernetes/aai/charts/aai-data-router/templates/configmap.yaml
@@ -4,7 +4,7 @@ metadata:
name: {{ include "common.fullname" . }}-prop
namespace: {{ include "common.namespace" . }}
data:
-{{ tpl (.Files.Glob "resources/config/data-router.properties").AsConfig . | indent 2 }}
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
---
apiVersion: v1
kind: ConfigMap
diff --git a/kubernetes/aai/charts/aai-data-router/templates/deployment.yaml b/kubernetes/aai/charts/aai-data-router/templates/deployment.yaml
index 2d45a0b097..a045189299 100644
--- a/kubernetes/aai/charts/aai-data-router/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-data-router/templates/deployment.yaml
@@ -82,8 +82,11 @@ spec:
- mountPath: /opt/app/data-router/config/auth
name: {{ include "common.fullname" . }}-auth
- mountPath: /opt/app/data-router/config/data-router.properties
- name: {{ include "common.fullname" . }}-properties
subPath: data-router.properties
+ name: {{ include "common.fullname" . }}-properties
+ - mountPath: /opt/app/data-router/config/schemaIngest.properties
+ subPath: schemaIngest.properties
+ name: {{ include "common.fullname" . }}-properties
- mountPath: /opt/app/data-router/dynamic/routes/entity-event.route
subPath: entity-event.route
name: {{ include "common.fullname" . }}-dynamic-route
@@ -116,6 +119,11 @@ spec:
- name: {{ include "common.fullname" . }}-properties
configMap:
name: {{ include "common.fullname" . }}-prop
+ items:
+ - key: data-router.properties
+ path: data-router.properties
+ - key: schemaIngest.properties
+ path: schemaIngest.properties
- name: {{ include "common.fullname" . }}-dynamic-route
configMap:
name: {{ include "common.fullname" . }}-dynamic
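The data-router change above is the standard way to project several files out of one ConfigMap: the volume lists the keys it should expose, and each mount uses subPath so exactly one file appears at the target path without shadowing the rest of the directory. A self-contained sketch of the same pattern with illustrative names:

    apiVersion: v1
    kind: Pod
    metadata:
      name: subpath-demo                       # illustrative
    spec:
      containers:
      - name: app
        image: busybox
        command: ["sh", "-c", "cat /config/schemaIngest.properties && sleep 3600"]
        volumeMounts:
        - mountPath: /config/schemaIngest.properties
          subPath: schemaIngest.properties     # mount a single key as a single file
          name: props
      volumes:
      - name: props
        configMap:
          name: demo-prop                      # illustrative ConfigMap name
          items:
          - key: schemaIngest.properties
            path: schemaIngest.properties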
diff --git a/kubernetes/aai/charts/aai-data-router/values.yaml b/kubernetes/aai/charts/aai-data-router/values.yaml
index 5ce7689822..51fc65a408 100644
--- a/kubernetes/aai/charts/aai-data-router/values.yaml
+++ b/kubernetes/aai/charts/aai-data-router/values.yaml
@@ -7,7 +7,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/data-router:1.2-STAGING-latest
+image: onap/data-router:1.2.2
pullPolicy: Always
restartPolicy: Always
diff --git a/kubernetes/aai/charts/aai-elasticsearch/templates/deployment.yaml b/kubernetes/aai/charts/aai-elasticsearch/templates/deployment.yaml
index 0417536625..40c621068c 100644
--- a/kubernetes/aai/charts/aai-elasticsearch/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-elasticsearch/templates/deployment.yaml
@@ -17,6 +17,7 @@ spec:
metadata:
labels:
app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
name: {{ include "common.name" . }}
spec:
initContainers:
diff --git a/kubernetes/aai/charts/aai-elasticsearch/templates/service.yaml b/kubernetes/aai/charts/aai-elasticsearch/templates/service.yaml
index 54ee8b670d..f3a6dbbc5e 100644
--- a/kubernetes/aai/charts/aai-elasticsearch/templates/service.yaml
+++ b/kubernetes/aai/charts/aai-elasticsearch/templates/service.yaml
@@ -20,6 +20,6 @@ spec:
name: {{ .Values.service.portName }}
{{- end}}
selector:
- app: {{ include "common.fullname" . }}
+ app: {{ include "common.name" . }}
release: {{ .Release.Name }}
clusterIP: None
diff --git a/kubernetes/aai/charts/aai-gizmo/resources/config/log/logback.xml b/kubernetes/aai/charts/aai-gizmo/resources/config/log/logback.xml
index f63afd3c6b..a04d44cefa 100644
--- a/kubernetes/aai/charts/aai-gizmo/resources/config/log/logback.xml
+++ b/kubernetes/aai/charts/aai-gizmo/resources/config/log/logback.xml
@@ -1,213 +1,179 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<configuration scan="true" scanPeriod="3 seconds" debug="true">
- <!--<jmxConfigurator /> -->
- <!-- directory path for all other type logs -->
- <property name="logDir" value="/var/log/onap" />
- <property name="componentName" value="aai" />
- <property name="subComponentName" value="aai-CRUD" />
- <property name="logDirectory" value="${logDir}/${componentName}/${subComponentName}" />
- <!-- default eelf log file names -->
- <property name="generalLogName" value="application" />
- <property name="errorLogName" value="error" />
- <property name="metricsLogName" value="metrics" />
- <property name="auditLogName" value="audit" />
- <property name="debugLogName" value="debug" />
- <property name="queueSize" value="256" />
- <property name="maxFileSize" value="50MB" />
- <property name="maxHistory" value="30" />
- <property name="totalSizeCap" value="10GB" />
- <property name="pattern" value="%d{&quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&quot;, UTC}\t[%thread]\t%-5level\t%logger\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
- <!-- Example evaluator filter applied against console appender -->
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF Appenders -->
- <!-- ============================================================================ -->
- <!-- The EELFAppender is used to record events to the general application
+<configuration scan="true" scanPeriod="3 seconds" debug="false">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+
+ <property name="logDir" value="/var/log/onap" />
+
+
+ <!-- specify the component name
+ <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->
+ <property name="componentName" value="AAI-CRUD" />
+
+ <!-- default eelf log file names -->
+ <property name="generalLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+
+ <property name="errorLogPattern" value="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%mdc{RequestId}|%thread|GIZMO|%mdc{PartnerName}|%logger||%.-5level|%msg%n" />
+ <property name="auditMetricPattern" value="%m%n" />
+
+ <property name="logDirectory" value="${logDir}/${componentName}" />
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
log -->
- <appender name="EELF" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${generalLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
- <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>INFO</level>
- </filter>
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELF" />
- </appender>
- <!-- EELF Audit Appender. This appender is used to record audit engine
- related logging events. The audit logger and appender are specializations
- of the EELF application root logger and appender. This can be used to segregate
- Policy engine events from other components, or it can be eliminated to record
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
these events as part of the application root log. -->
- <appender name="EELFAudit" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${auditLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFAudit" />
- </appender>
- <appender name="EELFMetrics" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${metricsLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFMetrics" />
- </appender>
- <appender name="EELFError" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${errorLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
- <level>INFO</level>
- </filter>
- </appender>
- <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFError" />
- </appender>
- <appender name="EELFDebug" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>${logDirectory}/${debugLogName}.log</file>
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
- <maxFileSize>${maxFileSize}</maxFileSize>
- </timeBasedFileNamingAndTriggeringPolicy>
- <maxHistory>${maxHistory}</maxHistory>
- <totalSizeCap>${totalSizeCap}</totalSizeCap>
- </rollingPolicy>
- <encoder>
- <pattern>${pattern}</pattern>
- </encoder>
- </appender>
- <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
- <queueSize>${queueSize}</queueSize>
- <appender-ref ref="EELFDebug" />
- <includeCallerData>true</includeCallerData>
- </appender>
- <!-- ============================================================================ -->
- <!-- EELF loggers -->
- <!-- ============================================================================ -->
- <logger name="com.att.eelf" level="info" additivity="false">
- <appender-ref ref="asyncEELF" />
- </logger>
- <logger name="com.att.eelf.security" level="info" additivity="false">
- <appender-ref ref="asyncEELFSecurity" />
- </logger>
- <logger name="com.att.eelf.perf" level="info" additivity="false">
- <appender-ref ref="asyncEELFPerformance" />
- </logger>
- <logger name="com.att.eelf.server" level="info" additivity="false">
- <appender-ref ref="asyncEELFServer" />
- </logger>
- <logger name="com.att.eelf.policy" level="info" additivity="false">
- <appender-ref ref="asyncEELFPolicy" />
- </logger>
- <logger name="com.att.eelf.debug" level="debug" additivity="false">
- <appender-ref ref="asyncEELFDebug" />
- </logger>
- <logger name="com.att.eelf.audit" level="info" additivity="false">
- <appender-ref ref="asyncEELFAudit" />
- </logger>
- <logger name="com.att.eelf.metrics" level="info" additivity="false">
- <appender-ref ref="asyncEELFMetrics" />
- </logger>
- <logger name="com.att.eelf.error" level="info" additivity="false">
- <appender-ref ref="asyncEELFError" />
- </logger>
- <!-- Spring related loggers -->
- <logger name="org.springframework" level="WARN" />
- <logger name="org.springframework.beans" level="WARN" />
- <logger name="org.springframework.web" level="WARN" />
- <logger name="com.blog.spring.jms" level="WARN" />
- <!-- AJSC Services (bootstrap services) -->
- <logger name="ajsc" level="WARN" />
- <logger name="ajsc.RouteMgmtService" level="WARN" />
- <logger name="ajsc.ComputeService" level="WARN" />
- <logger name="ajsc.VandelayService" level="WARN" />
- <logger name="ajsc.FilePersistenceService" level="WARN" />
- <logger name="ajsc.UserDefinedJarService" level="WARN" />
- <logger name="ajsc.UserDefinedBeansDefService" level="WARN" />
- <logger name="ajsc.LoggingConfigurationService" level="WARN" />
- <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
- logging) -->
- <logger name="ajsc.utils" level="WARN" />
- <logger name="ajsc.utils.DME2Helper" level="WARN" />
- <logger name="ajsc.filters" level="WARN" />
- <logger name="ajsc.beans.interceptors" level="WARN" />
- <logger name="ajsc.restlet" level="WARN" />
- <logger name="ajsc.servlet" level="WARN" />
- <logger name="com.att" level="INFO" />
- <logger name="com.att.ajsc.csi.logging" level="WARN" />
- <logger name="com.att.ajsc.filemonitor" level="WARN" />
- <!-- CRUD Service loggers -->
- <logger name="org.openecomp.crud" level="INFO" />
- <!-- Other Loggers that may help troubleshoot -->
- <logger name="net.sf" level="WARN" />
- <logger name="org.apache" level="WARN" />
- <logger name="org.apache.commons.httpclient" level="WARN" />
- <logger name="org.apache.commons" level="WARN" />
- <logger name="org.apache.coyote" level="WARN" />
- <logger name="org.apache.jasper" level="WARN" />
- <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+
+ <appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${auditMetricPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+
+ <appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} -
+ %msg%n"</pattern> -->
+ <pattern>${auditMetricPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics"/>
+ </appender>
+
+ <appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${debugLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.log.zip
+ </fileNamePattern>
+ <maxHistory>60</maxHistory>
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLogPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>false</includeCallerData>
+ </appender>
+
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" />
+ <logger name="org.springframework.beans" level="WARN" />
+ <logger name="org.springframework.web" level="WARN" />
+ <logger name="com.blog.spring.jms" level="WARN" />
+
+ <!-- ARMAdapter Service loggers -->
+ <logger name="com.amdocs.aai.armadapter" level="INFO" />
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="WARN" />
+ <logger name="org.apache" level="WARN" />
+ <logger name="org.apache.commons.httpclient" level="WARN" />
+ <logger name="org.apache.commons" level="WARN" />
+ <logger name="org.apache.coyote" level="WARN" />
+ <logger name="org.apache.jasper" level="WARN" />
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
May aid in troubleshooting) -->
- <logger name="org.apache.camel" level="WARN" />
- <logger name="org.apache.cxf" level="WARN" />
- <logger name="org.apache.camel.processor.interceptor" level="WARN" />
- <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
- <logger name="org.apache.cxf.service" level="WARN" />
- <logger name="org.restlet" level="WARN" />
- <logger name="org.apache.camel.component.restlet" level="WARN" />
- <!-- logback internals logging -->
- <logger name="ch.qos.logback.classic" level="WARN" />
- <logger name="ch.qos.logback.core" level="WARN" />
- <root level="INFO">
- <appender-ref ref="asyncEELFDebug" />
- </root>
+ <logger name="org.apache.camel" level="WARN" />
+ <logger name="org.apache.cxf" level="WARN" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.service" level="WARN" />
+ <logger name="org.restlet" level="WARN" />
+ <logger name="org.apache.camel.component.restlet" level="WARN" />
+
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="WARN" />
+ <logger name="ch.qos.logback.core" level="WARN" />
+
+ <root>
+ <appender-ref ref="asyncEELF" />
+ <!-- <appender-ref ref="asyncEELFDebug" /> -->
+ </root>
+
</configuration>
diff --git a/kubernetes/aai/charts/aai-gizmo/resources/config/schemaIngest.properties b/kubernetes/aai/charts/aai-gizmo/resources/config/schemaIngest.properties
new file mode 100644
index 0000000000..dfdf3e07af
--- /dev/null
+++ b/kubernetes/aai/charts/aai-gizmo/resources/config/schemaIngest.properties
@@ -0,0 +1,31 @@
+#
+# ============LICENSE_START=======================================================
+# org.onap.aai
+# ================================================================================
+# Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright © 2017-2018 Amdocs
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+# Properties for the SchemaLocationsBean
+
+# The AAI Schema jar will be unpacked to bundleconfig/etc
+schemaConfig=NA
+# OXM files named aai_oxm_v*.xml are unpacked here:
+nodeDir=/opt/app/crud-api/bundleconfig/etc/oxm
+# DB Edge Rules are unpacked here:
+edgeDir=/opt/app/crud-api/bundleconfig/etc/dbedgerules
+# DB Edge Property files are copied here:
+edgePropsDir=/opt/app/crud-service/config/model
diff --git a/kubernetes/aai/charts/aai-gizmo/templates/deployment.yaml b/kubernetes/aai/charts/aai-gizmo/templates/deployment.yaml
index 7b0a57791c..c69b3a068b 100644
--- a/kubernetes/aai/charts/aai-gizmo/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-gizmo/templates/deployment.yaml
@@ -72,6 +72,9 @@ spec:
- mountPath: /opt/app/crud-service/config/crud-api.properties
subPath: crud-api.properties
name: {{ include "common.fullname" . }}-config
+ - mountPath: /opt/app/crud-service/config/schemaIngest.properties
+ subPath: schemaIngest.properties
+ name: {{ include "common.fullname" . }}-config
- mountPath: /opt/app/crud-service/config/model/
name: {{ include "common.fullname" . }}-model-config
- mountPath: /opt/app/crud-service/config/auth
@@ -127,6 +130,8 @@ spec:
items:
- key: crud-api.properties
path: crud-api.properties
+ - key: schemaIngest.properties
+ path: schemaIngest.properties
- key: crud-beans.xml
path: crud-beans.xml
- name: {{ include "common.fullname" . }}-logback-config
diff --git a/kubernetes/aai/charts/aai-gizmo/values.yaml b/kubernetes/aai/charts/aai-gizmo/values.yaml
index 1ea924fc13..acf64172cc 100644
--- a/kubernetes/aai/charts/aai-gizmo/values.yaml
+++ b/kubernetes/aai/charts/aai-gizmo/values.yaml
@@ -22,7 +22,7 @@ global:
#################################################################
# application image
-image: onap/gizmo:1.1-STAGING-latest
+image: onap/gizmo:1.2.1
# application configuration
config:
diff --git a/kubernetes/aai/charts/aai-modelloader/values.yaml b/kubernetes/aai/charts/aai-modelloader/values.yaml
index 676fc6fdfc..8acb28f9d2 100644
--- a/kubernetes/aai/charts/aai-modelloader/values.yaml
+++ b/kubernetes/aai/charts/aai-modelloader/values.yaml
@@ -7,7 +7,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/model-loader:1.2-STAGING-latest
+image: onap/model-loader:1.2.1
pullPolicy: Always
restartPolicy: Always
diff --git a/kubernetes/aai/charts/aai-resources/resources/config/application.properties b/kubernetes/aai/charts/aai-resources/resources/config/application.properties
index a65c04e4f0..cfeabb9e0d 100644
--- a/kubernetes/aai/charts/aai-resources/resources/config/application.properties
+++ b/kubernetes/aai/charts/aai-resources/resources/config/application.properties
@@ -44,7 +44,7 @@ server.ssl.key-store-type=JKS
# JMS bind address host port
jms.bind.address=tcp://localhost:61647
dmaap.ribbon.eureka.enabled=false
-dmaap.ribbon.listOfServers=dmaap.{{ include "common.namespace" . }}:3904
+dmaap.ribbon.listOfServers=message-router.{{.Release.Namespace}}:3904
# Number of milliseconds to wait before making ping requests again
dmaap.ribbon.ServerListRefreshInterval=75000
dmaap.ribbon.NFLoadBalancerPingInterval=75000
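With this change the Ribbon client reaches DMaaP through the in-cluster DNS name of the message-router service, <service>.<namespace>:3904, instead of the old dmaap alias. Any Service of that name in the release namespace satisfies the address; a sketch (the selector label is an assumption, the real chart ships elsewhere in this repository):

    apiVersion: v1
    kind: Service
    metadata:
      name: message-router
    spec:
      ports:
      - name: api
        port: 3904
        targetPort: 3904
      selector:
        app: message-router                    # assumed label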
diff --git a/kubernetes/aai/charts/aai-resources/values.yaml b/kubernetes/aai/charts/aai-resources/values.yaml
index 7b55aa75e2..28b271e54a 100644
--- a/kubernetes/aai/charts/aai-resources/values.yaml
+++ b/kubernetes/aai/charts/aai-resources/values.yaml
@@ -9,7 +9,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/aai-resources:1.2-STAGING-latest
+image: onap/aai-resources:1.2.2
pullPolicy: Always
restartPolicy: Always
diff --git a/kubernetes/aai/charts/aai-search-data/values.yaml b/kubernetes/aai/charts/aai-search-data/values.yaml
index c6f42bd38b..2c3a005db9 100644
--- a/kubernetes/aai/charts/aai-search-data/values.yaml
+++ b/kubernetes/aai/charts/aai-search-data/values.yaml
@@ -9,7 +9,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/search-data-service:1.2-STAGING-latest
+image: onap/search-data-service:1.2.2
pullPolicy: Always
restartPolicy: Always
diff --git a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml
index 0763f94cd1..29eb4a311a 100644
--- a/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml
+++ b/kubernetes/aai/charts/aai-sparky-be/resources/config/spring-beans/sparky-core-sync.xml
@@ -142,7 +142,7 @@
<map>
<entry key="aggregate_generic-vnf_index">
<bean class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
- <property name="indexName" value="aggregate_generic-vnf_index" />
+ <property name="indexName" value="aggregate-vnf-index" />
<property name="indexDocType" value="default" />
<property name="indexMappingsFileName" value="/schemas/dynamicMappings.json" />
</bean>
@@ -212,7 +212,7 @@
</bean>
<bean id="autoSuggestionEntitySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
- <property name="indexName" value="entityautosuggestindex" />
+ <property name="indexName" value="entity-autosuggest-index" />
<property name="indexDocType" value="default" />
<property name="indexMappingsFileName" value="/schemas/autoSuggestMappings.json" />
<property name="indexSettingsFileName" value="/schemas/autoSuggestSettings.json" />
@@ -296,7 +296,7 @@
</bean>
<bean id="geoEntitySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
- <property name="indexName" value="topographicalsearchindex" />
+ <property name="indexName" value="topography-search-index" />
<property name="indexDocType" value="default" />
<property name="indexMappingsFileName" value="/schemas/dynamicMappings.json" />
</bean>
@@ -336,7 +336,7 @@
</bean>
<bean id="viewInspectySchemaConfig" class="org.onap.aai.sparky.sync.config.ElasticSearchSchemaConfig">
- <property name="indexName" value="entitysearchindex" />
+ <property name="indexName" value="entity-search-index" />
<property name="indexDocType" value="default" />
<property name="indexMappingsFileName" value="/schemas/es_mappings.json" />
<property name="indexSettingsFileName" value="/schemas/es_settings.json" />
diff --git a/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml b/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml
index c1b2c3ccae..b9de92bb28 100644
--- a/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/templates/deployment.yaml
@@ -34,6 +34,25 @@ spec:
release: {{ .Release.Name }}
name: {{ include "common.name" . }}
spec:
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - aai-elasticsearch
+ - --container-name
+ - aai-search-data
+ - --container-name
+ - aai
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ name: {{ include "common.name" . }}-readiness
containers:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
@@ -46,7 +65,7 @@ spec:
- name: KEYSTORE_PASSWORD
value: {{ .Values.config.keyStorePassword }}
- name: SPARKY_SSL_ENABLED
- value: 'true'
+ value: 'false'
- name: SPARKY_PORTAL_ENABLED
value: 'false'
volumeMounts:
@@ -83,13 +102,13 @@ spec:
{{- if eq .Values.liveness.enabled true }}
livenessProbe:
tcpSocket:
- port: {{ .Values.service.internalPort2 }}
+ port: {{ .Values.service.internalPort }}
initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
periodSeconds: {{ .Values.liveness.periodSeconds }}
{{ end -}}
readinessProbe:
tcpSocket:
- port: {{ .Values.service.internalPort2 }}
+ port: {{ .Values.service.internalPort }}
initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.readiness.periodSeconds }}
resources:
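Two things change in this deployment: an init container now holds sparky-be back until the listed A&AI containers report ready (ready.py polls the Kubernetes API, scoped by the namespace injected through the downward API), and the TCP probes move from internalPort2 to internalPort. Rendered against the values further down in this diff, the probes therefore watch the service port rather than the debug port; a sketch:

    # probes after rendering (sketch)
    livenessProbe:
      tcpSocket:
        port: 9517        # internalPort; previously 8000 (internalPort2)
    readinessProbe:
      tcpSocket:
        port: 9517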
diff --git a/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml b/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml
index e342a9a688..385cdeabca 100644
--- a/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/templates/service.yaml
@@ -21,5 +21,4 @@ spec:
{{- end}}
selector:
app: {{ include "common.name" . }}
- release: {{ .Release.Name }}
- clusterIP: None
+ release: {{ .Release.Name }}
\ No newline at end of file
diff --git a/kubernetes/aai/charts/aai-sparky-be/values.yaml b/kubernetes/aai/charts/aai-sparky-be/values.yaml
index b9ef4ca85d..65aa79a537 100644
--- a/kubernetes/aai/charts/aai-sparky-be/values.yaml
+++ b/kubernetes/aai/charts/aai-sparky-be/values.yaml
@@ -14,7 +14,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/sparky-be:1.2-STAGING-latest
+image: onap/sparky-be:1.2.1
pullPolicy: Always
restartPolicy: Always
@@ -25,7 +25,7 @@ config:
elasticsearchHttpPort: 9200
keyStorePassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
keystoreAliasPassword: OBF:1i9a1u2a1unz1lr61wn51wn11lss1unz1u301i6o
- gerritBranch: master
+ gerritBranch: 2.0.0-ONAP
gerritProject: http://gerrit.onap.org/r/aai/test-config
portalUsername: aaiui
portalPassword: 1t2v1vfv1unz1vgz1t3b
@@ -61,9 +61,10 @@ readiness:
periodSeconds: 10
service:
- type: ClusterIP
+ type: NodePort
portName: aai-sparky-be
internalPort: 9517
+ nodePort: 20
internalPort2: 8000
ingress:
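Switching the service to NodePort exposes sparky-be outside the cluster; OOM composes the external port from the global nodePortPrefix and the two-digit suffix declared here. Assuming the usual default prefix of 302, the rendered port entry looks roughly like this:

    # rendered service fragment (sketch), assuming global.nodePortPrefix: 302
    type: NodePort
    ports:
    - port: 9517
      nodePort: 30220     # "302" + "20"
      name: aai-sparky-be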
diff --git a/kubernetes/aai/charts/aai-traversal/values.yaml b/kubernetes/aai/charts/aai-traversal/values.yaml
index c22ae75c3f..5cc49a4716 100644
--- a/kubernetes/aai/charts/aai-traversal/values.yaml
+++ b/kubernetes/aai/charts/aai-traversal/values.yaml
@@ -9,7 +9,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/aai-traversal:1.2-STAGING-latest
+image: onap/aai-traversal:1.2.2
pullPolicy: Always
restartPolicy: Always
diff --git a/kubernetes/aai/resources/config/aai/aai_keystore b/kubernetes/aai/resources/config/aai/aai_keystore
index 30fee2f602..83cae95273 100644
--- a/kubernetes/aai/resources/config/aai/aai_keystore
+++ b/kubernetes/aai/resources/config/aai/aai_keystore
Binary files differ
diff --git a/kubernetes/aai/resources/config/haproxy/haproxy.cfg b/kubernetes/aai/resources/config/haproxy/haproxy.cfg
index b9721ae41a..e90f737bff 100644
--- a/kubernetes/aai/resources/config/haproxy/haproxy.cfg
+++ b/kubernetes/aai/resources/config/haproxy/haproxy.cfg
@@ -22,6 +22,9 @@ defaults
log global
mode http
option httplog
+ option ssl-hello-chk
+ option httpchk GET /aai/util/echo HTTP/1.1\r\nHost:\ aai\r\nX-TransactionId:\ haproxy-0111\r\nX-FromAppId:\ haproxy\r\nAccept:\ application/json\r\nAuthorization:\ Basic\ QUFJOkFBSQ==
+ default-server init-addr none
# option dontlognull
# errorfile 400 /etc/haproxy/errors/400.http
# errorfile 403 /etc/haproxy/errors/403.http
@@ -95,7 +98,8 @@ backend IST_Default_8447
balance roundrobin
http-request set-header X-Forwarded-Port %[src_port]
http-response set-header Strict-Transport-Security max-age=16000000;\ includeSubDomains;\ preload;
- server aai-resources.{{.Release.Namespace}} aai-resources.{{.Release.Namespace}}:8447 port 8447 ssl verify none
+ server aai-resources.{{.Release.Namespace}} aai-resources.{{.Release.Namespace}}.svc.cluster.local:8447 resolvers kubernetes check check-ssl port 8447 ssl verify none
+
#######################
# BACKEND 8446#########
@@ -105,7 +109,7 @@ backend IST_AAI_8446
balance roundrobin
http-request set-header X-Forwarded-Port %[src_port]
http-response set-header Strict-Transport-Security max-age=16000000;\ includeSubDomains;\ preload;
- server aai-traversal.{{.Release.Namespace}} aai-traversal.{{.Release.Namespace}}:8446 port 8446 ssl verify none
+ server aai-traversal.{{.Release.Namespace}} aai-traversal.{{.Release.Namespace}}.svc.cluster.local:8446 resolvers kubernetes check check-ssl port 8446 ssl verify none
listen IST_AAI_STATS
mode http
diff --git a/kubernetes/aai/values.yaml b/kubernetes/aai/values.yaml
index 2b0180e53b..b07048e134 100644
--- a/kubernetes/aai/values.yaml
+++ b/kubernetes/aai/values.yaml
@@ -56,7 +56,7 @@ global: # global defaults
# application image
dockerhubRepository: registry.hub.docker.com
-image: aaionap/haproxy:1.2.2
+image: aaionap/haproxy:1.2.4
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/appc/charts/appc-cdt/templates/deployment.yaml b/kubernetes/appc/charts/appc-cdt/templates/deployment.yaml
index 2226b75bbf..8ac5d3ed1f 100644
--- a/kubernetes/appc/charts/appc-cdt/templates/deployment.yaml
+++ b/kubernetes/appc/charts/appc-cdt/templates/deployment.yaml
@@ -21,6 +21,8 @@ spec:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ command:
+ - /opt/startCdt.sh
ports:
- containerPort: {{ .Values.service.internalPort }}
name: {{ .Values.service.name }}
@@ -39,6 +41,11 @@ spec:
initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.readiness.periodSeconds }}
env:
+ # This sets the port that CDT will use to connect to the main appc container.
+ # The 11 is the node port suffix that is used in the main appc oom templates
+ # for nodePort3. This value will be configured in appc main chart in appc-cdt section.
+ - name: CDT_PORT
+ value: "{{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.nodePort3 }}"
volumeMounts:
- mountPath: /etc/localtime
name: localtime
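As the comment in the hunk says, CDT_PORT is the externally reachable node port of the main appc service, built from the global nodePortPrefix and the nodePort3 suffix that the appc values.yaml further down in this diff passes to the appc-cdt subchart. Assuming the usual default prefix of 302 and the nodePort3 value of 11 set below, the rendered entry is roughly:

    # rendered env entry (sketch)
    env:
    - name: CDT_PORT
      value: "30211"      # "302" + "11"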
diff --git a/kubernetes/appc/charts/appc-cdt/values.yaml b/kubernetes/appc/charts/appc-cdt/values.yaml
index 123098cdd2..67cf8d568b 100644
--- a/kubernetes/appc/charts/appc-cdt/values.yaml
+++ b/kubernetes/appc/charts/appc-cdt/values.yaml
@@ -9,7 +9,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/appc-cdt-image:1.3.0-SNAPSHOT-latest
+image: onap/appc-cdt-image:1.4.0-SNAPSHOT-latest
pullPolicy: Always
# application configuration
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/health_check.sh b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/health_check.sh
new file mode 100755
index 0000000000..544358c1af
--- /dev/null
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/health_check.sh
@@ -0,0 +1,15 @@
+#!/bin/bash -x
+
+startODL_status=$(ps -e | grep startODL | wc -l)
+waiting_bundles=$(/opt/opendaylight/current/bin/client bundle:list | grep Waiting | wc -l)
+run_level=$(/opt/opendaylight/current/bin/client system:start-level)
+
+ if [ "$run_level" == "Level 100" ] && [ "$startODL_status" -lt "1" ] && [ "$waiting_bundles" -lt "1" ]
+ then
+ echo APPC is healthy.
+ else
+ echo APPC is not healthy.
+ exit 1
+ fi
+
+exit 0
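The script reports healthy only when the Karaf run level has reached 100, startODL.sh is no longer running, and no bundle is stuck in Waiting. The appc statefulset change later in this diff swaps the TCP readiness check for an exec of this script; the wiring amounts to the following (delay and period come from .Values.readiness, the numbers here are illustrative):

    # readinessProbe as wired in kubernetes/appc/templates/statefulset.yaml (sketch)
    readinessProbe:
      exec:
        command:
        - /opt/appc/bin/health_check.sh
      initialDelaySeconds: 300    # illustrative
      periodSeconds: 60           # illustrative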
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
index e13193f364..18a2783c5f 100755
--- a/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
@@ -55,6 +55,9 @@ APPC_HOME=${APPC_HOME:-/opt/onap/appc}
SLEEP_TIME=${SLEEP_TIME:-120}
MYSQL_PASSWD=${MYSQL_PASSWD:-{{.Values.config.dbRootPassword}}}
ENABLE_ODL_CLUSTER=${ENABLE_ODL_CLUSTER:-false}
+ENABLE_AAF=${ENABLE_AAF:-false}
+AAF_EXT_IP=${AAF_EXT_IP:-{{.Values.config.aafExtIP}}}
+AAF_EXT_FQDN=${AAF_EXT_FQDN:-{{.Values.config.aafExtFQDN}}}
appcInstallStartTime=$(date +%s)
@@ -143,8 +146,13 @@ then
echo "" >> ${ODL_HOME}/etc/system.properties
echo "Copying the aaa shiro configuration into opendaylight"
- cp ${APPC_HOME}/data/aaa-app-config.xml ${ODL_HOME}/etc/opendaylight/datastore/initial/config/aaa-app-config.xml
-
+ if $ENABLE_AAF
+ then
+ echo "${AAF_EXT_IP} ${AAF_EXT_FQDN}" >> /etc/hosts
+ cp ${APPC_HOME}/data/properties/aaa-app-config.xml ${ODL_HOME}/etc/opendaylight/datastore/initial/config/aaa-app-config.xml
+ else
+ cp ${APPC_HOME}/data/aaa-app-config.xml ${ODL_HOME}/etc/opendaylight/datastore/initial/config/aaa-app-config.xml
+ fi
echo "Restarting OpenDaylight"
${ODL_HOME}/bin/stop
@@ -173,5 +181,8 @@ fi
appcInstallEndTime=$(date +%s)
echo "Total Appc install took $(expr $appcInstallEndTime - $appcInstallStartTime) seconds"
+echo "Starting cdt-proxy-service jar, logging to ${APPC_HOME}/cdt-proxy-service/jar.log"
+java -jar ${APPC_HOME}/cdt-proxy-service/cdt-proxy-service.jar > ${APPC_HOME}/cdt-proxy-service/jar.log &
+
exec ${ODL_HOME}/bin/karaf
diff --git a/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaa-app-config.xml b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaa-app-config.xml
new file mode 100644
index 0000000000..31bc4e31de
--- /dev/null
+++ b/kubernetes/appc/resources/config/appc/opt/onap/appc/data/properties/aaa-app-config.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" ?>
+<!--
+###
+# ============LICENSE_START=======================================================
+# APPC
+# ================================================================================
+# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###
+ -->
+
+<shiro-configuration xmlns="urn:opendaylight:aaa:app:config">
+
+ <!--
+ ================================= TokenAuthRealm ==================================
+ = =
+ = Use org.onap.aaf.cadi.shiro.AAFRealm to enable AAF authentication =
+ = Use org.opendaylight.aaa.shiro.realm.TokenAuthRealm =
+ ===================================================================================
+ -->
+ <main>
+ <pair-key>tokenAuthRealm</pair-key>
+<!-- <pair-value>org.opendaylight.aaa.shiro.realm.TokenAuthRealm</pair-value> -->
+ <pair-value>org.onap.aaf.cadi.shiro.AAFRealm</pair-value>
+ </main>
+
+
+ <!-- add tokenAuthRealm as the only default realm -->
+ <main>
+ <pair-key>securityManager.realms</pair-key>
+ <pair-value>$tokenAuthRealm</pair-value>
+ </main>
+
+ <!-- Used to support OAuth2 use case. -->
+ <main>
+ <pair-key>authcBasic</pair-key>
+ <pair-value>org.opendaylight.aaa.shiro.filters.ODLHttpAuthenticationFilter</pair-value>
+ </main>
+
+ <!-- in order to track AAA challenge attempts -->
+ <main>
+ <pair-key>accountingListener</pair-key>
+ <pair-value>org.opendaylight.aaa.shiro.filters.AuthenticationListener</pair-value>
+ </main>
+ <main>
+ <pair-key>securityManager.authenticator.authenticationListeners</pair-key>
+ <pair-value>$accountingListener</pair-value>
+ </main>
+
+ <!-- Model based authorization scheme supporting RBAC for REST endpoints -->
+ <main>
+ <pair-key>dynamicAuthorization</pair-key>
+ <pair-value>org.opendaylight.aaa.shiro.realm.MDSALDynamicAuthorizationFilter</pair-value>
+ </main>
+
+
+ <!--
+ ===================================================================================
+ = URLS =
+ = For AAF use <pair-value> authcBasic, roles[org.onap.appc.odl|odl-api\*] =
+ = org.onap.appc.odl|odl-api|* can be replaced with other AAF permissions =
+ = For default <pair-value> authcBasic, roles[admin] =
+ ===================================================================================
+ -->
+
+ <!-- restrict access to some endpoints by default -->
+ <urls>
+ <pair-key>/auth/**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin], dynamicAuthorization</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/restconf/config/aaa-cert-mdsal**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/restconf/operational/aaa-cert-mdsal**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/restconf/operations/aaa-cert-rpc**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/restconf/config/aaa-authn-model**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/restconf/operational/aaa-authn-model**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/restconf/operations/cluster-admin**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-admin|*]</pair-value>
+ </urls>
+ <urls>
+ <pair-key>/**</pair-key>
+<!-- <pair-value>authcBasic, roles[admin]</pair-value> -->
+ <pair-value>authcBasic, roles[org.onap.appc.odl|odl-api|*]</pair-value>
+ </urls>
+</shiro-configuration>
+
diff --git a/kubernetes/appc/templates/service.yaml b/kubernetes/appc/templates/service.yaml
index 6c85985854..733c4ca3be 100644
--- a/kubernetes/appc/templates/service.yaml
+++ b/kubernetes/appc/templates/service.yaml
@@ -31,6 +31,10 @@ spec:
- port: {{ .Values.service.externalPort2 }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
name: "{{ .Values.service.portName }}-1830"
+ - port: {{ .Values.service.externalPort3 }}
+ targetPort: {{ .Values.service.internalPort3 }}
+ nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort3 }}
+ name: "{{ .Values.service.portName }}-9090"
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
@@ -38,6 +42,9 @@ spec:
- port: {{ .Values.service.externalPort2 }}
targetPort: {{ .Values.service.internalPort2 }}
name: {{ .Values.service.portName }}-1830
+ - port: {{ .Values.service.externalPort3 }}
+ targetPort: {{ .Values.service.internalPort3 }}
+ name: {{ .Values.service.portName }}-9090
{{- end}}
selector:
app: {{ include "common.name" . }}
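Note: the nodePort for the new -9090 port is built by string-concatenating the global nodePortPrefix with nodePort3, so with the usual OOM prefix of 302 and nodePort3: 11 the port would render as 30211 (the prefix value is an assumption, not set in this diff). A sketch of confirming what was actually rendered, assuming the release runs in the onap namespace and the service is named appc:

kubectl -n onap get svc appc \
  -o jsonpath='{range .spec.ports[*]}{.name}{"\t"}{.nodePort}{"\n"}{end}'
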
diff --git a/kubernetes/appc/templates/statefulset.yaml b/kubernetes/appc/templates/statefulset.yaml
index d2da2ec756..791d93393e 100644
--- a/kubernetes/appc/templates/statefulset.yaml
+++ b/kubernetes/appc/templates/statefulset.yaml
@@ -45,8 +45,9 @@ spec:
- containerPort: {{ .Values.service.internalPort }}
- containerPort: {{ .Values.service.externalPort2 }}
readinessProbe:
- tcpSocket:
- port: {{ .Values.service.internalPort }}
+ exec:
+ command:
+ - /opt/appc/bin/health_check.sh
initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.readiness.periodSeconds }}
env:
@@ -61,6 +62,8 @@ spec:
value: "{{ .Values.config.configDir }}"
- name: DMAAP_TOPIC_ENV
value: "{{ .Values.config.dmaapTopic }}"
+ - name: ENABLE_AAF
+ value: "{{ .Values.config.enableAAF }}"
- name: ENABLE_ODL_CLUSTER
value: "{{ .Values.config.enableClustering }}"
- name: APPC_REPLICAS
@@ -81,6 +84,9 @@ spec:
- mountPath: /opt/onap/appc/data/properties/aaiclient.properties
name: onap-appc-data-properties
subPath: aaiclient.properties
+ - mountPath: /opt/onap/appc/data/properties/aaa-app-config.xml
+ name: onap-appc-data-properties
+ subPath: aaa-app-config.xml
- mountPath: /opt/onap/appc/svclogic/config/svclogic.properties
name: onap-appc-svclogic-config
subPath: svclogic.properties
@@ -93,6 +99,9 @@ spec:
- mountPath: /opt/onap/appc/bin/installAppcDb.sh
name: onap-appc-bin
subPath: installAppcDb.sh
+ - mountPath: /opt/onap/appc/bin/health_check.sh
+ name: onap-appc-bin
+ subPath: health_check.sh
- mountPath: /opt/onap/ccsdk/data/properties/dblib.properties
name: onap-sdnc-data-properties
subPath: dblib.properties
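Note: the readiness probe now execs health_check.sh instead of probing a TCP socket, so readiness is driven by the application's own check rather than mere port availability. A sketch of running the same check by hand, assuming the pods carry the app=appc label in the onap namespace:

POD=$(kubectl -n onap get pods -l app=appc -o jsonpath='{.items[0].metadata.name}')
kubectl -n onap exec "$POD" -- /opt/appc/bin/health_check.sh && echo ready || echo not-ready
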
diff --git a/kubernetes/appc/values.yaml b/kubernetes/appc/values.yaml
index 79991f51a4..1c20977b90 100644
--- a/kubernetes/appc/values.yaml
+++ b/kubernetes/appc/values.yaml
@@ -29,7 +29,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/appc-image:1.3.0-SNAPSHOT-latest
+image: onap/appc-image:1.4.0-SNAPSHOT-latest
pullPolicy: Always
# flag to enable debugging - application support required
@@ -37,7 +37,10 @@ debugEnabled: false
# application configuration
config:
+ aafExtIP: 127.0.0.1
+ aafExtFQDN: aaf-onap-beijing-test.osaaf.org
dbRootPassword: openECOMP1.0
+ enableAAF: false
enableClustering: true
configDir: /opt/onap/appc/data/properties
dmaapTopic: SUCCESS
@@ -70,6 +73,9 @@ dgbuilder:
service:
name: appc-dgbuilder
+# Passing value to the cdt chart; the value of nodePort3 will be the same as appc.service.nodePort3.
+appc-cdt:
+ nodePort3: 11
# default number of instances
replicaCount: 1
@@ -103,6 +109,10 @@ service:
nodePort2: 31
clusterPort: 2550
+ internalPort3: 9191
+ externalPort3: 9090
+ nodePort3: 11
+
## Persist data to a persistent volume
persistence:
enabled: true
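Note: the new AAF knobs default to disabled. A sketch of enabling them at deploy time rather than editing values.yaml; the release name, chart reference and IP below are assumptions:

helm upgrade -i onap local/onap --namespace onap \
  --set appc.config.enableAAF=true \
  --set appc.config.aafExtFQDN=aaf-onap-beijing-test.osaaf.org \
  --set appc.config.aafExtIP=10.0.0.100
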
diff --git a/kubernetes/clamp/charts/clamp-dash-es/values.yaml b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
index 7a8becf66f..19e85fde7a 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
@@ -97,7 +97,7 @@ service:
ingress:
enabled: false
-resources: {}
+#resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -108,10 +108,10 @@ resources: {}
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
# Minimum memory for development is 2 CPU cores and 4GB memory
# Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-# limits:
-# cpu: 2
-# memory: 4Gi
-# requests:
-# cpu: 2
-# memory: 4Gi
+resources:
+ limits:
+ cpu: 1
+ memory: 4Gi
+ requests:
+ cpu: 10m
+ memory: 2.5Gi
diff --git a/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml b/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
index 60a504d515..e4987b093a 100644
--- a/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
@@ -82,7 +82,7 @@ service:
ingress:
enabled: false
-resources: {}
+#resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -93,10 +93,11 @@ resources: {}
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
# Minimum memory for development is 2 CPU cores and 4GB memory
# Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-# limits:
-# cpu: 2
-# memory: 4Gi
-# requests:
-# cpu: 2
-# memory: 4Gi
+resources:
+ limits:
+ cpu: 1
+ memory: 2Gi
+ requests:
+ cpu: 10m
+ memory: 750Mi
+
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml b/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
index bb12210398..904798077a 100644
--- a/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
@@ -80,7 +80,7 @@ service:
ingress:
enabled: false
-resources: {}
+#resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -91,10 +91,10 @@ resources: {}
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
# Minimum memory for development is 2 CPU cores and 4GB memory
# Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-# limits:
-# cpu: 2
-# memory: 4Gi
-# requests:
-# cpu: 2
-# memory: 4Gi
+resources:
+ limits:
+ cpu: 1
+ memory: 1.3Gi
+ requests:
+ cpu: 10m
+ memory: 750Mi
diff --git a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf
index 0be1bd7aa3..c6631fb4ef 100644
--- a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf
+++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf
@@ -141,10 +141,10 @@ binlog_format=row
##innodb_log_group_home_dir = //opt/app/mysql/iblogs
# You can set .._buffer_pool_size up to 50 - 80 %
# of RAM but beware of setting memory usage too high
-innodb_buffer_pool_size = 6380M
+innodb_buffer_pool_size = 128M
#innodb_additional_mem_pool_size = 2M
# Set .._log_file_size to 25 % of buffer pool size
-innodb_log_file_size = 150M
+innodb_log_file_size = 10M
innodb_log_files_in_group = 3
innodb_log_buffer_size = 8M
#innodb_flush_log_at_trx_commit = 1
@@ -156,7 +156,7 @@ transaction-isolation=READ-COMMITTED
####### Galera parameters #######
## Galera Provider configuration
wsrep_provider=/usr/lib/galera/libgalera_smm.so
-wsrep_provider_options="gcache.size=2G; gcache.page_size=1G"
+wsrep_provider_options="gcache.size=128M; gcache.page_size=10M"
## Galera Cluster configuration
wsrep_cluster_name="MSO-automated-tests-cluster"
wsrep_cluster_address="gcomm://"
@@ -169,7 +169,7 @@ wsrep_sst_method=rsync
## Galera Node configuration
wsrep_node_name="mariadb1"
##wsrep_node_address="192.169.3.184"
-wsrep_on=ON
+wsrep_on=OFF
## Status notification
#wsrep_notify_cmd=/opt/app/mysql/bin/wsrep_notify
#######
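Note: a quick way to confirm the slimmed-down InnoDB/Galera settings took effect, as a sketch assuming the CLAMP mariadb pod carries the app=mariadb label in the onap namespace and the root password is at hand (innodb_buffer_pool_size is reported in bytes, so 128M shows as 134217728):

POD=$(kubectl -n onap get pods -l app=mariadb -o jsonpath='{.items[0].metadata.name}')
kubectl -n onap exec "$POD" -- mysql -uroot -p"$DB_ROOT_PASSWORD" \
  -e "SHOW VARIABLES LIKE 'innodb_buffer_pool_size'; SHOW VARIABLES LIKE 'wsrep_on';"
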
diff --git a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql
index 7530d5a161..308ec7da62 100644
--- a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql
+++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql
@@ -17,13 +17,6 @@
# Create CLDS database objects (tables, etc.)
#
#
-CREATE DATABASE `camundabpm`;
-USE `camundabpm`;
-DROP USER 'camunda';
-CREATE USER 'camunda';
-GRANT ALL on camundabpm.* to 'camunda' identified by 'ndMSpw4CAM' with GRANT OPTION;
-FLUSH PRIVILEGES;
-
CREATE DATABASE `cldsdb4`;
USE `cldsdb4`;
DROP USER 'clds';
diff --git a/kubernetes/clamp/charts/mariadb/values.yaml b/kubernetes/clamp/charts/mariadb/values.yaml
index da62319cb4..459040549d 100644
--- a/kubernetes/clamp/charts/mariadb/values.yaml
+++ b/kubernetes/clamp/charts/mariadb/values.yaml
@@ -85,7 +85,7 @@ ingress:
enabled: false
-resources: {}
+#resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -96,10 +96,10 @@ resources: {}
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
# Minimum memory for development is 2 CPU cores and 4GB memory
# Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-# limits:
-# cpu: 2
-# memory: 4Gi
-# requests:
-# cpu: 2
-# memory: 4Gi
+resources:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 200Mi
diff --git a/kubernetes/config/docker/init/src/config/log/filebeat/logback/filebeat.yml b/kubernetes/clamp/resources/config/log/filebeat/filebeat.yml
index 7b78c9ba46..44c0e6a8ac 100644
--- a/kubernetes/config/docker/init/src/config/log/filebeat/logback/filebeat.yml
+++ b/kubernetes/clamp/resources/config/log/filebeat/filebeat.yml
@@ -21,7 +21,7 @@ output.logstash:
#List of logstash server ip addresses with port number.
#But, in our case, this will be the loadbalancer IP address.
  #For the below property to work, the loadbalancer or logstash should expose port 5044 to listen for the filebeat events, or the port in the property should be changed appropriately.
- hosts: ["logstash.namespace-placeholder:5044"]
+ hosts: ["{{.Values.config.log.logstashServiceName}}:{{.Values.config.log.logstashPort}}"]
  #If enabled, this will do load balancing among available Logstash instances automatically.
loadbalance: true
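Note: the logstash endpoint is now templated from values instead of being hard-coded. A sketch of checking the substitution locally, run from the oom kubernetes directory and assuming chart dependencies have been built (e.g. helm dep up clamp):

helm template clamp | grep 'log-ls:5044'
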
diff --git a/kubernetes/clamp/resources/config/logback.xml b/kubernetes/clamp/resources/config/logback.xml
new file mode 100644
index 0000000000..0a8f8d5f84
--- /dev/null
+++ b/kubernetes/clamp/resources/config/logback.xml
@@ -0,0 +1,236 @@
+<configuration scan="true" scanPeriod="10 seconds" debug="false">
+ <jmxConfigurator />
+ <property resource="application.properties" />
+ <property name="logDirectory" value="/var/log/onap/clamp" />
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <!-- filter class="ch.qos.logback.classic.filter.LevelFilter"> <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch> <onMismatch>DENY</onMismatch> </filter -->
+ <!-- deny all events with a level below INFO, that is TRACE and DEBUG -->
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n
+ </pattern>
+ </encoder>
+ </appender>
+
+ <appender name="ERROR"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ <level>INFO</level>
+ </filter>
+ <file>${logDirectory}/error.log</file>
+ <append>true</append>
+ <encoder>
+ <pattern>%date{"yyyy-MM-dd'T'HH:mm:ss,SSSXXX", UTC}|%X{RequestId}|%.20thread|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%.-5level|%X{ErrorCode}|%X{ErrorDescription}|%msg%n</pattern>
+ </encoder>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <maxIndex>10</maxIndex>
+ <FileNamePattern>${logDirectory}/error.%i.log.zip
+ </FileNamePattern>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>10MB</maxFileSize>
+ </triggeringPolicy>
+ </appender>
+
+ <appender name="DEBUG"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/debug.log</file>
+ <append>true</append>
+ <encoder>
+ <pattern>%date{"yyyy-MM-dd'T'HH:mm:ss,SSSXXX",UTC}|%X{RequestId}|%.20thread|%.-5level|%logger{36}|%msg%n</pattern>
+ </encoder>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <fileNamePattern>${logDirectory}/debug.%d{yyyy-MM-dd}.%i.log.zip</fileNamePattern>
+ <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
+ <maxFileSize>10MB</maxFileSize>
+ </timeBasedFileNamingAndTriggeringPolicy>
+ <maxHistory>5</maxHistory>
+ </rollingPolicy>
+ </appender>
+
+ <appender name="AUDIT"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/audit.log</file>
+ <append>true</append>
+ <encoder>
+ <pattern>%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%.20thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}|%msg%n
+ </pattern>
+ </encoder>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <maxIndex>10</maxIndex>
+ <FileNamePattern>${logDirectory}/audit.%i.log.zip
+ </FileNamePattern>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>10MB</maxFileSize>
+ </triggeringPolicy>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="AUDIT" />
+ </appender>
+
+ <appender name="METRIC"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/metric.log</file>
+ <append>true</append>
+ <encoder>
+ <pattern>%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%.20thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{TargetVirtualEntity}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}|
+ %msg%n</pattern>
+ </encoder>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <maxIndex>10</maxIndex>
+ <FileNamePattern>${logDirectory}/metric.%i.log.zip
+ </FileNamePattern>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>10MB</maxFileSize>
+ </triggeringPolicy>
+ </appender>
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="METRIC" />
+ </appender>
+
+ <!-- SECURITY related loggers -->
+ <appender name="SECURITY"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/security.log</file>
+ <append>true</append>
+ <encoder>
+ <pattern>%X{BeginTimestamp}|%X{EndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%.20thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ElapsedTime}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}|%msg%n
+ </pattern>
+ </encoder>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <maxIndex>10</maxIndex>
+ <FileNamePattern>${logDirectory}/security.%i.log.zip
+ </FileNamePattern>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>10MB</maxFileSize>
+ </triggeringPolicy>
+ </appender>
+ <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="SECURITY" />
+ </appender>
+
+ <!-- CLDS related loggers -->
+ <logger name="org.onap.clamp.clds" level="INFO" additivity="true">
+ <appender-ref ref="ERROR" />
+ </logger>
+
+ <!-- CLDS related loggers -->
+ <logger name="com.att.eelf.error" level="OFF" additivity="true">
+ <appender-ref ref="ERROR" />
+ </logger>
+ <!-- EELF related loggers -->
+ <logger name="com.att.eelf.audit" level="INFO" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+ <logger name="com.att.eelf.metrics" level="DEBUG" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+ <logger name="com.att.eelf.security" level="DEBUG" additivity="false">
+ <appender-ref ref="asyncEELFSecurity" />
+ </logger>
+
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" />
+ <logger name="org.springframework.beans" level="WARN" />
+ <logger name="org.springframework.web" level="WARN" />
+ <logger name="com.blog.spring.jms" level="WARN" />
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="WARN" />
+ <logger name="org.apache.commons.httpclient" level="WARN" />
+ <logger name="org.apache.commons" level="WARN" />
+ <logger name="org.apache.coyote" level="WARN" />
+ <logger name="org.apache.jasper" level="WARN" />
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="WARN" />
+ <logger name="org.apache.cxf" level="WARN" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" />
+ <logger name="org.apache.cxf.service" level="WARN" />
+ <logger name="org.restlet" level="DEBUG" />
+ <logger name="org.apache.camel.component.restlet" level="DEBUG" />
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="INFO" />
+ <logger name="ch.qos.logback.core" level="INFO" />
+
+ <!-- logback jms appenders & loggers definition starts here -->
+ <!-- logback jms appenders & loggers definition starts here -->
+ <appender name="auditLogs"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ </filter>
+ <file>${logDirectory}/Audit.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/Audit.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+ <appender name="perfLogs"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ </filter>
+ <file>${logDirectory}/Perform.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/Perform.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+ <logger name="AuditRecord" level="INFO" additivity="FALSE">
+ <appender-ref ref="auditLogs" />
+ </logger>
+ <logger name="AuditRecord_DirectCall" level="INFO" additivity="FALSE">
+ <appender-ref ref="auditLogs" />
+ </logger>
+ <logger name="PerfTrackerRecord" level="INFO" additivity="FALSE">
+ <appender-ref ref="perfLogs" />
+ </logger>
+ <!-- logback jms appenders & loggers definition ends here -->
+
+ <root level="WARN">
+ <appender-ref ref="DEBUG" />
+ <appender-ref ref="STDOUT" />
+ </root>
+
+</configuration>
diff --git a/kubernetes/clamp/templates/configmap.yaml b/kubernetes/clamp/templates/configmap.yaml
index 7a66c64755..e0ed211717 100644
--- a/kubernetes/clamp/templates/configmap.yaml
+++ b/kubernetes/clamp/templates/configmap.yaml
@@ -25,3 +25,11 @@ metadata:
data:
{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
spring_application_json: {{ tpl .Values.config.springApplicationJson . | quote }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Release.Name }}-clamp-filebeat-configmap
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
diff --git a/kubernetes/clamp/templates/deployment.yaml b/kubernetes/clamp/templates/deployment.yaml
index 38eabeb968..bc0eac7b88 100644
--- a/kubernetes/clamp/templates/deployment.yaml
+++ b/kubernetes/clamp/templates/deployment.yaml
@@ -46,9 +46,24 @@ spec:
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
name: {{ include "common.name" . }}-readiness
containers:
+ # side car containers
+ - name: {{ include "common.name" . }}-filebeat-onap
+ image: "{{ .Values.global.loggingRepository }}/{{ .Values.global.loggingImage }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ volumeMounts:
+ - name: {{ include "common.fullname" . }}-filebeat-conf
+ mountPath: /usr/share/filebeat/filebeat.yml
+ subPath: filebeat.yml
+ - name: {{ include "common.fullname" . }}-data-filebeat
+ mountPath: /usr/share/filebeat/data
+ - name: {{ include "common.fullname" . }}-logs
+ mountPath: /var/log/onap
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ args:
+ - "-Dcom.att.eelf.logging.file=file:/opt/clamp/logback.xml"
+ - ""
ports:
- containerPort: {{ .Values.service.internalPort }}
# disable liveness probe when breakpoints set in debugger
@@ -66,9 +81,14 @@ spec:
initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.readiness.periodSeconds }}
volumeMounts:
+ - name: {{ include "common.fullname" . }}-logs
+ mountPath: /var/log/onap
- mountPath: /opt/clamp/sdc-controllers-config.json
name: {{ include "common.fullname" . }}-config
subPath: sdc-controllers-config.json
+ - mountPath: /opt/clamp/logback.xml
+ name: {{ include "common.fullname" . }}-config
+ subPath: logback.xml
env:
- name: SPRING_APPLICATION_JSON
valueFrom:
@@ -92,5 +112,14 @@ spec:
items:
- key: sdc-controllers-config.json
path: sdc-controllers-config.json
+ - key: logback.xml
+ path: logback.xml
+ - name: {{ include "common.fullname" . }}-filebeat-conf
+ configMap:
+ name: {{ .Release.Name }}-clamp-filebeat-configmap
+ - name: {{ include "common.fullname" . }}-data-filebeat
+ emptyDir: {}
+ - name: {{ include "common.fullname" . }}-logs
+ emptyDir: {}
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/clamp/values.yaml b/kubernetes/clamp/values.yaml
index 751e4a0c11..86c55df3bd 100644
--- a/kubernetes/clamp/values.yaml
+++ b/kubernetes/clamp/values.yaml
@@ -27,7 +27,7 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: onap/clamp:2.0-STAGING-latest
+image: onap/clamp:2.0.2
pullPolicy: Always
# flag to enable debugging - application support required
@@ -37,6 +37,9 @@ debugEnabled: false
# Application configuration defaults.
#################################################################
config:
+ log:
+ logstashServiceName: log-ls
+ logstashPort: 5044
mysqlPassword: strong_pitchou
dataRootDir: /dockerdata-nfs
springApplicationJson: >
@@ -50,10 +53,10 @@ config:
"clamp.config.files.sdcController": "file:/opt/clamp/sdc-controllers-config.json",
"clamp.config.dcae.inventory.url": "http://inventory.{{ include "common.namespace" . }}:8080",
"clamp.config.dcae.dispatcher.url": "http://deployment-handler.{{ include "common.namespace" . }}:8443",
- "clamp.config.policy.pdpUrl1": "https://pdp.{{ include "common.namespace" . }}:9091/pdp/ , testpdp, alpha123",
- "clamp.config.policy.pdpUrl2": "https://pdp.{{ include "common.namespace" . }}:9091/pdp/ , testpdp, alpha123",
- "clamp.config.policy.papUrl": "https://pap.{{ include "common.namespace" . }}:8443/pap/ , testpap, alpha123",
- "clamp.config.policy.clientKey": "5CE79532B3A2CB4D132FC0C04BF916A7"
+ "clamp.config.policy.pdpUrl1": "http://pdp.{{ include "common.namespace" . }}:8081/pdp/ , testpdp, alpha123",
+ "clamp.config.policy.pdpUrl2": "http://pdp.{{ include "common.namespace" . }}:8081/pdp/ , testpdp, alpha123",
+ "clamp.config.policy.papUrl": "http://pap.{{ include "common.namespace" . }}:9091/pap/ , testpap, alpha123",
+ "clamp.config.policy.clientKey": "dGVzdA=="
}
# subchart configuration
@@ -91,7 +94,7 @@ service:
ingress:
enabled: false
-resources: {}
+#resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -102,10 +105,10 @@ resources: {}
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
# Minimum memory for development is 2 CPU cores and 4GB memory
# Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-# limits:
-# cpu: 2
-# memory: 4Gi
-# requests:
-# cpu: 2
-# memory: 4Gi
+resources:
+ limits:
+ cpu: 1
+ memory: 1.2Gi
+ requests:
+ cpu: 10m
+ memory: 800Mi
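Note: with concrete requests/limits now shipped as defaults, they can still be tuned per environment at install time. A sketch, with assumed release and chart names:

helm upgrade -i onap local/onap --namespace onap \
  --set clamp.resources.limits.memory=2Gi \
  --set clamp.resources.requests.cpu=100m
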
diff --git a/kubernetes/cli/values.yaml b/kubernetes/cli/values.yaml
index bd52ced25f..6bfd793979 100644
--- a/kubernetes/cli/values.yaml
+++ b/kubernetes/cli/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/cli:2.0-STAGING-latest
+image: onap/cli:2.0.2
pullPolicy: Always
# application configuration
diff --git a/kubernetes/common/dgbuilder/resources/config/svclogic.properties b/kubernetes/common/dgbuilder/resources/config/svclogic.properties
index dc3980de21..82eeec5e61 100644
--- a/kubernetes/common/dgbuilder/resources/config/svclogic.properties
+++ b/kubernetes/common/dgbuilder/resources/config/svclogic.properties
@@ -2,4 +2,4 @@ org.onap.ccsdk.sli.dbtype=jdbc
org.onap.ccsdk.sli.jdbc.url=jdbc:mysql://{{.Values.config.dbServiceName}}.{{ include "common.namespace" . }}:3306/sdnctl
org.onap.ccsdk.sli.jdbc.database=sdnctl
org.onap.ccsdk.sli.jdbc.user=sdnctl
-org.onap.ccsdk.sli.jdbc.password=gamma
+org.onap.ccsdk.sli.jdbc.password={{.Values.config.dbSdnctlPassword}}
diff --git a/kubernetes/common/dgbuilder/resources/scripts/createReleaseDir.sh b/kubernetes/common/dgbuilder/resources/scripts/createReleaseDir.sh
index 76d17700b0..961792be3f 100755
--- a/kubernetes/common/dgbuilder/resources/scripts/createReleaseDir.sh
+++ b/kubernetes/common/dgbuilder/resources/scripts/createReleaseDir.sh
@@ -19,7 +19,7 @@ dbHost="{{.Values.config.dbServiceName}}.{{.Release.Namespace}}"
dbPort="3306"
dbName="sdnctl"
dbUser="sdnctl"
-dbPassword="gamma"
+dbPassword="{{.Values.config.dbSdnctlPassword}}"
gitLocalRepository="$4"
lastPort=$(find "releases/" -name "customSettings.js" |xargs grep uiPort|cut -d: -f2|sed -e s/,//|sort|tail -1)
@@ -99,7 +99,7 @@ then
echo "org.onap.ccsdk.sli.jdbc.url=jdbc:mysql://{{.Values.config.dbServiceName}}.{{.Release.Namespace}}:3306/sdnctl" >>$svclogicPropFile
echo "org.onap.ccsdk.sli.jdbc.database=sdnctl" >>$svclogicPropFile
echo "org.onap.ccsdk.sli.jdbc.user=sdnctl" >>$svclogicPropFile
- echo "org.onap.ccsdk.sli.jdbc.password=gamma" >>$svclogicPropFile
+ echo "org.onap.ccsdk.sli.jdbc.password={{.Values.config.dbSdnctlPassword}}" >>$svclogicPropFile
fi
if [ ! -e "${appDir}/flowShareUsers.js" ]
then
diff --git a/kubernetes/common/dgbuilder/resources/scripts/customSettings.js b/kubernetes/common/dgbuilder/resources/scripts/customSettings.js
index 22810baabd..e3349b9d5a 100644
--- a/kubernetes/common/dgbuilder/resources/scripts/customSettings.js
+++ b/kubernetes/common/dgbuilder/resources/scripts/customSettings.js
@@ -27,25 +27,25 @@ module.exports={
"userDir": "releases/sdnc1.0",
"httpAuth": {
"user": "dguser",
- "pass": "cc03e747a6afbbcbf8be7668acfebee5"
+ "pass": "{{.Values.config.dgUserPassword}}"
},
"dbHost": "{{.Values.config.dbServiceName}}.{{ include "common.namespace" . }}",
"dbPort": "3306",
"dbName": "sdnctl",
"dbUser": "sdnctl",
- "dbPassword": "gamma",
+ "dbPassword": "{{.Values.config.dbSdnctlPassword}}",
"gitLocalRepository": "",
"httpRoot": "/",
"disableEditor": false,
"httpAdminRoot": "/",
"httpAdminAuth": {
"user": "dguser",
- "pass": "cc03e747a6afbbcbf8be7668acfebee5"
+ "pass": "{{.Values.config.dgUserPassword}}"
},
"httpNodeRoot": "/",
"httpNodeAuth": {
"user": "dguser",
- "pass": "cc03e747a6afbbcbf8be7668acfebee5"
+ "pass": "{{.Values.config.dgUserPassword}}"
},
"uiHost": "0.0.0.0",
"version": "0.9.1",
diff --git a/kubernetes/common/dgbuilder/values.yaml b/kubernetes/common/dgbuilder/values.yaml
index e7c18c4140..759b940aea 100644
--- a/kubernetes/common/dgbuilder/values.yaml
+++ b/kubernetes/common/dgbuilder/values.yaml
@@ -33,7 +33,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/ccsdk-dgbuilder-image:0.2-STAGING-latest
+image: onap/ccsdk-dgbuilder-image:0.2.4
pullPolicy: Always
# flag to enable debugging - application support required
@@ -42,9 +42,11 @@ debugEnabled: false
# application configuration
config:
dbRootPassword: openECOMP1.0
+ dbSdnctlPassword: gamma
dbPodName: mysql-db
dbServiceName: sdnc-dbhost
-
+  # MD5 hash of the dguser password (default: test123)
+ dgUserPassword: cc03e747a6afbbcbf8be7668acfebee5
# default number of instances
replicaCount: 1
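Note: the dgbuilder credentials are now driven from values. A sketch of generating a fresh MD5 for dguser and overriding both secrets at install time; the sdnc.dgbuilder values path is an assumption that depends on how the parent chart aliases this subchart:

NEW_HASH=$(echo -n 'myNewDgPassword' | md5sum | awk '{print $1}')
helm upgrade -i onap local/onap --namespace onap \
  --set sdnc.dgbuilder.config.dgUserPassword="$NEW_HASH" \
  --set sdnc.dgbuilder.config.dbSdnctlPassword=gamma
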
diff --git a/kubernetes/config/.helmignore b/kubernetes/config/.helmignore
deleted file mode 100644
index 4c38baed31..0000000000
--- a/kubernetes/config/.helmignore
+++ /dev/null
@@ -1,25 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-
-#ignore config docker image files
-docker
-createConfig.sh \ No newline at end of file
diff --git a/kubernetes/config/createConfig.sh b/kubernetes/config/createConfig.sh
deleted file mode 100755
index 154bad58ca..0000000000
--- a/kubernetes/config/createConfig.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-usage() {
- cat <<EOF
-Usage: $0 [PARAMs]
--u : Display usage
--n [NAMESPACE] : Kubernetes namespace (required)
-EOF
-}
-
-create_namespace() {
- kubectl create namespace $1
-}
-
-create_configuration() {
- create_namespace $1
- helm install . --name "$1-config" --namespace $1 --set nsPrefix=$1
-}
-
-#MAINs
-NS=
-
-while getopts ":n:u:" PARAM; do
- case $PARAM in
- u)
- usage
- exit 1
- ;;
- n)
- NS=${OPTARG}
- ;;
- ?)
- usage
- exit
- ;;
- esac
-done
-
-if [[ -z $NS ]]; then
- usage
- exit 1
-fi
-
-printf "\n**** Creating configuration for ONAP instance: $NS\n"
-create_configuration $NS
-
-printf "**** Done ****\n"
diff --git a/kubernetes/config/docker/init/Dockerfile b/kubernetes/config/docker/init/Dockerfile
deleted file mode 100644
index b6be90b247..0000000000
--- a/kubernetes/config/docker/init/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-from ubuntu:16.04
-
-ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
-# Setup Corporate proxy
-ENV https_proxy ${HTTPS_PROXY}
-ENV http_proxy ${HTTP_PROXY}
-
-# Additional packages
-RUN apt-get update
-RUN apt-get install -y openssl vim-common
-RUN mkdir -p /opt/config/src/
-
-COPY onap-cfg.tar.gz /tmp/
-RUN tar -zxvf /tmp/onap-cfg.tar.gz -C /opt/config/src/
-COPY config-init.sh /root/config-init.sh
-RUN chmod a+x /root/config-init.sh
-ENTRYPOINT /root/config-init.sh
diff --git a/kubernetes/config/docker/init/config-init.sh b/kubernetes/config/docker/init/config-init.sh
deleted file mode 100755
index 0e5fae082a..0000000000
--- a/kubernetes/config/docker/init/config-init.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/bin/bash -x
-
-concat_array() {
- local arr=("$@")
- local str=''
- for i in ${!arr[@]}; do
- if (( $i > 0 )); then
- str="${str};"
- fi
- str="${str}${arr[$i]}"
- done
- echo "$str"
-}
-
-echo "Validating onap-parameters.yaml has been populated"
-[[ -z "$OPENSTACK_UBUNTU_14_IMAGE" ]] && { echo "Error: OPENSTACK_UBUNTU_14_IMAGE must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_PUBLIC_NET_ID" ]] && { echo "Error: OPENSTACK_PUBLIC_NET_ID must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_OAM_NETWORK_ID" ]] && { echo "Error: OPENSTACK_OAM_NETWORK_ID must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_OAM_SUBNET_ID" ]] && { echo "Error: OPENSTACK_OAM_SUBNET_ID must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_OAM_NETWORK_CIDR" ]] && { echo "Error: OPENSTACK_OAM_NETWORK_CIDR must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_USERNAME" ]] && { echo "Error: OPENSTACK_USERNAME must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_TENANT_ID" ]] && { echo "Error: OPENSTACK_TENANT_ID must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_API_KEY" ]] && { echo "Error: OPENSTACK_API_KEY must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_REGION" ]] && { echo "Error: OPENSTACK_REGION must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_KEYSTONE_URL" ]] && { echo "Error: OPENSTACK_KEYSTONE_URL must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_FLAVOUR_MEDIUM" ]] && { echo "Error: OPENSTACK_FLAVOUR_MEDIUM must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_SERVICE_TENANT_NAME" ]] && { echo "Error: OPENSTACK_SERVICE_TENANT_NAME must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$DMAAP_TOPIC" ]] && { echo "Error: DMAAP_TOPIC must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$DEMO_ARTIFACTS_VERSION" ]] && { echo "Error: DEMO_ARTIFACTS_VERSION must be set in onap-parameters.yaml"; exit 1; }
-[[ -z "$OPENSTACK_TENANT_NAME" ]] && { echo "Error: OPENSTACK_TENANT_NAME must be set in onap-parameters.yaml"; exit 1; }
-
-#make NAMESPACE directory
-echo "Creating $NAMESPACE directory if it doesn't exist"
-mkdir -p /config-init/$NAMESPACE/
-
-#unzip the configs in the NAMESPACEs directory ensuring no overwriting of files
-echo "Installing configuration files"
-cp -vnpr /opt/config/src/* /config-init/$NAMESPACE/
-
-#ensure db directories exist.
-mkdir -p /config-init/$NAMESPACE/appc/data/
-mkdir -p /config-init/$NAMESPACE/dcae/pgaas/pgdata/
-mkdir -p /config-init/$NAMESPACE/portal/mariadb/data/
-mkdir -p /config-init/$NAMESPACE/portal/logs/
-mkdir -p /config-init/$NAMESPACE/sdnc/data/
-mkdir -p /config-init/$NAMESPACE/vid/mariadb/data/
-mkdir -p /config-init/$NAMESPACE/sdc/sdc-cs/CS/
-mkdir -p /config-init/$NAMESPACE/sdc/logs/ASDC/ASDC-ES/
-mkdir -p /config-init/$NAMESPACE/sdc/logs/ASDC/ASDC-CS/
-mkdir -p /config-init/$NAMESPACE/sdc/logs/ASDC/ASDC-KB/
-mkdir -p /config-init/$NAMESPACE/sdc/logs/ASDC/ASDC-BE/
-mkdir -p /config-init/$NAMESPACE/sdc/logs/ASDC/ASDC-FE/
-mkdir -p /config-init/$NAMESPACE/aai/opt/aai/logroot/
-mkdir -p /config-init/$NAMESPACE/aai/model-loader/logs/
-mkdir -p /config-init/$NAMESPACE/aai/aai-traversal/logs/
-mkdir -p /config-init/$NAMESPACE/aai/aai-resources/logs/
-mkdir -p /config-init/$NAMESPACE/aai/sparky-be/logs/
-mkdir -p /config-init/$NAMESPACE/aai/elasticsearch/es-data/
-mkdir -p /config-init/$NAMESPACE/aai/search-data-service/logs/
-mkdir -p /config-init/$NAMESPACE/aai/data-router/logs/
-mkdir -p /config-init/$NAMESPACE/mso/mariadb/data
-mkdir -p /config-init/$NAMESPACE/clamp/mariadb/data
-mkdir -p /config-init/$NAMESPACE/log/elasticsearch/data
-mkdir -p /config-init/$NAMESPACE/consul/consul-agent-config/bin
-mkdir -p /config-init/$NAMESPACE/consul/consul-agent-config/scripts
-mkdir -p /config-init/$NAMESPACE/consul/consul-server-config
-
-echo "Setting permissions to container writeable directories"
-chmod -R 777 /config-init/$NAMESPACE/sdc/logs/
-chmod -R 777 /config-init/$NAMESPACE/portal/logs/
-chmod -R 777 /config-init/$NAMESPACE/aai/aai-config/
-chmod -R 777 /config-init/$NAMESPACE/aai/aai-data/
-chmod -R 777 /config-init/$NAMESPACE/aai/opt/aai/logroot/
-chmod -R 777 /config-init/$NAMESPACE/aai/model-loader/logs/
-chmod -R 777 /config-init/$NAMESPACE/aai/haproxy/log/
-chmod -R 777 /config-init/$NAMESPACE/aai/aai-traversal/logs/
-chmod -R 777 /config-init/$NAMESPACE/aai/aai-resources/logs/
-chmod -R 777 /config-init/$NAMESPACE/aai/sparky-be/logs/
-chmod -R 777 /config-init/$NAMESPACE/aai/elasticsearch/es-data/
-chmod -R 777 /config-init/$NAMESPACE/aai/search-data-service/logs/
-chmod -R 777 /config-init/$NAMESPACE/aai/data-router/logs/
-chmod -R 777 /config-init/$NAMESPACE/log/elasticsearch
-chown -R root:root /config-init/$NAMESPACE/log
-
-# SDNC/Robot preload files manipulation
-OPENSTACK_OAM_NETWORK_CIDR_PREFIX=`cut -d. -f1-3 <<<"$OPENSTACK_OAM_NETWORK_CIDR"`
-# MSO post install steps to encrypt openstack password
-MSO_ENCRYPTION_KEY=$(cat /config-init/$NAMESPACE/mso/mso/encryption.key)
-OPENSTACK_API_ENCRYPTED_KEY=`echo -n "$OPENSTACK_API_KEY" | openssl aes-128-ecb -e -K $MSO_ENCRYPTION_KEY -nosalt | xxd -c 256 -p`
-
-echo "Substituting configuration parameters"
-
-# replace the default 'onap' namespace qualification of K8s hostnames within the config files
-SED_NS_PATHS="/config-init/$NAMESPACE/"
-SED_NS_STRINGS=(
- "s/\.namespace-placeholder/\.${NAMESPACE}/g"
- "s/kubectl -n namespace-placeholder/kubectl -n ${NAMESPACE}/g"
-)
-SED_NS_STRING=$(concat_array "${SED_NS_STRINGS[@]}")
-find $SED_NS_PATHS -type f -exec sed -i -e "${SED_NS_STRING}" {} \;
-
-# set variable parameters
-# ATTENTION: the list of the paths must be verified if more parameters are added!
-SED_CONFIG_PATHS="/config-init/$NAMESPACE/robot/ /config-init/$NAMESPACE/mso/"
-SED_CONFIG_STRINGS=( \
- "s/UBUNTU_14_IMAGE_NAME_HERE/${OPENSTACK_UBUNTU_14_IMAGE}/g" \
- "s/OPENSTACK_PUBLIC_NET_ID_HERE/${OPENSTACK_PUBLIC_NET_ID}/g" \
- "s/OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE/${OPENSTACK_OAM_NETWORK_ID}/g" \
- "s/OPENSTACK_SUBNET_ID_WITH_ONAP_ROUTE_HERE/${OPENSTACK_OAM_SUBNET_ID}/g" \
- "s,NETWORK_CIDR_WITH_ONAP_ROUTE_HERE,${OPENSTACK_OAM_NETWORK_CIDR},g" \
- "s/OPENSTACK_USERNAME_HERE/${OPENSTACK_USERNAME}/g" \
- "s/OPENSTACK_TENANT_ID_HERE/${OPENSTACK_TENANT_ID}/g" \
- "s/OPENSTACK_PASSWORD_HERE/${OPENSTACK_API_KEY}/g" \
- "s/OPENSTACK_REGION_HERE/${OPENSTACK_REGION}/g" \
- "s,OPENSTACK_KEYSTONE_IP_HERE,${OPENSTACK_KEYSTONE_URL},g" \
- "s/OPENSTACK_FLAVOUR_MEDIUM_HERE/${OPENSTACK_FLAVOUR_MEDIUM}/g" \
- "s/DMAAP_TOPIC_HERE/${DMAAP_TOPIC}/g" \
- "s/OPENSTACK_SERVICE_TENANT_NAME_HERE/${OPENSTACK_SERVICE_TENANT_NAME}/g" \
- "s/DEMO_ARTIFACTS_VERSION_HERE/${DEMO_ARTIFACTS_VERSION}/g" \
- "s/OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE/${OPENSTACK_OAM_NETWORK_CIDR_PREFIX}/g" \
- "s/OPENSTACK_ENCRYPTED_PASSWORD_HERE/${OPENSTACK_API_ENCRYPTED_KEY}/g" \
- "s/OPENSTACK_TENANT_NAME_HERE/${OPENSTACK_TENANT_NAME}/g" \
-)
-SED_CONFIG_STRING=$(concat_array "${SED_CONFIG_STRINGS[@]}")
-find $SED_CONFIG_PATHS -type f -exec sed -i -e "${SED_CONFIG_STRING}" {} \;
-
-echo "Done!"
diff --git a/kubernetes/config/docker/init/make-tar.sh b/kubernetes/config/docker/init/make-tar.sh
deleted file mode 100755
index 7ec08ceee0..0000000000
--- a/kubernetes/config/docker/init/make-tar.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-cd src/config
-
-TAR=/usr/local/opt/gnu-tar/libexec/gnubin/tar
-OS="`uname`"
-case $OS in
- 'Linux')
- OS='Linux'
- TAR=/usr/bin/tar
- ;;
- 'Darwin')
- OS='Mac'
- ;;
- *) ;;
-esac
-
-$TAR -cvzf ../../onap-cfg.tar.gz *
diff --git a/kubernetes/config/docker/init/src/config/log/filebeat/log4j/filebeat.yml b/kubernetes/config/docker/init/src/config/log/filebeat/log4j/filebeat.yml
deleted file mode 100644
index f15c2bb8e1..0000000000
--- a/kubernetes/config/docker/init/src/config/log/filebeat/log4j/filebeat.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-filebeat.prospectors:
-#it is mandatory, in our case it's log
-- input_type: log
- #This is the canolical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
- paths:
- - /var/log/onap/*/*/*/*.log
- - /var/log/onap/*/*/*.log
- - /var/log/onap/*/*.log
-
- # The below commented properties are for time-based rolling policy. But as the log4j 1.2x does not support time-based rolling these properties are not set
- #Files older than this should be ignored.In our case it will be 48 hours i.e. 2 days. It is a helping flag for clean_inactive
- #ignore_older: 48h
- # Remove the registry entry for a file that is more than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records with in limit
- #clean_inactive: 96h
-
- #Multiline properties for log4j xml log events
- multiline.pattern: '</log4j:event>'
- multiline.negate: true
- multiline.match: before
- #multiline.max_lines: 500
- #multiline.timeout: 5s
-
-# Name of the registry file. If a relative path is used, it is considered relative to the
-# data path. Else full qualified file name.
-#filebeat.registry_file: ${path.data}/registry
-
-
-output.logstash:
- #List of logstash server ip addresses with port number.
- #But, in our case, this will be the loadbalancer IP address.
- #For the below property to work the loadbalancer or logstash should expose 5044 port to listen the filebeat events or port in the property should be changed appropriately.
- hosts: ["logstash.namespace-placeholder:5044"]
- #If enable will do load balancing among availabe Logstash, automatically.
- loadbalance: true
-
- #The list of root certificates for server verifications.
- #If certificate_authorities is empty or not set, the trusted
- #certificate authorities of the host system are used.
- #ssl.certificate_authorities: $ssl.certificate_authorities
-
- #The path to the certificate for SSL client authentication. If the certificate is not specified,
- #client authentication is not available.
- #ssl.certificate: $ssl.certificate
-
- #The client certificate key used for client authentication.
- #ssl.key: $ssl.key
-
- #The passphrase used to decrypt an encrypted key stored in the configured key file
- #ssl.key_passphrase: $ssl.key_passphrase
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.kafka_cleanshutdown b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.kafka_cleanshutdown
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.kafka_cleanshutdown
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.lock b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.lock
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/.lock
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.log
deleted file mode 100644
index 85ee8bff8b..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-CL-1/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.log
deleted file mode 100644
index 66dcea954e..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/APPC-TEST2-1/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.log
deleted file mode 100644
index bb73f23cef..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.log
deleted file mode 100644
index 53364c5905..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/DCAE-CL-EVENT-1/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-INBOX-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-APP1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-DBC1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-POL1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-SDC1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/ECOMP-PORTAL-OUTBOX-VID1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.log
deleted file mode 100644
index b466edad8c..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/PDPD-CONFIGURATION-1/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.log
deleted file mode 100644
index bc5db56330..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.log
deleted file mode 100644
index 978eeb625c..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/POLICY-CL-MGT-1/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1-0/00000000000000000000.log
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.log
deleted file mode 100644
index 7c1c0f66bc..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.index
deleted file mode 100644
index a0afe1dd1e..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.index
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.log
deleted file mode 100644
index e3e471a5f1..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/msgrtr.apinode.metrics.dmaap-1/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/recovery-point-offset-checkpoint b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/recovery-point-offset-checkpoint
deleted file mode 100644
index a003b5d19d..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/recovery-point-offset-checkpoint
+++ /dev/null
@@ -1,27 +0,0 @@
-0
-25
-ECOMP-PORTAL-OUTBOX-VID1 0 0
-PDPD-CONFIGURATION 0 2
-msgrtr.apinode.metrics.dmaap 1 26
-unauthenticated.SEC_MEASUREMENT_OUTPUT 1 1
-APPC-TEST2 0 0
-unauthenticated.TCA_EVENT_OUTPUT 1 1
-APPC-TEST1 0 0
-APPC-CL 0 2
-ECOMP-PORTAL-INBOX 0 0
-APPC-CL 1 0
-APPC-TEST2 1 1
-unauthenticated.TCA_EVENT_OUTPUT 0 1
-unauthenticated.SEC_MEASUREMENT_OUTPUT 0 1
-SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-POLICY-CL-MGT 1 1
-PDPD-CONFIGURATION 1 0
-DCAE-CL-EVENT 1 1
-msgrtr.apinode.metrics.dmaap 0 4
-ECOMP-PORTAL-OUTBOX-APP1 0 0
-ECOMP-PORTAL-OUTBOX-SDC1 0 0
-POLICY-CL-MGT 0 1
-SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-DCAE-CL-EVENT 0 1
-ECOMP-PORTAL-OUTBOX-DBC1 0 0
-ECOMP-PORTAL-OUTBOX-POL1 0 0
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/replication-offset-checkpoint b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/replication-offset-checkpoint
deleted file mode 100644
index a003b5d19d..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/replication-offset-checkpoint
+++ /dev/null
@@ -1,27 +0,0 @@
-0
-25
-ECOMP-PORTAL-OUTBOX-VID1 0 0
-PDPD-CONFIGURATION 0 2
-msgrtr.apinode.metrics.dmaap 1 26
-unauthenticated.SEC_MEASUREMENT_OUTPUT 1 1
-APPC-TEST2 0 0
-unauthenticated.TCA_EVENT_OUTPUT 1 1
-APPC-TEST1 0 0
-APPC-CL 0 2
-ECOMP-PORTAL-INBOX 0 0
-APPC-CL 1 0
-APPC-TEST2 1 1
-unauthenticated.TCA_EVENT_OUTPUT 0 1
-unauthenticated.SEC_MEASUREMENT_OUTPUT 0 1
-SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-POLICY-CL-MGT 1 1
-PDPD-CONFIGURATION 1 0
-DCAE-CL-EVENT 1 1
-msgrtr.apinode.metrics.dmaap 0 4
-ECOMP-PORTAL-OUTBOX-APP1 0 0
-ECOMP-PORTAL-OUTBOX-SDC1 0 0
-POLICY-CL-MGT 0 1
-SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-DCAE-CL-EVENT 0 1
-ECOMP-PORTAL-OUTBOX-DBC1 0 0
-ECOMP-PORTAL-OUTBOX-POL1 0 0
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.log
deleted file mode 100644
index 33bee2d7ac..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.log
deleted file mode 100644
index 69b1e68398..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.SEC_MEASUREMENT_OUTPUT-1/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.log
deleted file mode 100644
index 68a76bc308..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-0/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.index b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.index
deleted file mode 100644
index e69de29bb2..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.index
+++ /dev/null
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.log b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.log
deleted file mode 100644
index 89ec482f80..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/kafka-logs/unauthenticated.TCA_EVENT_OUTPUT-1/00000000000000000000.log
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.1 b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.1
deleted file mode 100644
index f3cb13643f..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.1
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.103 b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.103
deleted file mode 100644
index 9b648e28bf..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.103
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.125 b/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.125
deleted file mode 100644
index 0613642554..0000000000
--- a/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper/version-2/log.125
+++ /dev/null
Binary files differ
diff --git a/kubernetes/config/onap-parameters-sample.yaml b/kubernetes/config/onap-parameters-sample.yaml
deleted file mode 100644
index 80f05da6e3..0000000000
--- a/kubernetes/config/onap-parameters-sample.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-OPENSTACK_UBUNTU_14_IMAGE: "Ubuntu_14.04.5_LTS"
-OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
-OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
-OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
-OPENSTACK_OAM_NETWORK_CIDR: "192.168.30.0/24"
-OPENSTACK_USERNAME: "vnf_user"
-OPENSTACK_API_KEY: "vnf_password"
-OPENSTACK_TENANT_NAME: "vnfs"
-OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5"
-OPENSTACK_REGION: "RegionOne"
-OPENSTACK_KEYSTONE_URL: "http://1.2.3.4:5000"
-OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
-OPENSTACK_SERVICE_TENANT_NAME: "service"
-DMAAP_TOPIC: "AUTO"
-DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
diff --git a/kubernetes/config/templates/pod.yaml b/kubernetes/config/templates/pod.yaml
deleted file mode 100644
index 93c64ab3dd..0000000000
--- a/kubernetes/config/templates/pod.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: global-onap-configmap
- namespace: "{{ .Values.nsPrefix }}"
-data:
-{{ .Files.Get "onap-parameters.yaml" | indent 2 }}
----
-apiVersion: v1
-kind: Pod
-metadata:
- name: {{ .Chart.Name }}
- namespace: "{{ .Values.nsPrefix }}"
-spec:
- containers:
- - name: {{ .Chart.Name }}
- image: "{{ .Values.image.repository }}"
- imagePullPolicy: {{ .Values.image.pullPolicy }}
- envFrom:
- - configMapRef:
- name: global-onap-configmap
- env:
- - name: NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: NAMESPACE_PREFIX
- value: {{ .Values.nsPrefix }}
- - name: DOCKER_SHARE_PATH
- value: {{ .Values.dockerSharePath }}
- volumeMounts:
- - name: config-init-root
- mountPath: /config-init/
- volumes:
- - name: config-init-root
- hostPath:
- path: {{ .Values.dockerSharePath }}
- restartPolicy: Never
diff --git a/kubernetes/consul/charts/consul-server/templates/service.yaml b/kubernetes/consul/charts/consul-server/templates/service.yaml
index c0f8726826..0b16cf6920 100644
--- a/kubernetes/consul/charts/consul-server/templates/service.yaml
+++ b/kubernetes/consul/charts/consul-server/templates/service.yaml
@@ -38,6 +38,7 @@ spec:
release: {{ .Release.Name }}
clusterIP: None
---
+apiVersion: v1
kind: Service
metadata:
name: {{ include "common.servicename" . }}-ui
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-cluster-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-cluster-health.sh
new file mode 100644
index 0000000000..3b186547ce
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-cluster-health.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# query ODL cluster state
+USERNAME="{{.Values.odl.jolokia.username}}"
+PASSWORD="{{.Values.odl.jolokia.password}}"
+
+count=${SDNC_ODL_COUNT:-1}
+siteId=0
+if [ "$SDNC_IS_PRIMARY_CLUSTER" = "false" ];then
+ siteId=1
+fi
+
+for instance in $(seq $count);do
+ shard=member-$(( $siteId*$count + $instance ))-shard-default-config
+ mbean=Category=Shards,name=$shard,type=DistributedConfigDatastore
+ url=http://{{.Release.Name}}-sdnc-$(( $instance-1 )).sdnc-cluster.{{.Release.Namespace}}:8181/jolokia/read/org.opendaylight.controller:$mbean
+
+ response=$( curl -s -u $USERNAME:$PASSWORD $url )
+ rc=$?
+ if [ $rc -ne 0 ];then
+ # failed to contact SDN-C instance - try another
+    echo "Unable to connect to $shard [rc=$rc]"
+ continue
+ fi
+
+ status=$( echo "$response" | jq -r ".status" )
+ if [ "$status" != "200" ];then
+ # query failed, try another instance
+ echo "$shard query failed [http-status=$status]"
+ continue
+ fi
+
+ raftState=$( echo "$response" | jq -r ".value.RaftState" )
+ if [ "$raftState" = "Leader" -o "$raftState" = "Follower" ];then
+ # cluster has a leader and is healthy
+ echo "$shard is healthy [RaftState=$raftState]"
+ exit 0
+ else
+ echo "$shard is not healthy [RaftState=$raftState]"
+ fi
+done
+
+# ODL cluster is not healthy
+exit 2
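
For reference, a minimal sketch of the Jolokia query this health script boils down to, assuming a single-member primary cluster (SDNC_ODL_COUNT=1), the default admin/admin Jolokia credentials from values.yaml, a release named onap and the onap namespace; the hostname and credentials are illustrative:

# Query the default-config shard of the first SDNC member and extract the
# two fields the script checks: the HTTP status and the shard's RaftState.
curl -s -u admin:admin \
  "http://onap-sdnc-0.sdnc-cluster.onap:8181/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore" \
  | jq -r '.status, .value.RaftState'
# A healthy member prints 200 followed by Leader or Follower.
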
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-cluster-health.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-cluster-health.json
new file mode 100644
index 0000000000..86a7630392
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-cluster-health.json
@@ -0,0 +1,14 @@
+{
+ "service": {
+ "name": "Health Check: SDNC ODL Cluster",
+ "checks": [
+ {
+ "id": "sdnc-odl-cluster-healthcheck",
+ "name": "SDNC ODL Cluster Health Check",
+ "script": "/consul/scripts/sdnc-cluster-health.sh",
+ "interval": "15s",
+ "timeout": "10s"
+ }
+ ]
+ }
+}
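
As a usage note: once the Consul agent picks up this definition (script checks are enabled via the -enable-script-checks flag added to the agent command further down), the check can be inspected through the agent's standard HTTP API; the agent address and jq filter below are illustrative:

# List the checks registered with the local Consul agent and show each
# check's current status and last script output.
curl -s http://127.0.0.1:8500/v1/agent/checks | jq 'map_values({Status, Output})'
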
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
index e684c09da8..ed196bd0cf 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json
@@ -5,7 +5,7 @@
{
"id": "sdnctldb01",
"name": "SDNC SDNCTLDB01 Health Check",
- "tcp": "sdnc-dbhost:3306",
+ "tcp": "sdnc-sdnctldb01:3306",
"interval": "10s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
index ae4820a3e6..b63329f544 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json
@@ -5,7 +5,7 @@
{
"id": "sdnctldb02",
"name": "SDNC SDNCTLDB02 Health Check",
- "tcp": "sdnc-dbhost:3306",
+ "tcp": "sdnc-sdnctldb02:3306",
"interval": "10s",
"timeout": "1s"
}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
index 604b5be901..db84164fc7 100644
--- a/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
+++ b/kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json
@@ -5,7 +5,7 @@
{
"id": "sdnc-sdnhost",
"name": "SDNC SDN Host Health Check",
- "http": "http://sdnhost:8282/apidoc/explorer/index.html",
+ "http": "http://sdnc:8282/apidoc/explorer/index.html",
"method": "HEAD",
"header": {
"Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
diff --git a/kubernetes/consul/templates/deployment.yaml b/kubernetes/consul/templates/deployment.yaml
index d37521b5e6..2df4d2ac12 100644
--- a/kubernetes/consul/templates/deployment.yaml
+++ b/kubernetes/consul/templates/deployment.yaml
@@ -42,9 +42,15 @@ spec:
- /bin/sh
- "-c"
- |
+ apk update && apk add jq
cp /tmp/consul/config/* /consul/config
/usr/local/bin/docker-entrypoint.sh agent -client 0.0.0.0 -enable-script-checks -retry-join {{ .Values.consulServer.nameOverride }}
name: {{ include "common.name" . }}
+ env:
+ - name: SDNC_ODL_COUNT
+ value: "{{ .Values.sdnc.replicaCount }}"
+ - name: SDNC_IS_PRIMARY_CLUSTER
+ value: "{{ .Values.sdnc.config.isPrimaryCluster }}"
volumeMounts:
- mountPath: /tmp/consul/config
name: consul-agent-config
diff --git a/kubernetes/consul/values.yaml b/kubernetes/consul/values.yaml
index abfc9ef0ef..37e74f38d5 100644
--- a/kubernetes/consul/values.yaml
+++ b/kubernetes/consul/values.yaml
@@ -61,3 +61,13 @@ ingress:
enabled: false
resources: {}
+
+odl:
+ jolokia:
+ username: admin
+ password: admin
+
+sdnc:
+ config:
+ isPrimaryCluster: true
+ replicaCount: 1
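
Because odl.jolokia and sdnc are ordinary Helm values, a multi-member or geo-redundant deployment can override them at install time. A minimal sketch, assuming the chart is installed from kubernetes/consul as release onap-consul into the onap namespace (names are illustrative):

# Install or upgrade the consul chart with three SDNC members, marking this
# site as the secondary (non-primary) cluster.
helm upgrade --install onap-consul ./kubernetes/consul \
  --namespace onap \
  --set sdnc.replicaCount=3 \
  --set sdnc.config.isPrimaryCluster=false \
  --set odl.jolokia.username=admin \
  --set odl.jolokia.password=admin
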
diff --git a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
index 1a6357a96d..3f84a85839 100644
--- a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
@@ -62,21 +62,21 @@ postgres:
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:latest
+image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.1.11
# DCAE component images to be deployed via Cloudify Manager
# Use to override default setting in blueprints
componentImages:
#TODO for further investigation: input template expansion issues if we comment this out
- config_binding_service: onap/org.onap.dcaegen2.platform.configbinding:latest
- deployment_handler: onap/org.onap.dcaegen2.platform.deployment-handler:latest
- holmes_engine: onap/holmes/engine-management:latest
- holmes_rules: onap/holmes/rule-management:latest
- inventory: onap/org.onap.dcaegen2.platform.inventory-api:latest
- policy_handler: onap/org.onap.dcaegen2.platform.policy-handler:latest
- service_change_handler: onap/org.onap.dcaegen2.platform.servicechange-handler:latest
- tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container:latest
- ves: onap/org.onap.dcaegen2.collectors.ves.vescollector:latest
+ config_binding_service: onap/org.onap.dcaegen2.platform.configbinding:2.1.5
+ deployment_handler: onap/org.onap.dcaegen2.platform.deployment-handler:2.1.5
+ holmes_engine: onap/holmes/engine-management:1.1.0
+ holmes_rules: onap/holmes/rule-management:1.1.0
+ inventory: onap/org.onap.dcaegen2.platform.inventory-api:3.0.1
+ policy_handler: onap/org.onap.dcaegen2.platform.policy-handler:2.4.5
+ service_change_handler: onap/org.onap.dcaegen2.platform.servicechange-handler:1.1.4
+ tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.0
+ ves: onap/org.onap.dcaegen2.collectors.ves.vescollector:1.2.0
# Kubernetes namespace for components deployed via Cloudify manager
# If empty, use the common namespace
diff --git a/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml b/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
index 6d87be3441..fee4cc35bb 100644
--- a/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
@@ -43,7 +43,7 @@ config:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.cm-container:latest
+image: onap/org.onap.dcaegen2.deployments.cm-container:1.3.0
pullPolicy: Always
# probe configuration parameters
diff --git a/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml b/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
index c10e2403a3..7e9835457d 100644
--- a/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
@@ -45,7 +45,7 @@ readiness:
periodSeconds: 10
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.healthcheck-container:latest
+image: onap/org.onap.dcaegen2.deployments.healthcheck-container:1.1.0
# Kubernetes namespace for components deployed via Cloudify manager
# If empty, use the common namespace
diff --git a/kubernetes/dcaegen2/charts/dcae-redis/resources/redis/scripts/redis-cluster-config.sh b/kubernetes/dcaegen2/charts/dcae-redis/resources/redis/scripts/redis-cluster-config.sh
index f4547525c9..a63ab1668d 100755
--- a/kubernetes/dcaegen2/charts/dcae-redis/resources/redis/scripts/redis-cluster-config.sh
+++ b/kubernetes/dcaegen2/charts/dcae-redis/resources/redis/scripts/redis-cluster-config.sh
@@ -25,9 +25,7 @@
do
echo "======> $(echo $NODES |wc -w) / {{.Values.replicaCount}} pods up"
sleep 5
- RESP=$(wget -vO- --ca-certificate /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header "Authorization
-: Bearer $(</var/run/secrets/kubernetes.io/serviceaccount/token)" https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT
-_443_TCP_PORT/api/v1/namespaces/{{.Release.Namespace}}/pods?labelSelector=app={{.Chart.Name}})
+ RESP=$(wget -vO- --ca-certificate /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header "Authorization: Bearer $(</var/run/secrets/kubernetes.io/serviceaccount/token)" https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT/api/v1/namespaces/{{.Release.Namespace}}/pods?labelSelector=app={{.Chart.Name}})
IPS=$(echo $RESP | jq -r '.items[].status.podIP')
IPS2=$(echo $IPS | sed -e 's/[a-zA-Z]*//g')
diff --git a/kubernetes/dcaegen2/charts/dcae-redis/values.yaml b/kubernetes/dcaegen2/charts/dcae-redis/values.yaml
index 8550da074b..6ccab9ccb7 100644
--- a/kubernetes/dcaegen2/charts/dcae-redis/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-redis/values.yaml
@@ -28,7 +28,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.redis-cluster-container:latest
+image: onap/org.onap.dcaegen2.deployments.redis-cluster-container:1.0.0
pullPolicy: Always
# application configuration
diff --git a/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml b/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
index b3bd31726b..39694c648c 100644
--- a/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
+++ b/kubernetes/dmaap/charts/dmaap-bus-controller/values.yaml
@@ -30,7 +30,7 @@ pullPolicy: Always
# application images
repository: nexus3.onap.org:10001
-image: onap/dmaap/buscontroller:latest
+image: onap/dmaap/buscontroller:1.0.12
# application configuration
diff --git a/kubernetes/dmaap/charts/message-router/charts/message-router-kafka/values.yaml b/kubernetes/dmaap/charts/message-router/charts/message-router-kafka/values.yaml
index d6a8c2c03c..b2b454d8bc 100644
--- a/kubernetes/dmaap/charts/message-router/charts/message-router-kafka/values.yaml
+++ b/kubernetes/dmaap/charts/message-router/charts/message-router-kafka/values.yaml
@@ -43,7 +43,7 @@ debugEnabled: false
# application configuration
config:
# gerrit branch where the latest code is checked in
- gerritBranch: master
+ gerritBranch: 2.0.0-ONAP
# gerrit project where the latest code is checked in
gerritProject: http://gerrit.onap.org/r/dmaap/messagerouter/messageservice.git
diff --git a/kubernetes/dmaap/charts/message-router/charts/message-router-zookeeper/values.yaml b/kubernetes/dmaap/charts/message-router/charts/message-router-zookeeper/values.yaml
index a71b14a046..3810cdfe57 100644
--- a/kubernetes/dmaap/charts/message-router/charts/message-router-zookeeper/values.yaml
+++ b/kubernetes/dmaap/charts/message-router/charts/message-router-zookeeper/values.yaml
@@ -39,7 +39,7 @@ debugEnabled: false
# application configuration
config:
# gerrit branch where the latest code is checked in
- gerritBranch: master
+ gerritBranch: 2.0.0-ONAP
# gerrit project where the latest code is checked in
gerritProject: http://gerrit.onap.org/r/dmaap/messagerouter/messageservice.git
diff --git a/kubernetes/esr/charts/esr-gui/values.yaml b/kubernetes/esr/charts/esr-gui/values.yaml
index 610d9eb17b..bddbf21a69 100644
--- a/kubernetes/esr/charts/esr-gui/values.yaml
+++ b/kubernetes/esr/charts/esr-gui/values.yaml
@@ -23,7 +23,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aai/esr-gui:latest
+image: onap/aai/esr-gui:1.1.0
pullPolicy: Always
msbaddr: msb-iag.{{ include "common.namespace" . }}:80
diff --git a/kubernetes/esr/values.yaml b/kubernetes/esr/values.yaml
index baa2155375..eb17662701 100644
--- a/kubernetes/esr/values.yaml
+++ b/kubernetes/esr/values.yaml
@@ -27,7 +27,7 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: onap/aai/esr-server:latest
+image: onap/aai/esr-server:1.1.0
pullPolicy: Always
msbaddr: msb-iag.{{ include "common.namespace" . }}:80
diff --git a/kubernetes/log/charts/log-logstash/resources/config/logstash.yml b/kubernetes/log/charts/log-logstash/resources/config/logstash.yml
index 3ddf63f9cc..7e5cf67da5 100644
--- a/kubernetes/log/charts/log-logstash/resources/config/logstash.yml
+++ b/kubernetes/log/charts/log-logstash/resources/config/logstash.yml
@@ -1,4 +1,5 @@
http.host: "0.0.0.0"
+pipeline.workers: 3
## Path where pipeline configurations reside
path.config: /usr/share/logstash/pipeline
diff --git a/kubernetes/msb/charts/kube2msb/values.yaml b/kubernetes/msb/charts/kube2msb/values.yaml
index a97863b858..d4ec088970 100644
--- a/kubernetes/msb/charts/kube2msb/values.yaml
+++ b/kubernetes/msb/charts/kube2msb/values.yaml
@@ -11,8 +11,9 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/oom/kube2msb:latest
+image: onap/oom/kube2msb:1.1.0
pullPolicy: Always
+istioSidecar: false
# application configuration
config:
diff --git a/kubernetes/msb/charts/msb-consul/templates/deployment.yaml b/kubernetes/msb/charts/msb-consul/templates/deployment.yaml
index 74342e7e0c..09a3d8f470 100644
--- a/kubernetes/msb/charts/msb-consul/templates/deployment.yaml
+++ b/kubernetes/msb/charts/msb-consul/templates/deployment.yaml
@@ -15,6 +15,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
containers:
- name: {{ include "common.name" . }}
diff --git a/kubernetes/msb/charts/msb-consul/templates/service.yaml b/kubernetes/msb/charts/msb-consul/templates/service.yaml
index 86442a2740..9a7029d21f 100644
--- a/kubernetes/msb/charts/msb-consul/templates/service.yaml
+++ b/kubernetes/msb/charts/msb-consul/templates/service.yaml
@@ -15,11 +15,11 @@ spec:
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{- end}}
selector:
app: {{ include "common.name" . }}
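
The http-/https- prefixes added to service port names here (and in the msb-discovery, msb-eag, msb-iag and multicloud services below) follow Istio's port-naming convention for protocol detection, complementing the new istioSidecar value that drives the sidecar.istio.io/inject annotation. A quick way to check the rendered names, assuming the onap namespace and the default msb-consul service name (both illustrative):

# Print the port names of the deployed service; each should carry its
# protocol prefix, e.g. http-msb-consul.
kubectl -n onap get svc msb-consul \
  -o jsonpath='{range .spec.ports[*]}{.name}{"\n"}{end}'
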
diff --git a/kubernetes/msb/charts/msb-consul/values.yaml b/kubernetes/msb/charts/msb-consul/values.yaml
index 0c7a508dc4..80b86bf82c 100644
--- a/kubernetes/msb/charts/msb-consul/values.yaml
+++ b/kubernetes/msb/charts/msb-consul/values.yaml
@@ -11,6 +11,7 @@ global:
dockerHubRepository: registry.hub.docker.com
image: library/consul:0.9.3
pullPolicy: Always
+istioSidecar: false
# application configuration
config: {}
diff --git a/kubernetes/msb/charts/msb-discovery/templates/deployment.yaml b/kubernetes/msb/charts/msb-discovery/templates/deployment.yaml
index aec03b0b3f..967e0e9bb7 100644
--- a/kubernetes/msb/charts/msb-discovery/templates/deployment.yaml
+++ b/kubernetes/msb/charts/msb-discovery/templates/deployment.yaml
@@ -15,6 +15,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
- command:
diff --git a/kubernetes/msb/charts/msb-discovery/templates/service.yaml b/kubernetes/msb/charts/msb-discovery/templates/service.yaml
index 86442a2740..9a7029d21f 100644
--- a/kubernetes/msb/charts/msb-discovery/templates/service.yaml
+++ b/kubernetes/msb/charts/msb-discovery/templates/service.yaml
@@ -15,11 +15,11 @@ spec:
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{- end}}
selector:
app: {{ include "common.name" . }}
diff --git a/kubernetes/msb/charts/msb-discovery/values.yaml b/kubernetes/msb/charts/msb-discovery/values.yaml
index fdbfa338cc..13b1462219 100644
--- a/kubernetes/msb/charts/msb-discovery/values.yaml
+++ b/kubernetes/msb/charts/msb-discovery/values.yaml
@@ -11,8 +11,9 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/msb/msb_discovery:1.1.0-STAGING-latest
+image: onap/msb/msb_discovery:1.2.0-SNAPSHOT-latest
pullPolicy: Always
+istioSidecar: true
# application configuration
config: {}
diff --git a/kubernetes/msb/charts/msb-eag/templates/deployment.yaml b/kubernetes/msb/charts/msb-eag/templates/deployment.yaml
index 26ee3889c1..31bb2c96c8 100644
--- a/kubernetes/msb/charts/msb-eag/templates/deployment.yaml
+++ b/kubernetes/msb/charts/msb-eag/templates/deployment.yaml
@@ -15,6 +15,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
- command:
diff --git a/kubernetes/msb/charts/msb-eag/templates/service.yaml b/kubernetes/msb/charts/msb-eag/templates/service.yaml
index 3e4a786cb8..5c7d83049b 100644
--- a/kubernetes/msb/charts/msb-eag/templates/service.yaml
+++ b/kubernetes/msb/charts/msb-eag/templates/service.yaml
@@ -15,17 +15,17 @@ spec:
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
- port: {{ .Values.service.externalPortHttps }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePortHttps }}
- name: {{ .Values.service.name }}-https
+ name: https-{{ .Values.service.name }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
- port: {{ .Values.service.externalPortHttps }}
targetPort: {{ .Values.service.internalPortHttps }}
- name: {{ .Values.service.name }}-https
+ name: https-{{ .Values.service.name }}
{{- end}}
selector:
app: {{ include "common.name" . }}
diff --git a/kubernetes/msb/charts/msb-eag/values.yaml b/kubernetes/msb/charts/msb-eag/values.yaml
index 74f9c69b5d..cac5c0c782 100644
--- a/kubernetes/msb/charts/msb-eag/values.yaml
+++ b/kubernetes/msb/charts/msb-eag/values.yaml
@@ -11,8 +11,9 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/msb/msb_apigateway:1.1.0-STAGING-latest
+image: onap/msb/msb_apigateway:1.2.0-SNAPSHOT-latest
pullPolicy: Always
+istioSidecar: true
# application configuration
config:
diff --git a/kubernetes/msb/charts/msb-iag/templates/deployment.yaml b/kubernetes/msb/charts/msb-iag/templates/deployment.yaml
index 26ee3889c1..31bb2c96c8 100644
--- a/kubernetes/msb/charts/msb-iag/templates/deployment.yaml
+++ b/kubernetes/msb/charts/msb-iag/templates/deployment.yaml
@@ -15,6 +15,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
- command:
diff --git a/kubernetes/msb/charts/msb-iag/templates/service.yaml b/kubernetes/msb/charts/msb-iag/templates/service.yaml
index 3e4a786cb8..5c7d83049b 100644
--- a/kubernetes/msb/charts/msb-iag/templates/service.yaml
+++ b/kubernetes/msb/charts/msb-iag/templates/service.yaml
@@ -15,17 +15,17 @@ spec:
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
- port: {{ .Values.service.externalPortHttps }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePortHttps }}
- name: {{ .Values.service.name }}-https
+ name: https-{{ .Values.service.name }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
- port: {{ .Values.service.externalPortHttps }}
targetPort: {{ .Values.service.internalPortHttps }}
- name: {{ .Values.service.name }}-https
+ name: https-{{ .Values.service.name }}
{{- end}}
selector:
app: {{ include "common.name" . }}
diff --git a/kubernetes/msb/charts/msb-iag/values.yaml b/kubernetes/msb/charts/msb-iag/values.yaml
index 657a5543b2..885ee00d4f 100644
--- a/kubernetes/msb/charts/msb-iag/values.yaml
+++ b/kubernetes/msb/charts/msb-iag/values.yaml
@@ -11,8 +11,9 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/msb/msb_apigateway:1.1.0-STAGING-latest
+image: onap/msb/msb_apigateway:1.2.0-SNAPSHOT-latest
pullPolicy: Always
+istioSidecar: true
# application configuration
config:
diff --git a/kubernetes/multicloud/charts/multicloud-ocata/templates/deployment.yaml b/kubernetes/multicloud/charts/multicloud-ocata/templates/deployment.yaml
index 0106a79973..0f8335ec31 100644
--- a/kubernetes/multicloud/charts/multicloud-ocata/templates/deployment.yaml
+++ b/kubernetes/multicloud/charts/multicloud-ocata/templates/deployment.yaml
@@ -35,6 +35,8 @@ spec:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
name: {{ include "common.name" . }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
containers:
- env:
diff --git a/kubernetes/multicloud/charts/multicloud-ocata/templates/service.yaml b/kubernetes/multicloud/charts/multicloud-ocata/templates/service.yaml
index f47383e0db..b01f37ed98 100644
--- a/kubernetes/multicloud/charts/multicloud-ocata/templates/service.yaml
+++ b/kubernetes/multicloud/charts/multicloud-ocata/templates/service.yaml
@@ -38,13 +38,13 @@ metadata:
spec:
ports:
{{if eq .Values.service.type "NodePort" -}}
- - name: {{ .Values.service.name }}
+ - name: http-{{ .Values.service.name }}
port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{- end}}
selector:
app: {{ include "common.name" . }}
diff --git a/kubernetes/multicloud/charts/multicloud-ocata/values.yaml b/kubernetes/multicloud/charts/multicloud-ocata/values.yaml
index 593757c5a2..5011061f9a 100644
--- a/kubernetes/multicloud/charts/multicloud-ocata/values.yaml
+++ b/kubernetes/multicloud/charts/multicloud-ocata/values.yaml
@@ -23,9 +23,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/multicloud/openstack-ocata:1.1.2-SNAPSHOT
+image: onap/multicloud/openstack-ocata:1.1.2
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# application configuration
config:
msbgateway: msb-iag
diff --git a/kubernetes/multicloud/charts/multicloud-vio/templates/deployment.yaml b/kubernetes/multicloud/charts/multicloud-vio/templates/deployment.yaml
index fa9646657b..110d7aab46 100644
--- a/kubernetes/multicloud/charts/multicloud-vio/templates/deployment.yaml
+++ b/kubernetes/multicloud/charts/multicloud-vio/templates/deployment.yaml
@@ -35,6 +35,8 @@ spec:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
name: {{ include "common.name" . }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
containers:
- env:
diff --git a/kubernetes/multicloud/charts/multicloud-vio/templates/service.yaml b/kubernetes/multicloud/charts/multicloud-vio/templates/service.yaml
index dcaf37d192..e8374970bb 100644
--- a/kubernetes/multicloud/charts/multicloud-vio/templates/service.yaml
+++ b/kubernetes/multicloud/charts/multicloud-vio/templates/service.yaml
@@ -38,13 +38,13 @@ metadata:
spec:
ports:
{{if eq .Values.service.type "NodePort" -}}
- - name: {{ .Values.service.name }}
+ - name: http-{{ .Values.service.name }}
port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{- end}}
selector:
app: {{ include "common.name" . }}
diff --git a/kubernetes/multicloud/charts/multicloud-vio/values.yaml b/kubernetes/multicloud/charts/multicloud-vio/values.yaml
index 3b206cc20d..a660e993ef 100644
--- a/kubernetes/multicloud/charts/multicloud-vio/values.yaml
+++ b/kubernetes/multicloud/charts/multicloud-vio/values.yaml
@@ -23,9 +23,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/multicloud/vio:1.1.2-STAGING
+image: onap/multicloud/vio:1.1.2
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# application configuration
config:
msbgateway: msb-iag
diff --git a/kubernetes/multicloud/charts/multicloud-windriver/templates/deployment.yaml b/kubernetes/multicloud/charts/multicloud-windriver/templates/deployment.yaml
index 62857f6664..a48e1e19d2 100644
--- a/kubernetes/multicloud/charts/multicloud-windriver/templates/deployment.yaml
+++ b/kubernetes/multicloud/charts/multicloud-windriver/templates/deployment.yaml
@@ -35,6 +35,8 @@ spec:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
name: {{ include "common.name" . }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
containers:
- env:
diff --git a/kubernetes/multicloud/charts/multicloud-windriver/templates/service.yaml b/kubernetes/multicloud/charts/multicloud-windriver/templates/service.yaml
index dd3bd4214f..276d46f2d9 100644
--- a/kubernetes/multicloud/charts/multicloud-windriver/templates/service.yaml
+++ b/kubernetes/multicloud/charts/multicloud-windriver/templates/service.yaml
@@ -39,13 +39,13 @@ metadata:
spec:
ports:
{{ if eq .Values.service.type "NodePort" }}
- - name: {{ .Values.service.name }}
+ - name: http-{{ .Values.service.name }}
port: {{ .Values.service.externalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
{{ else }}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: http-{{ .Values.service.name }}
{{ end }}
selector:
app: {{ include "common.name" . }}
diff --git a/kubernetes/multicloud/charts/multicloud-windriver/values.yaml b/kubernetes/multicloud/charts/multicloud-windriver/values.yaml
index ef3833ee5d..1286cbdd0f 100644
--- a/kubernetes/multicloud/charts/multicloud-windriver/values.yaml
+++ b/kubernetes/multicloud/charts/multicloud-windriver/values.yaml
@@ -23,9 +23,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/multicloud/openstack-windriver:1.1.2-SNAPSHOT
+image: onap/multicloud/openstack-windriver:1.1.2
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# application configuration
config:
msbgateway: msb-iag
diff --git a/kubernetes/multicloud/values.yaml b/kubernetes/multicloud/values.yaml
index 5d742cbc48..8216962b2a 100644
--- a/kubernetes/multicloud/values.yaml
+++ b/kubernetes/multicloud/values.yaml
@@ -25,9 +25,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/multicloud/framework:1.1.2-STAGING
+image: onap/multicloud/framework:1.1.2
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# application configuration
config:
msbgateway: msb-iag
diff --git a/kubernetes/nbi/templates/deployment.yaml b/kubernetes/nbi/templates/deployment.yaml
index 06572492fd..57bdf341f2 100644
--- a/kubernetes/nbi/templates/deployment.yaml
+++ b/kubernetes/nbi/templates/deployment.yaml
@@ -70,7 +70,7 @@ spec:
- name: ONAP_LCPCLOUDREGIONID
value: {{ .Values.config.openStackRegion }}
- name: ONAP_TENANTID
- value: {{ .Values.config.openStackServiceTenantName }}
+ value: {{ .Values.config.openStackVNFTenantId | quote }}
- name: ONAP_CLOUDOWNER
value: {{ .Values.config.cloudOwner }}
- name: NBI_URL
diff --git a/kubernetes/nbi/values.yaml b/kubernetes/nbi/values.yaml
index 709e580d27..68beacdc35 100644
--- a/kubernetes/nbi/values.yaml
+++ b/kubernetes/nbi/values.yaml
@@ -27,7 +27,7 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: onap/externalapi/nbi:latest
+image: onap/externalapi/nbi:2.0.0
pullPolicy: Always
sdc_authorization: Basic YWFpOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU=
aai_authorization: Basic QUFJOkFBSQ==
@@ -41,7 +41,7 @@ config:
cloudOwner: CloudOwner
ecompInstanceId: OOM
openStackRegion: RegionOne
- openStackServiceTenantName: 31047205ce114b60833b23e400d6a535
+ openStackVNFTenantId: 31047205ce114b60833b23e400d6a535
mariadb:
nameOverride: nbi-mariadb
diff --git a/kubernetes/onap/values.yaml b/kubernetes/onap/values.yaml
index d3b6bc879a..b8f15e17ea 100644
--- a/kubernetes/onap/values.yaml
+++ b/kubernetes/onap/values.yaml
@@ -97,11 +97,8 @@ nbi:
enabled: true
config:
# openstack configuration
- openStackUserName: "vnf_user"
openStackRegion: "Yolo"
- openStackKeyStoneUrl: "http://1.2.3.4:5000"
- openStackServiceTenantName: "service"
- openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+ openStackVNFTenantId: "1234"
policy:
enabled: true
portal:
@@ -115,11 +112,7 @@ sdnc:
replicaCount: 1
- config:
- enableClustering: false
-
mysql:
- disableNfsProvisioner: true
replicaCount: 1
so:
enabled: true
diff --git a/kubernetes/oof/charts/oof-has/charts/oof-has-data/templates/deployment.yaml b/kubernetes/oof/charts/oof-has/charts/oof-has-data/templates/deployment.yaml
index 4effba825e..fde094fd16 100755
--- a/kubernetes/oof/charts/oof-has/charts/oof-has-data/templates/deployment.yaml
+++ b/kubernetes/oof/charts/oof-has/charts/oof-has-data/templates/deployment.yaml
@@ -92,9 +92,9 @@ spec:
- mountPath: /usr/local/bin/aai_key.key
name: {{ .Values.global.commonConfigPrefix }}-config
subPath: aai_key.key
- - mountPath: /usr/local/bin/bundle.pem
+ - mountPath: /usr/local/bin/AAF_RootCA.cer
name: {{ .Values.global.commonConfigPrefix }}-config
- subPath: bundle.pem
+ subPath: AAF_RootCA.cer
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- if .Values.nodeSelector }}
@@ -123,7 +123,7 @@ spec:
path: aai_cert.cer
- key: aai_key.key
path: aai_key.key
- - key: bundle.pem
- path: bundle.pem
+ - key: AAF_RootCA.cer
+ path: AAF_RootCA.cer
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/oof/charts/oof-has/charts/oof-has-music/values.yaml b/kubernetes/oof/charts/oof-has/charts/oof-has-music/values.yaml
index d52c3c0321..ba0475988d 100644
--- a/kubernetes/oof/charts/oof-has/charts/oof-has-music/values.yaml
+++ b/kubernetes/oof/charts/oof-has/charts/oof-has-music/values.yaml
@@ -30,7 +30,7 @@ global:
repository: nexus3.onap.org:10001
image: library/tomcat:8.5
pullPolicy: Always
-warImage: onap/music/music:latest
+warImage: onap/music/music:2.5.3
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/oof/charts/oof-has/resources/config/AAF_RootCA.cer b/kubernetes/oof/charts/oof-has/resources/config/AAF_RootCA.cer
new file mode 100755
index 0000000000..e9a50d7ea0
--- /dev/null
+++ b/kubernetes/oof/charts/oof-has/resources/config/AAF_RootCA.cer
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFPjCCAyagAwIBAgIJAJ6u7cCnzrWdMA0GCSqGSIb3DQEBCwUAMCwxDjAMBgNV
+BAsMBU9TQUFGMQ0wCwYDVQQKDARPTkFQMQswCQYDVQQGEwJVUzAeFw0xODA0MDUx
+NDE1MjhaFw0zODAzMzExNDE1MjhaMCwxDjAMBgNVBAsMBU9TQUFGMQ0wCwYDVQQK
+DARPTkFQMQswCQYDVQQGEwJVUzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
+ggIBAMA5pkgRs7NhGG4ew5JouhyYakgYUyFaG121+/h8qbSdt0hVQv56+EA41Yq7
+XGie7RYDQK9NmAFF3gruE+6X7wvJiChp+Cyd7sFMnb65uWhxEdxWTM2BJFrgfzUn
+H8ZCxgaCo3XH4PzlKRy2LQQJEJECwl/RZmRCXijMt5e9h8XoZY/fKkKcZZUsWNCM
+pTo266wjvA9MXLmdgReRj0+vrCjrNqy+htwJDztoiHWiYPqT6o8EvGcgjNqjlZx7
+NUNf8MfLDByqKF6+wRbHv1GKjn3/Vijd45Fv8riyRYROiFanvbV6jIfBkv8PZbXg
+2VDWsYsgp8NAvMxK+iV8cO+Ck3lBI2GOPZbCEqpPVTYbLUz6sczAlCXwQoPzDIZY
+wYa3eR/gYLY1gP2iEVHORag3bLPap9ZX5E8DZkzTNTjovvLk8KaCmfcaUMJsBtDd
+ApcUitz10cnRyZc1sX3gE1f3DpzQM6t9C5sOVyRhDcSrKqqwb9m0Ss04XAS9FsqM
+P3UWYQyqDXSxlUAYaX892u8mV1hxnt2gjb22RloXMM6TovM3sSrJS0wH+l1nznd6
+aFXftS/G4ZVIVZ/LfT1is4StoyPWZCwwwly1z8qJQ/zhip5NgZTxQw4mi7ww35DY
+PdAQOCoajfSvFjqslQ/cPRi/MRCu079heVb5fQnnzVtnpFQRAgMBAAGjYzBhMB0G
+A1UdDgQWBBRTVTPyS+vQUbHBeJrBKDF77+rtSTAfBgNVHSMEGDAWgBRTVTPyS+vQ
+UbHBeJrBKDF77+rtSTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAN
+BgkqhkiG9w0BAQsFAAOCAgEAPx/IaK94n02wPxpnYTy+LVLIxwdq/kawNd6IbiMz
+L87zmNMDmHcGbfoRCj8OkhuggX9Lx1/CkhpXimuYsZOFQi5blr/u+v4mIbsgbmi9
+7j+cUHDP0zLycvSvxKHty51LwmaX9a4wkJl5zBU4O1sd/H9tWcEmwJ39ltKoBKBx
+c94Zc3iMm5ytRWGj+0rKzLDAXEWpoZ5bE5PLJauA6UDCxDLfs3FwhbS7uDggxYvf
+jySF5FCNET94oJ+m8s7VeHvoa8iPGKvXrIqdd7XDHnqJJlVKr7m9S0fMbyEB8ci2
+RtOXDt93ifY1uhoEtEykn4dqBSp8ezvNMnwoXdYPDvTd9uCAFeWFLVreBAWxd25h
+PsBTkZA5hpa/rA+mKv6Af4VBViYr8cz4dZCsFChuioVebe9ighrfjB//qKepFjPF
+CyjzKN1u0JKm/2x/ORqxkTONG8p3uDwoIOyimUcTtTMv42bfYD88RKakqSFXE9G+
+Z0LlaKABqfjK49o/tsAp+c5LoNlYllKhnetO3QAdraHwdmC36BhoghzR1jpX751A
+cZn2VH3Q4XKyp01cJNCJIrua+A+bx6zh3RyW6zIIkbRCbET+UD+4mr8WIcSE3mtR
+ZVlnhUDO4z9//WKMVzwS9Rh8/kuszrGFI1KQozXCHLrce3YP6RYZfOed79LXaRwX
+dYY=
+-----END CERTIFICATE-----
diff --git a/kubernetes/oof/charts/oof-has/resources/config/conductor.conf b/kubernetes/oof/charts/oof-has/resources/config/conductor.conf
index 4d32deea33..588b9360ba 100755
--- a/kubernetes/oof/charts/oof-has/resources/config/conductor.conf
+++ b/kubernetes/oof/charts/oof-has/resources/config/conductor.conf
@@ -149,22 +149,29 @@ complex_cache_refresh_interval = 60
# Base URL for A&AI, up to and not including the version, and without a
# trailing slash. (string value)
-server_url = https://oof.api.simpledemo.onap.org:8443/aai
+server_url = https://{{.Values.config.aai.serviceName}}.{{ include "common.namespace" . }}:{{.Values.config.aai.port}}/aai
# The version of A&AI in v# format. (string value)
-server_url_version = v11
+server_url_version = v13
# SSL/TLS certificate file in pem format. This certificate must be registered
# with the A&AI endpoint. (string value)
-certificate_file = /usr/local/bin/aai_cert.cer
+certificate_file =
# Private Certificate Key file in pem format. (string value)
-certificate_key_file = /usr/local/bin/aai_key.key
+certificate_key_file =
# Certificate Authority Bundle file in pem format. Must contain the appropriate
# trust chain for the Certificate file. (string value)
#certificate_authority_bundle_file = /opt/app/conductor/etc/certs/ca_bundle.pem
-certificate_authority_bundle_file =/usr/local/bin/bundle.pem
+certificate_authority_bundle_file =/usr/local/bin/AAF_RootCA.cer
+
+# Basic Authentication Username (string value)
+username = OOF
+
+# Basic Authentication Password (string value)
+password = OOF
+
[api]
@@ -355,3 +362,31 @@ concurrent = true
[reservation]
concurrent = true
+
+[multicloud]
+
+#
+# From conductor
+#
+
+# Base URL for Multicloud without a trailing slash. (string value)
+server_url = http://{{.Values.config.msb.serviceName}}:{{.Values.config.msb.port}}/api/multicloud
+
+# Timeout for Multicloud Rest Call (string value)
+multicloud_rest_timeout = 30
+
+# Number of retry for Multicloud Rest Call (string value)
+multicloud_retries = 3
+
+# The version of Multicloud API. (string value)
+server_url_version = v0
+
+
+[vim_controller]
+#
+# From conductor
+#
+
+# Extensions list to use (list value)
+extensions = multicloud
+
diff --git a/kubernetes/oof/charts/oof-has/values.yaml b/kubernetes/oof/charts/oof-has/values.yaml
index 25a076b6cc..6ffb372843 100755
--- a/kubernetes/oof/charts/oof-has/values.yaml
+++ b/kubernetes/oof/charts/oof-has/values.yaml
@@ -24,9 +24,17 @@ global:
commonConfigPrefix: onap-oof-has
image:
readiness: oomk8s/readiness-check:2.0.0
- optf_has: onap/optf-has:latest
+ optf_has: onap/optf-has:1.1.1
filebeat: docker.elastic.co/beats/filebeat:5.5.0
pullPolicy: Always
nodePortPrefix: 302
dataRootDir: /dockerdata-nfs
+config:
+ aai:
+ serviceName: aai
+ port: 8443
+ msb:
+ serviceName: msb-iag
+ port: 80
+
diff --git a/kubernetes/oof/resources/config/osdf_config.yaml b/kubernetes/oof/resources/config/osdf_config.yaml
index 7fe63a50c8..f286be4595 100755
--- a/kubernetes/oof/resources/config/osdf_config.yaml
+++ b/kubernetes/oof/resources/config/osdf_config.yaml
@@ -1,37 +1,18 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Credentials for the OOF placement service – SO. Newly added.
osdfPlacementSOUsername: {{ .Values.config.osdfPlacementSOUsername }}
osdfPlacementSOPassword: {{ .Values.config.osdfPlacementSOPassword }}
-# Credentials for the OOF CM scheduling service – Generic. Newly added
osdfCMSchedulerUsername: {{ .Values.config.osdfCMSchedulerUsername }}
osdfCMSchedulerPassword: {{ .Values.config.osdfCMSchedulerPassword }}
-# msoUrl: "" # The SO url for call back. This will be part of the request, so no need
soUsername: {{ .Values.config.soUsername }}
soPassword: {{ .Values.config.soPassword }}
-conductorUrl: "{{.Release.Name}}-has-api.{{include "common.namespace" .}}:TODO-PORT-FOR-HAS-API"
+conductorUrl: {{ .Values.config.conductorUrl }}
conductorUsername: {{ .Values.config.conductorUsername }}
conductorPassword: {{ .Values.config.conductorPassword }}
conductorPingWaitTime: {{ .Values.config.conductorPingWaitTime }}
conductorMaxRetries: {{ .Values.config.conductorMaxRetries }}
-# Policy Platform -- requires ClientAuth, Authorization, and Environment
policyPlatformUrl: {{ .Values.config.policyPlatformUrl }}
policyPlatformEnv: {{ .Values.config.policyPlatformEnv }}
policyPlatformUsername: {{ .Values.config.policyPlatformUsername }}
diff --git a/kubernetes/oof/values.yaml b/kubernetes/oof/values.yaml
index 9f74271418..baeed47257 100644
--- a/kubernetes/oof/values.yaml
+++ b/kubernetes/oof/values.yaml
@@ -26,7 +26,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/optf-osdf:latest
+image: onap/optf-osdf:1.1.1
pullPolicy: Always
# flag to enable debugging - application support required
@@ -34,7 +34,7 @@ debugEnabled: false
# application configuration
config:
- msbgateway: msb-iag.{{ include "common.namespace" . }}
+ msbgateway: msb-iag
msbPort: 80
# Credentials for the OOF placement service – SO.
osdfPlacementSOUsername: so_test
@@ -46,7 +46,7 @@ config:
soUsername: "" # SO username for call back.
soPassword: "" # SO password for call back.
# Url and credentials for Conductor.
- conductorUrl: https://oof-has-api.{{ include "common.namespace" . }}:8091/v1/plans/
+ conductorUrl: http://oof-has-api:8091/v1/plans/
conductorUsername: admin1
conductorPassword: plan.15
conductorPingWaitTime: 60
diff --git a/kubernetes/policy/charts/brmsgw/values.yaml b/kubernetes/policy/charts/brmsgw/values.yaml
index efc397654a..9815f4d93b 100644
--- a/kubernetes/policy/charts/brmsgw/values.yaml
+++ b/kubernetes/policy/charts/brmsgw/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/policy-pe:1.2.2
+image: onap/policy-pe:1.3-SNAPSHOT-latest
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/policy/charts/drools/charts/nexus/values.yaml b/kubernetes/policy/charts/drools/charts/nexus/values.yaml
index 03a03cc198..add7a7c7be 100644
--- a/kubernetes/policy/charts/drools/charts/nexus/values.yaml
+++ b/kubernetes/policy/charts/drools/charts/nexus/values.yaml
@@ -46,14 +46,14 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 180
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 60
periodSeconds: 10
path: /nexus/service/local/status
diff --git a/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh b/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh
index 218cd8aead..5f504e2c65 100644
--- a/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh
+++ b/kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/drools-tweaks.sh
@@ -16,7 +16,6 @@
"${POLICY_HOME}"/bin/features enable healthcheck
-"${POLICY_HOME}"/bin/features enable pooling-dmaap
"${POLICY_HOME}"/bin/features enable distributed-locking
"${POLICY_HOME}"/bin/db-migrator -s pooling -o upgrade
@@ -25,16 +24,16 @@
# so not to lose any configuration updates
echo
-echo "creating PDPD-CONFIGURATION topic"
+echo "testing publish to PDPD-CONFIGURATION topic"
echo
-curl --silent --connect-timeout 60 -X POST --header "Content-Type: application/json" -d "{}" http://message-router:3904/events/PDPD-CONFIGURATION
+curl --silent --connect-timeout 15 -X POST --header "Content-Type: application/json" -d "{}" http://message-router:3904/events/PDPD-CONFIGURATION
echo
-echo "removing PDPD-CONFIGURATION topic dummy message"
+echo "testing subscribe to PDPD-CONFIGURATION topic "
echo
-curl --silent --connect-timeout 60 -X GET http://message-router:3904/events/PDPD-CONFIGURATION/1/1?timeout=15000
+curl --silent --connect-timeout 15 -X GET http://message-router:3904/events/PDPD-CONFIGURATION/1/1?timeout=5000
# for resiliency/scalability scenarios, check to see
# if there's an amsterdam artifact already deployed
@@ -47,7 +46,7 @@ echo
echo "checking if there are amsterdam policies already deployed .."
echo
-AMSTERDAM_VERSION=$(curl --silent --connect-timeout 60 -X GET "http://nexus:8081/nexus/service/local/artifact/maven/resolve?r=releases&g=org.onap.policy-engine.drools.amsterdam&a=policy-amsterdam-rules&v=RELEASE" | grep -Po "(?<=<version>).*(?=</version>)")
+AMSTERDAM_VERSION=$(curl --silent --connect-timeout 20 -X GET "http://nexus:8081/nexus/service/local/artifact/maven/resolve?r=releases&g=org.onap.policy-engine.drools.amsterdam&a=policy-amsterdam-rules&v=RELEASE" | grep -Po "(?<=<version>).*(?=</version>)")
if [[ -z ${AMSTERDAM_VERSION} ]]; then
echo "no amsterdam policies have been found .."
diff --git a/kubernetes/policy/charts/drools/values.yaml b/kubernetes/policy/charts/drools/values.yaml
index ca62f2b01d..fb50f700a9 100644
--- a/kubernetes/policy/charts/drools/values.yaml
+++ b/kubernetes/policy/charts/drools/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/policy-drools:1.2.2
+image: onap/policy-drools:1.3-SNAPSHOT-latest
pullPolicy: Always
# flag to enable debugging - application support required
@@ -38,7 +38,7 @@ config:
nexusPort: 8081
# default number of instances
-replicaCount: 2
+replicaCount: 1
nodeSelector: {}
@@ -46,14 +46,14 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 180
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 60
periodSeconds: 10
service:
diff --git a/kubernetes/policy/charts/mariadb/values.yaml b/kubernetes/policy/charts/mariadb/values.yaml
index 7703234a66..a3dd7676a4 100644
--- a/kubernetes/policy/charts/mariadb/values.yaml
+++ b/kubernetes/policy/charts/mariadb/values.yaml
@@ -45,14 +45,14 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 120
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 120
periodSeconds: 10
## Persist data to a persistent volume
diff --git a/kubernetes/policy/charts/pdp/values.yaml b/kubernetes/policy/charts/pdp/values.yaml
index 811830ea89..dcaefb32a3 100644
--- a/kubernetes/policy/charts/pdp/values.yaml
+++ b/kubernetes/policy/charts/pdp/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/policy-pe:1.2.2
+image: onap/policy-pe:1.3-SNAPSHOT-latest
pullPolicy: Always
# flag to enable debugging - application support required
@@ -38,7 +38,7 @@ config:
papPort: 9091
# default number of instances
-replicaCount: 2
+replicaCount: 1
nodeSelector: {}
diff --git a/kubernetes/policy/charts/policy-apex-pdp/Chart.yaml b/kubernetes/policy/charts/policy-apex-pdp/Chart.yaml
new file mode 100644
index 0000000000..5a07fff6d4
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/Chart.yaml
@@ -0,0 +1,22 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: v1
+description: ONAP Policy APEX PDP
+name: policy-apex-pdp
+version: 2.0.0
diff --git a/kubernetes/policy/charts/policy-apex-pdp/requirements.yaml b/kubernetes/policy/charts/policy-apex-pdp/requirements.yaml
new file mode 100644
index 0000000000..fee7a3c24d
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/requirements.yaml
@@ -0,0 +1,22 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+dependencies:
+ - name: common
+ version: ~2.0.0
+ repository: '@local'
diff --git a/kubernetes/policy/charts/policy-apex-pdp/resources/config/config.json b/kubernetes/policy/charts/policy-apex-pdp/resources/config/config.json
new file mode 100644
index 0000000000..5db1839ac9
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/resources/config/config.json
@@ -0,0 +1,78 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+{
+ "engineServiceParameters": {
+ "name": "PolicyApexPDPEngine",
+ "version": "0.0.1",
+ "id": 45,
+ "instanceCount": 4,
+ "deploymentPort": 12345,
+ "engineParameters": {
+ "executorParameters": {
+ "JAVASCRIPT": {
+ "parameterClassName": "org.onap.policy.apex.plugins.executor.javascript.JavascriptExecutorParameters"
+ }
+ },
+ "contextParameters": {
+ "parameterClassName": "org.onap.policy.apex.context.parameters.ContextParameters",
+ "schemaParameters": {
+ "Avro": {
+ "parameterClassName": "org.onap.policy.apex.plugins.context.schema.avro.AvroSchemaHelperParameters"
+ }
+ }
+ }
+ }
+ },
+ "eventInputParameters": {
+ "ApexConsumer": {
+ "carrierTechnologyParameters" : {
+ "carrierTechnology" : "RESTSERVER",
+ "parameterClassName" :
+ "org.onap.policy.apex.plugins.event.carrier.restserver.RESTServerCarrierTechnologyParameters",
+ "parameters" : {
+ "standalone" : true,
+ "host" : "localhost",
+ "port" : 12346
+ }
+ },
+ "eventProtocolParameters":{
+ "eventProtocol" : "JSON"
+ },
+ "synchronousMode" : true,
+ "synchronousPeer" : "ApexProducer",
+ "synchronousTimeout" : 500
+ }
+ },
+ "eventOutputParameters": {
+ "ApexProducer": {
+ "carrierTechnologyParameters":{
+ "carrierTechnology" : "RESTSERVER",
+ "parameterClassName" :
+ "org.onap.policy.apex.plugins.event.carrier.restserver.RESTServerCarrierTechnologyParameters"
+ },
+ "eventProtocolParameters":{
+ "eventProtocol" : "JSON"
+ },
+ "synchronousMode" : true,
+ "synchronousPeer" : "ApexConsumer",
+ "synchronousTimeout" : 500
+ }
+ }
+}
+
+
diff --git a/kubernetes/policy/charts/policy-apex-pdp/templates/NOTES.txt b/kubernetes/policy/charts/policy-apex-pdp/templates/NOTES.txt
new file mode 100644
index 0000000000..c882c3385e
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/templates/NOTES.txt
@@ -0,0 +1,37 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+     You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/policy/charts/policy-apex-pdp/templates/configmap.yaml b/kubernetes/policy/charts/policy-apex-pdp/templates/configmap.yaml
new file mode 100644
index 0000000000..d35b5de4c2
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/templates/configmap.yaml
@@ -0,0 +1,26 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-configmap
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
+
diff --git a/kubernetes/policy/charts/policy-apex-pdp/templates/service.yaml b/kubernetes/policy/charts/policy-apex-pdp/templates/service.yaml
new file mode 100644
index 0000000000..c90d76efc5
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/templates/service.yaml
@@ -0,0 +1,44 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "common.servicename" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{if eq .Values.service.type "NodePort" -}}
+ - port: {{ .Values.service.externalPort }}
+ nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+ name: {{ .Values.service.portName }}
+ {{- else -}}
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ name: {{ .Values.service.portName }}
+ {{- end}}
+ selector:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ sessionAffinity: None
diff --git a/kubernetes/policy/charts/policy-apex-pdp/templates/statefulset.yaml b/kubernetes/policy/charts/policy-apex-pdp/templates/statefulset.yaml
new file mode 100644
index 0000000000..a1083b2809
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/templates/statefulset.yaml
@@ -0,0 +1,99 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ serviceName: {{ include "common.servicename" . }}
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ containers:
+ - name: {{ include "common.name" . }}
+ image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+ command:
+ - /opt/app/policy/apex-pdp/bin/apexEngine.sh
+ - -c
+ - /home/apexuser/config/config.json
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ ports:
+ - containerPort: {{ .Values.service.externalPort }}
+ {{- if eq .Values.liveness.enabled true }}
+ livenessProbe:
+ tcpSocket:
+ port: {{ .Values.service.externalPort }}
+ initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.liveness.periodSeconds }}
+ {{- end }}
+ readinessProbe:
+ tcpSocket:
+ port: {{ .Values.service.externalPort }}
+ initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readiness.periodSeconds }}
+ env:
+ - name: REPLICAS
+ value: "{{ .Values.replicaCount }}"
+ volumeMounts:
+ - mountPath: /etc/localtime
+ name: localtime
+ readOnly: true
+ - mountPath: /var/log/onap
+ name: policy-logs
+ - mountPath: /home/apexuser/config/config.json
+ name: apexconfig
+          subPath: config.json
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+ {{- end -}}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+ {{- end }}
+ volumes:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: policy-logs
+ emptyDir: {}
+ - name: apexconfig
+ configMap:
+ name: {{ include "common.fullname" . }}-configmap
+ items:
+ - key: config.json
+ path: config.json
+ mode: 0755
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/policy/charts/policy-apex-pdp/values.yaml b/kubernetes/policy/charts/policy-apex-pdp/values.yaml
new file mode 100644
index 0000000000..3c7b5594a6
--- /dev/null
+++ b/kubernetes/policy/charts/policy-apex-pdp/values.yaml
@@ -0,0 +1,68 @@
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 302
+ persistence: {}
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+# application image
+repository: nexus3.onap.org:10001
+image: onap/policy-apex-pdp:2.0-SNAPSHOT-latest
+pullPolicy: Always
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+# application configuration
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 20
+ periodSeconds: 10
+
+service:
+ type: NodePort
+ name: policy-apex-pdp
+ portName: policy-apex-pdp
+ externalPort: 12345
+ nodePort: 37
+
+ingress:
+ enabled: false
+
+resources: {}
diff --git a/kubernetes/policy/values.yaml b/kubernetes/policy/values.yaml
index 42d58472ed..a5cdc043a7 100644
--- a/kubernetes/policy/values.yaml
+++ b/kubernetes/policy/values.yaml
@@ -34,13 +34,15 @@ global:
nameOverride: brmsgw
nexus:
nameOverride: nexus
+ policy-apex-pdp:
+ nameOverride: policy-apex-pdp
#################################################################
# Application configuration defaults.
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/policy-pe:1.2.2
+image: onap/policy-pe:1.3-SNAPSHOT-latest
pullPolicy: Always
subChartsOnly:
@@ -58,6 +60,8 @@ brmwgw:
nameOverride: brmsgw
nexus:
nameOverride: nexus
+policy-apex-pdp:
+ nameOverride: policy-apex-pdp
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/logback.xml b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/logback.xml
index f242c41e05..5500163ba7 100644
--- a/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/logback.xml
+++ b/kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/logback.xml
@@ -33,21 +33,16 @@
limitations under the License.
============LICENSE_END============================================
-
- ECOMP is a trademark and service mark of AT&T Intellectual Property.
-->
<!DOCTYPE xml>
<configuration scan="true" scanPeriod="3 seconds" debug="true">
- <!-- Log-back files for the ECOMP Portal are created in directory
- ${catalina.base}/logs/${componentName}; e.g., apache-tomcat-8.0.35/logs/onapportal/application.log -->
- <!--<jmxConfigurator /> -->
<!-- specify the component name -->
<property name="componentName" value="onapportal"></property>
<!-- specify the base path of the log directory -->
- <property name="logDirPrefix" value="${catalina.base}/logs"></property>
+ <property name="logDirPrefix" value="/var/log/onap"></property>
<!-- The directories where logs are written -->
<property name="logDirectory" value="${logDirPrefix}/${componentName}" />
diff --git a/kubernetes/portal/charts/portal-app/values.yaml b/kubernetes/portal/charts/portal-app/values.yaml
index f9081f6711..c316450507 100644
--- a/kubernetes/portal/charts/portal-app/values.yaml
+++ b/kubernetes/portal/charts/portal-app/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/portal-app:v2.2.0
+image: onap/portal-app:2.2.0
pullPolicy: Always
# default number of instances
diff --git a/kubernetes/portal/charts/portal-mariadb/values.yaml b/kubernetes/portal/charts/portal-mariadb/values.yaml
index fc4836bf07..0fb6c0e812 100644
--- a/kubernetes/portal/charts/portal-mariadb/values.yaml
+++ b/kubernetes/portal/charts/portal-mariadb/values.yaml
@@ -23,7 +23,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/portal-db:v2.2.0
+image: onap/portal-db:2.2.0
pullPolicy: Always
readinessImage: readiness-check:2.0.0
@@ -48,7 +48,7 @@ config:
# application's front end hostname. Must be resolvable on the client side environment
vidHostName: "vid.api.simpledemo.onap.org"
# aai sparky ui assignment for port 8080
- aaiSparkyPort: "" # TODO: populate with
+ aaiSparkyPort: "30220"
# application's front end hostname. Must be resolvable on the client side environment
aaiSparkyHostName: "aai.api.sparky.simpledemo.onap.org"
# cli ui assignment for port 8080
@@ -56,7 +56,7 @@ config:
# application's front end hostname. Must be resolvable on the client side environment
cliHostName: "cli.api.simpledemo.onap.org"
# portal sdk (demo app) ui assignment for port 8990
- portalSdkPort: "" # TODO: populate with port
+ portalSdkPort: "30212"
# application's front end hostname. Must be resolvable on the client side environment
portalSdkHostName: "portal-sdk.simpledemo.onap.org"
# dmaap bus controller ui assignment for port ?
diff --git a/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml b/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml
index 864a8b9250..d1465c5f4e 100644
--- a/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml
+++ b/kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml
@@ -34,19 +34,13 @@
============LICENSE_END============================================
- ECOMP is a trademark and service mark of AT&T Intellectual Property.
-->
<configuration scan="true" scanPeriod="3 seconds" debug="true">
- <!--
- Logback files for the ONAP Portal SDK Application
- are created in directory ${catalina.base}/logs/${componentName}
- e.g., apache-tomcat-8.0.35/logs/onapsdk/application.log
- -->
<!--<jmxConfigurator /> -->
<!-- specify the component name -->
<property name="componentName" value="onapsdk"></property>
<!-- specify the base path of the log directory -->
- <property name="logDirPrefix" value="${catalina.base}/logs"></property>
+ <property name="logDirPrefix" value="/var/log/onap"></property>
<!-- The directories where logs are written -->
<property name="logDirectory" value="${logDirPrefix}/${componentName}" />
<!-- Can easily relocate debug logs by modifying this path. -->
@@ -223,4 +217,4 @@
<root level="DEBUG">
<appender-ref ref="asyncEELF" />
</root>
-</configuration> \ No newline at end of file
+</configuration>
diff --git a/kubernetes/portal/charts/portal-sdk/templates/service.yaml b/kubernetes/portal/charts/portal-sdk/templates/service.yaml
index 75d4a14dcf..7cd05606e8 100644
--- a/kubernetes/portal/charts/portal-sdk/templates/service.yaml
+++ b/kubernetes/portal/charts/portal-sdk/templates/service.yaml
@@ -26,7 +26,7 @@ spec:
type: {{ .Values.service.type }}
ports:
{{if eq .Values.service.type "NodePort" -}}
- - port: {{ .Values.service.externalPort }}
+ - port: {{ .Values.service.internalPort }}
targetPort: {{ .Values.service.internalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
name: {{ .Values.service.portName }}
diff --git a/kubernetes/portal/charts/portal-sdk/values.yaml b/kubernetes/portal/charts/portal-sdk/values.yaml
index d0aae9c9f7..928c0a305f 100644
--- a/kubernetes/portal/charts/portal-sdk/values.yaml
+++ b/kubernetes/portal/charts/portal-sdk/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/portal-sdk:v2.2.0
+image: onap/portal-sdk:2.2.0
pullPolicy: Always
# flag to enable debugging - application support required
@@ -53,11 +53,11 @@ readiness:
periodSeconds: 10
service:
- type: ClusterIP
+ type: NodePort
name: portal-sdk
portName: portal-sdk
- externalPort: 8990
internalPort: 8080
+ nodePort: 12
mariadb:
service:
diff --git a/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties b/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties
index 738834902c..60cc1db3e6 100644
--- a/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties
+++ b/kubernetes/portal/charts/portal-widget/resources/config/deliveries/properties/ONAPWIDGETMS/application.properties
@@ -19,7 +19,7 @@ security.user.name=widget_user
security.user.password=ENC(IjywcRnI9+nuVEh9+OFFiRWAjBT1n718)
initialization.default.widgets=true
-initialization.widgetData.url=http://{{.Values.global.portalHostName}}:{{.Values.global.portalPort}}/ONAPPORTAL/commonWidgets
+initialization.widgetData.url=http://portal-app:{{.Values.global.portalPort}}/ONAPPORTAL/commonWidgets
## Account Basic Authentication Properties
account.user.name=portal
diff --git a/kubernetes/portal/charts/portal-widget/values.yaml b/kubernetes/portal/charts/portal-widget/values.yaml
index b08fd60ffe..f4c9735250 100644
--- a/kubernetes/portal/charts/portal-widget/values.yaml
+++ b/kubernetes/portal/charts/portal-widget/values.yaml
@@ -28,7 +28,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/portal-wms:v2.2.0
+image: onap/portal-wms:2.2.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/portal/docker/init/mariadb-client/Dockerfile b/kubernetes/portal/docker/init/mariadb-client/Dockerfile
index 009f2fa757..e64b1e2d87 100644
--- a/kubernetes/portal/docker/init/mariadb-client/Dockerfile
+++ b/kubernetes/portal/docker/init/mariadb-client/Dockerfile
@@ -1,6 +1,6 @@
FROM boxfuse/flyway:5.0.7-alpine
-ARG branch=master
+ARG branch=2.0.0-ONAP
ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
# Setup Corporate proxy
ENV https_proxy ${HTTP_PROXY}
diff --git a/kubernetes/robot/resources/config/eteshare/config/vm_properties.py b/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
index 261d345c84..62f0b630a2 100755
--- a/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
+++ b/kubernetes/robot/resources/config/eteshare/config/vm_properties.py
@@ -12,7 +12,7 @@ GLOBAL_INJECTED_DCAE_IP_ADDR = "dcae-healthcheck.{{include "common.namespace" .}
GLOBAL_INJECTED_DNS_IP_ADDR = "N/A"
GLOBAL_INJECTED_DOCKER_VERSION = "1.2-STAGING-latest"
GLOBAL_INJECTED_EXTERNAL_DNS = "N/A"
-GLOBAL_INJECTED_GERRIT_BRANCH = "master"
+GLOBAL_INJECTED_GERRIT_BRANCH = "2.0.0-ONAP"
GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR = "log-es.{{include "common.namespace" .}}"
GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR = "log-kibana.{{include "common.namespace" .}}"
GLOBAL_INJECTED_LOG_LOGSTASH_IP_ADDR = "log-ls-http.{{include "common.namespace" .}}"
@@ -67,7 +67,7 @@ GLOBAL_INJECTED_PROPERTIES = {
"GLOBAL_INJECTED_DNS_IP_ADDR" : "N/A",
"GLOBAL_INJECTED_DOCKER_VERSION" : "1.2-STAGING-latest",
"GLOBAL_INJECTED_EXTERNAL_DNS" : "N/A",
- "GLOBAL_INJECTED_GERRIT_BRANCH" : "master",
+ "GLOBAL_INJECTED_GERRIT_BRANCH" : "2.0.0-ONAP",
"GLOBAL_INJECTED_KEYSTONE" : "{{ .Values.openStackKeyStoneUrl }}",
"GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR" : "log-es.{{include "common.namespace" .}}",
"GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR" : "log-kibana.{{include "common.namespace" .}}",
diff --git a/kubernetes/robot/templates/deployment.yaml b/kubernetes/robot/templates/deployment.yaml
index 0b658093bf..e5cc1d3a0f 100644
--- a/kubernetes/robot/templates/deployment.yaml
+++ b/kubernetes/robot/templates/deployment.yaml
@@ -77,12 +77,14 @@ spec:
subPath: sdngc_interface.robot
- name: robot-resources
mountPath: /var/opt/OpenECOMP_ETE/robot/resources/oof_interface.robot
- subPath: oof_interface.robot
+ subPath: oof_interface.robot
- name: robot-lighttpd-authorization
mountPath: /etc/lighttpd/authorization
subPath: authorization
- name: demodir
mountPath: /share
+ - name: robot-logs
+ mountPath: /share/logs
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- if .Values.nodeSelector }}
@@ -94,6 +96,13 @@ spec:
{{ toYaml .Values.affinity | indent 10 }}
{{- end }}
volumes:
+      - name: robot-logs
+      {{- if .Values.persistence.enabled }}
+        persistentVolumeClaim:
+          claimName: {{ include "common.fullname" . }}
+      {{- else }}
+        emptyDir: {}
+      {{- end }}
- name: localtime
hostPath:
path: /etc/localtime
diff --git a/kubernetes/vid/charts/mariadb/templates/pv.yaml b/kubernetes/robot/templates/pv.yaml
index 184728f8ad..184728f8ad 100644
--- a/kubernetes/vid/charts/mariadb/templates/pv.yaml
+++ b/kubernetes/robot/templates/pv.yaml
diff --git a/kubernetes/vid/charts/mariadb/templates/pvc.yaml b/kubernetes/robot/templates/pvc.yaml
index e27c3311e9..e27c3311e9 100644
--- a/kubernetes/vid/charts/mariadb/templates/pvc.yaml
+++ b/kubernetes/robot/templates/pvc.yaml
diff --git a/kubernetes/robot/values.yaml b/kubernetes/robot/values.yaml
index 54d72a7431..aea67c84aa 100644
--- a/kubernetes/robot/values.yaml
+++ b/kubernetes/robot/values.yaml
@@ -18,10 +18,11 @@
global: # global defaults
nodePortPrefix: 302
ubuntuInitRepository: registry.hub.docker.com
+ persistence: {}
# application image
repository: nexus3.onap.org:10001
-image: onap/testsuite:1.2.0
+image: onap/testsuite:1.2.1
pullPolicy: Always
ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
@@ -38,7 +39,7 @@ config:
# Password of the lighthttpd server. Used for HTML auth for webpage access
lightHttpdPassword: robot
# gerrit branch where the latest heat code is checked in
- gerritBranch: master
+ gerritBranch: 2.0.0-ONAP
# gerrit project where the latest heat code is checked in
gerritProject: http://gerrit.onap.org/r/demo.git
@@ -79,7 +80,7 @@ openStackPrivateNetCidr: "10.0.0.0/8"
openStackOamNetworkCidrPrefix: "10.0"
# Override with Pub Key for access to VNF
vnfPubKey: "FILL_IN_WITH_PUB_KEY"
-# Override with DCAE VES Collector external IP
+# Override with DCAE VES Collector external IP
dcaeCollectorIp: "FILL_IN_WITH_DCAE_VES_COLLECTOR_IP"
# default number of instances
@@ -133,3 +134,26 @@ resources: {}
# requests:
# cpu: 2
# memory: 4Gi
+
+## Persist data to a persistent volume
+persistence:
+ enabled: true
+
+ ## A manually managed Persistent Volume and Claim
+ ## Requires persistence.enabled: true
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ volumeReclaimPolicy: Retain
+
+  ## robot log data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ accessMode: ReadWriteMany
+ size: 2Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: robot/logs \ No newline at end of file
diff --git a/kubernetes/vid/charts/mariadb/.helmignore b/kubernetes/sdc/charts/sdc-act/.helmignore
index f0c1319444..f0c1319444 100644
--- a/kubernetes/vid/charts/mariadb/.helmignore
+++ b/kubernetes/sdc/charts/sdc-act/.helmignore
diff --git a/kubernetes/config/Chart.yaml b/kubernetes/sdc/charts/sdc-act/Chart.yaml
index 5c114e0761..7fa0d2d008 100644
--- a/kubernetes/config/Chart.yaml
+++ b/kubernetes/sdc/charts/sdc-act/Chart.yaml
@@ -13,6 +13,6 @@
# limitations under the License.
apiVersion: v1
-description: ONAP configuration pod
-name: config
-version: 1.1.0
+description: ONAP Activity Spec WebService
+name: sdc-act
+version: 2.0.0 \ No newline at end of file
diff --git a/kubernetes/sdc/charts/sdc-act/resources/config/configuration.yaml b/kubernetes/sdc/charts/sdc-act/resources/config/configuration.yaml
new file mode 100644
index 0000000000..af380172e0
--- /dev/null
+++ b/kubernetes/sdc/charts/sdc-act/resources/config/configuration.yaml
@@ -0,0 +1,12 @@
+cassandraConfig:
+ cassandraHosts: {{ .Values.config.cassandraHosts }}
+ reconnectTimeout : {{ .Values.config.cassandraReconnectTimeout }}
+ socketReadTimeout: {{ .Values.config.cassandraSocketReadTimeout }}
+ socketConnectTimeout: {{ .Values.config.cassandraSocketConnectTimeout }}
+ authenticate: true
+ username: {{ .Values.config.cassandraUserName }}
+ password: {{ .Values.config.cassandraUserPassword }}
+ ssl: {{ .Values.config.cassandraSslEnabled }}
+ truststorePath : {{ .Values.config.cassandraTruststorePath }}
+ truststorePassword : {{ .Values.config.cassandraTruststorePassword }}
+
diff --git a/kubernetes/sdc/charts/sdc-act/templates/NOTES.txt b/kubernetes/sdc/charts/sdc-act/templates/NOTES.txt
new file mode 100644
index 0000000000..41f9706fec
--- /dev/null
+++ b/kubernetes/sdc/charts/sdc-act/templates/NOTES.txt
@@ -0,0 +1,33 @@
+# Copyright © 2018 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status of it by running 'kubectl get svc -w {{ include "common.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/vid/charts/mariadb/templates/configmap.yaml b/kubernetes/sdc/charts/sdc-act/templates/configmap.yaml
index 8a35df3f30..4ccc7cc526 100644
--- a/kubernetes/vid/charts/mariadb/templates/configmap.yaml
+++ b/kubernetes/sdc/charts/sdc-act/templates/configmap.yaml
@@ -15,7 +15,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
- name: {{ include "common.fullname" . }}-lfconfig
+ name: {{ include "common.fullname" . }}-configmap
namespace: {{ include "common.namespace" . }}
data:
-{{ tpl (.Files.Glob "resources/config/lf_config/*").AsConfig . | indent 2 }}
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }} \ No newline at end of file
diff --git a/kubernetes/vid/charts/mariadb/templates/deployment.yaml b/kubernetes/sdc/charts/sdc-act/templates/deployment.yaml
index 0560f60d8f..542982fd63 100644
--- a/kubernetes/vid/charts/mariadb/templates/deployment.yaml
+++ b/kubernetes/sdc/charts/sdc-act/templates/deployment.yaml
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright © 2017 Amdocs, AT&T, Bell Canada
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,80 +30,51 @@ spec:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
spec:
+ initContainers:
+ - name: {{ include "common.name" . }}-job-completion
+ image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+ imagePullPolicy: "{{ .Values.global.pullPolicy | default .Values.pullPolicy }}"
+ command:
+ - /root/job_complete.py
+ args:
+ - --job-name
+ - {{ include "common.fullname" . }}-init-activity-spec
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
containers:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
ports:
- containerPort: {{ .Values.service.internalPort }}
- {{- if eq .Values.liveness.enabled true }}
+ # disable liveness probe when breakpoints set in debugger
+ # so K8s doesn't restart unresponsive container
+ {{ if .Values.liveness.enabled }}
livenessProbe:
tcpSocket:
port: {{ .Values.service.internalPort }}
initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
periodSeconds: {{ .Values.liveness.periodSeconds }}
- {{ end -}}
+ {{ end }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.internalPort }}
initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
periodSeconds: {{ .Values.readiness.periodSeconds }}
env:
- - name: MYSQL_DATABASE
- value: "{{ .Values.config.mysqldb }}"
- - name: MYSQL_USER
- value: "{{ .Values.config.mysqluser }}"
- - name: MYSQL_PASSWORD
- valueFrom:
- secretKeyRef:
- name: {{ template "common.fullname" . }}
- key: db-password
- - name: MYSQL_ROOT_PASSWORD
- valueFrom:
- secretKeyRef:
- name: {{ template "common.fullname" . }}
- key: db-root-password
+ - name: JAVA_OPTIONS
+ value: {{ .Values.config.javaOptions }}
volumeMounts:
- - mountPath: /etc/localtime
- name: localtime
- readOnly: true
- - mountPath: /var/lib/mysql
- name: mariadb-data
- - mountPath: /docker-entrypoint-initdb.d/vid-pre-init.sql
- name: lfconfig
- subPath: vid-pre-init.sql
- - mountPath: /etc/mysql/my.cnf
- name: lfconfig
- subPath: my.cnf
- resources:
-{{ toYaml .Values.resources | indent 12 }}
- {{- if .Values.nodeSelector }}
- nodeSelector:
-{{ toYaml .Values.nodeSelector | indent 10 }}
- {{- end -}}
- {{- if .Values.affinity }}
- affinity:
-{{ toYaml .Values.affinity | indent 10 }}
- {{- end }}
+ - name: {{ include "common.fullname" . }}-config
+ mountPath: /etc/onap/activity-spec/be/config
volumes:
- - name: mariadb-data
- {{- if .Values.persistence.enabled }}
- persistentVolumeClaim:
- claimName: {{ include "common.fullname" . }}
- {{- else }}
- emptyDir: {}
- {{- end }}
- - name: localtime
- hostPath:
- path: /etc/localtime
- - name: lfconfig
- configMap:
- name: {{ include "common.fullname" . }}-lfconfig
- defaultMode: 0755
- items:
- - key: vid-my.cnf
- path: my.cnf
- - key: vid-pre-init.sql
- path: vid-pre-init.sql
+ - name: {{ include "common.fullname" . }}-config
+ configMap:
+ name : {{ include "common.fullname" . }}-configmap
+ defaultMode: 0755
imagePullSecrets:
- - name: "{{ include "common.namespace" . }}-docker-registry-key"
+ - name: "{{ include "common.namespace" . }}-docker-registry-key" \ No newline at end of file
diff --git a/kubernetes/sdc/charts/sdc-act/templates/job.yaml b/kubernetes/sdc/charts/sdc-act/templates/job.yaml
new file mode 100644
index 0000000000..bd9d9d1e00
--- /dev/null
+++ b/kubernetes/sdc/charts/sdc-act/templates/job.yaml
@@ -0,0 +1,64 @@
+# Copyright © 2017 Amdocs, AT&T, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "common.fullname" . }}-init-activity-spec
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}-job
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ backoffLimit: 20
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}-job
+ release: {{ .Release.Name }}
+ spec:
+ restartPolicy: Never
+ initContainers:
+ - name: {{ include "common.name" . }}-init-readiness
+ image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdc-cs
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ containers:
+ - name: {{ include "common.name" . }}-job
+ image: "{{ include "common.repository" . }}/{{ .Values.configInitImage }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ env:
+ - name: CS_HOST
+ value: sdc-cs
+ - name: CS_USER
+ valueFrom:
+ secretKeyRef: {name: {{ .Release.Name }}-sdc-cs-secrets, key: sdc_user}
+ - name: CS_PASSWORD
+ valueFrom:
+ secretKeyRef: {name: {{ .Release.Name }}-sdc-cs-secrets, key: sdc_password}
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
+ restartPolicy: Never
diff --git a/kubernetes/sdc/charts/sdc-act/templates/service.yaml b/kubernetes/sdc/charts/sdc-act/templates/service.yaml
new file mode 100644
index 0000000000..736ef740ee
--- /dev/null
+++ b/kubernetes/sdc/charts/sdc-act/templates/service.yaml
@@ -0,0 +1,39 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "common.servicename" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{if eq .Values.service.type "NodePort" -}}
+ - port: {{ .Values.service.internalPort }}
+ nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+ name: {{ .Values.service.portName | default "http" }}
+ {{- else -}}
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ name: {{ .Values.service.portName | default "http" }}
+ {{- end}}
+ selector:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }} \ No newline at end of file
diff --git a/kubernetes/vid/charts/mariadb/values.yaml b/kubernetes/sdc/charts/sdc-act/values.yaml
index c23d8bebfa..0df41573ea 100644
--- a/kubernetes/vid/charts/mariadb/values.yaml
+++ b/kubernetes/sdc/charts/sdc-act/values.yaml
@@ -12,25 +12,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Default values for mariadb.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-global: # global defaults
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
nodePortPrefix: 302
- persistence: {}
-
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+#################################################################
+# Application configuration defaults.
+#################################################################
# application image
repository: nexus3.onap.org:10001
-image: library/mariadb:10
+image: onap/activity-spec:1.3.0-SNAPSHOT
+configInitImage: onap/activity-spec-init:1.3.0-SNAPSHOT
pullPolicy: Always
-# application configuration
+# flag to enable debugging - application support required
+debugEnabled: false
+
config:
- mysqldb: vid_openecomp_epsdk
- mysqluser: vidadmin
- mariadbRootPassword: LF+tp_1WqgSY
- mariadbPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
+ javaOptions: "-Xmx1536m -Xms1536m"
+ cassandraSslEnabled: "false"
+ cassandraHosts: ['sdc-cs']
+ cassandraUserName: "asdc_user"
+ cassandraUserPassword: "Aa1234%^!"
+ cassandraReconnectTimeout: 30000
+ cassandraSocketReadTimeout: 20000
+ cassandraSocketConnectTimeout: 20000
+ cassandraTruststorePath: "/path/path"
+ cassandraTruststorePassword: "Aa123456"
# default number of instances
replicaCount: 1
@@ -41,43 +55,22 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 60
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 60
periodSeconds: 10
-## Persist data to a persitent volume
-persistence:
- enabled: true
-
- ## A manually managed Persistent Volume and Claim
- ## Requires persistence.enabled: true
- ## If defined, PVC must be created manually before volume will be bound
- # existingClaim:
- volumeReclaimPolicy: Retain
-
- ## database data Persistent Volume Storage Class
- ## If defined, storageClassName: <storageClass>
- ## If set to "-", storageClassName: "", which disables dynamic provisioning
- ## If undefined (the default) or set to null, no storageClassName spec is
- ## set, choosing the default provisioner. (gp2 on AWS, standard on
- ## GKE, AWS & OpenStack)
- ##
- # storageClass: "-"
- accessMode: ReadWriteMany
- size: 2Gi
- mountPath: /dockerdata-nfs
- mountSubPath: vid/mariadb/data
-
service:
- name: vid-mariadb
- portName: vid-mariadb
- internalPort: 3306
+ type: NodePort
+ internalPort: 8080
+ externalPort: 8080
+ portName: sdc-act
+ nodePort: "57"
ingress:
enabled: false
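
With service.type NodePort, the service template above concatenates the global prefix with the per-chart suffix, so nodePortPrefix 302 and nodePort "57" expose the sdc-act backend on host port 30257. A quick way to confirm after deployment (the onap namespace and the app=sdc-act label, taken from common.name rendering the chart name, are assumptions):

    # 302 (nodePortPrefix) + "57" (service.nodePort) => 30257
    kubectl -n onap get svc -l app=sdc-act \
      -o jsonpath='{.items[0].spec.ports[0].nodePort}{"\n"}'
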
diff --git a/kubernetes/sdc/charts/sdc-be/values.yaml b/kubernetes/sdc/charts/sdc-be/values.yaml
index 75e096121f..113cb108d1 100644
--- a/kubernetes/sdc/charts/sdc-be/values.yaml
+++ b/kubernetes/sdc/charts/sdc-be/values.yaml
@@ -27,8 +27,8 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc-backend:1.2-STAGING-latest
-backendInitImage: onap/sdc-backend-init:1.2-STAGING-latest
+image: onap/sdc-backend:1.2.0
+backendInitImage: onap/sdc-backend-init:1.2.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/sdc/charts/sdc-cs/values.yaml b/kubernetes/sdc/charts/sdc-cs/values.yaml
index 3baf2e0805..373d6e81ab 100644
--- a/kubernetes/sdc/charts/sdc-cs/values.yaml
+++ b/kubernetes/sdc/charts/sdc-cs/values.yaml
@@ -27,8 +27,8 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc-cassandra:1.2-STAGING-latest
-cassandraInitImage: onap/sdc-cassandra-init:1.2-STAGING-latest
+image: onap/sdc-cassandra:1.2.0
+cassandraInitImage: onap/sdc-cassandra-init:1.2.0
pullPolicy: Always
diff --git a/kubernetes/sdc/charts/sdc-es/values.yaml b/kubernetes/sdc/charts/sdc-es/values.yaml
index 31986a0a96..d90e9840b6 100644
--- a/kubernetes/sdc/charts/sdc-es/values.yaml
+++ b/kubernetes/sdc/charts/sdc-es/values.yaml
@@ -30,8 +30,8 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc-elasticsearch:1.2-STAGING-latest
-elasticInitImage: onap/sdc-init-elasticsearch:1.2-STAGING-latest
+image: onap/sdc-elasticsearch:1.2.0
+elasticInitImage: onap/sdc-init-elasticsearch:1.2.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/sdc/charts/sdc-fe/values.yaml b/kubernetes/sdc/charts/sdc-fe/values.yaml
index 9d65b6584c..e57c226a21 100644
--- a/kubernetes/sdc/charts/sdc-fe/values.yaml
+++ b/kubernetes/sdc/charts/sdc-fe/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc-frontend:1.2-STAGING-latest
+image: onap/sdc-frontend:1.2.0
pullPolicy: Always
config:
diff --git a/kubernetes/sdc/charts/sdc-kb/values.yaml b/kubernetes/sdc/charts/sdc-kb/values.yaml
index 2dc551fc16..bc481ac3f5 100644
--- a/kubernetes/sdc/charts/sdc-kb/values.yaml
+++ b/kubernetes/sdc/charts/sdc-kb/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc-kibana:1.2-STAGING-latest
+image: onap/sdc-kibana:1.2.0
pullPolicy: Always
config:
diff --git a/kubernetes/sdc/charts/sdc-onboarding-be/values.yaml b/kubernetes/sdc/charts/sdc-onboarding-be/values.yaml
index 24ad1389d0..abee401ba6 100644
--- a/kubernetes/sdc/charts/sdc-onboarding-be/values.yaml
+++ b/kubernetes/sdc/charts/sdc-onboarding-be/values.yaml
@@ -27,8 +27,8 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc-onboard-backend:1.2-STAGING-latest
-onboardingInitImage: onap/sdc-onboard-cassandra-init:1.2-STAGING-latest
+image: onap/sdc-onboard-backend:1.2.0
+onboardingInitImage: onap/sdc-onboard-cassandra-init:1.2.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/sdc/charts/sdc-wfd/values.yaml b/kubernetes/sdc/charts/sdc-wfd/values.yaml
index 754899b672..ca11780f22 100644
--- a/kubernetes/sdc/charts/sdc-wfd/values.yaml
+++ b/kubernetes/sdc/charts/sdc-wfd/values.yaml
@@ -23,7 +23,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdc/sdc-workflow-designer:1.1.0-SNAPSHOT-STAGING-latest
+image: onap/sdc/sdc-workflow-designer:1.1.0
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/sdnc/Makefile b/kubernetes/sdnc/Makefile
new file mode 100644
index 0000000000..d634a8c506
--- /dev/null
+++ b/kubernetes/sdnc/Makefile
@@ -0,0 +1,51 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# FIXME OOM-765
+ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+OUTPUT_DIR := $(ROOT_DIR)/../dist
+PACKAGE_DIR := $(OUTPUT_DIR)/packages
+SECRET_DIR := $(OUTPUT_DIR)/secrets
+
+EXCLUDES :=
+HELM_CHARTS := $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))
+
+.PHONY: $(EXCLUDES) $(HELM_CHARTS)
+
+all: $(HELM_CHARTS)
+
+$(HELM_CHARTS):
+ @echo "\n[$@]"
+ @make package-$@
+
+make-%:
+ @if [ -f $*/Makefile ]; then make -C $*; fi
+
+dep-%: make-%
+ @if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
+
+lint-%: dep-%
+ @if [ -f $*/Chart.yaml ]; then helm lint $*; fi
+
+package-%: lint-%
+ @mkdir -p $(PACKAGE_DIR)
+ @if [ -f $*/Chart.yaml ]; then helm package -d $(PACKAGE_DIR) $*; fi
+ @helm repo index $(PACKAGE_DIR)
+
+clean:
+ @rm -f */requirements.lock
+ @rm -f *tgz */charts/*tgz
+ @rm -rf $(PACKAGE_DIR)
+%:
+ @: \ No newline at end of file
diff --git a/kubernetes/sdnc/charts/dmaap-listener/values.yaml b/kubernetes/sdnc/charts/dmaap-listener/values.yaml
index fec13eb654..9ddf590abd 100644
--- a/kubernetes/sdnc/charts/dmaap-listener/values.yaml
+++ b/kubernetes/sdnc/charts/dmaap-listener/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdnc-dmaap-listener-image:1.3-STAGING-latest
+image: onap/sdnc-dmaap-listener-image:1.4-STAGING-latest
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/sdnc/charts/sdnc-ansible-server/values.yaml b/kubernetes/sdnc/charts/sdnc-ansible-server/values.yaml
index 088008bf7e..895fa6ec40 100644
--- a/kubernetes/sdnc/charts/sdnc-ansible-server/values.yaml
+++ b/kubernetes/sdnc/charts/sdnc-ansible-server/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdnc-ansible-server-image:1.3-STAGING-latest
+image: onap/sdnc-ansible-server-image:1.4-STAGING-latest
pullPolicy: Always
# flag to enable debugging - application support required
@@ -49,7 +49,7 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 60
+ initialDelaySeconds: 180
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
diff --git a/kubernetes/sdnc/charts/sdnc-portal/resources/config/admportal.json b/kubernetes/sdnc/charts/sdnc-portal/resources/config/admportal.json
index f6d202ef09..a6950fdd70 100644
--- a/kubernetes/sdnc/charts/sdnc-portal/resources/config/admportal.json
+++ b/kubernetes/sdnc/charts/sdnc-portal/resources/config/admportal.json
@@ -36,7 +36,7 @@
"dbFabricPassword": "admin",
"dbFabricDB": "mysql",
"dbUser": "sdnctl",
- "dbPassword": "gamma",
+ "dbPassword": "{{.Values.config.dbSdnctlPassword}}",
"dbName": "sdnctl",
"odlProtocol": "http",
"odlHost": "sdnc.{{.Release.Namespace}}",
diff --git a/kubernetes/sdnc/charts/sdnc-portal/resources/config/dblib.properties b/kubernetes/sdnc/charts/sdnc-portal/resources/config/dblib.properties
index 9e4c88a879..68357baae6 100644
--- a/kubernetes/sdnc/charts/sdnc-portal/resources/config/dblib.properties
+++ b/kubernetes/sdnc/charts/sdnc-portal/resources/config/dblib.properties
@@ -22,7 +22,7 @@ org.onap.ccsdk.sli.jdbc.url=jdbc:mysql://{{.Values.mysql.service.name}}.{{.Relea
org.onap.ccsdk.sli.jdbc.driver=org.mariadb.jdbc.Driver
org.onap.ccsdk.sli.jdbc.database=sdnctl
org.onap.ccsdk.sli.jdbc.user=sdnctl
-org.onap.ccsdk.sli.jdbc.password=gamma
+org.onap.ccsdk.sli.jdbc.password={{.Values.config.dbSdnctlPassword}}
org.onap.ccsdk.sli.jdbc.connection.name=sdnctldb01
org.onap.ccsdk.sli.jdbc.connection.timeout=50
org.onap.ccsdk.sli.jdbc.request.timeout=100
diff --git a/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties b/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties
index e0e3295735..cc13a9d707 100644
--- a/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties
+++ b/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties
@@ -2,4 +2,4 @@ org.openecomp.sdnctl.sli.dbtype = jdbc
org.openecomp.sdnctl.sli.jdbc.url = jdbc:mysql://sdnc-sdnctldb01:3306/sdnctl
org.openecomp.sdnctl.sli.jdbc.database = sdnctl
org.openecomp.sdnctl.sli.jdbc.user = sdnctl
-org.openecomp.sdnctl.sli.jdbc.password = gamma \ No newline at end of file
+org.openecomp.sdnctl.sli.jdbc.password = {{.Values.config.dbSdnctlPassword}} \ No newline at end of file
diff --git a/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties.sdnctldb02 b/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties.sdnctldb02
index e665a56d75..c75c603f22 100644
--- a/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties.sdnctldb02
+++ b/kubernetes/sdnc/charts/sdnc-portal/resources/config/svclogic.properties.sdnctldb02
@@ -2,4 +2,4 @@ org.openecomp.sdnctl.sli.dbtype = jdbc
org.openecomp.sdnctl.sli.jdbc.url = jdbc:mysql://sdnc-sdnctldb02:3306/sdnctl
org.openecomp.sdnctl.sli.jdbc.database = sdnctl
org.openecomp.sdnctl.sli.jdbc.user = sdnctl
-org.openecomp.sdnctl.sli.jdbc.password = gamma \ No newline at end of file
+org.openecomp.sdnctl.sli.jdbc.password = {{.Values.config.dbSdnctlPassword}} \ No newline at end of file
diff --git a/kubernetes/sdnc/charts/sdnc-portal/values.yaml b/kubernetes/sdnc/charts/sdnc-portal/values.yaml
index f5cd48c12f..19385031c7 100644
--- a/kubernetes/sdnc/charts/sdnc-portal/values.yaml
+++ b/kubernetes/sdnc/charts/sdnc-portal/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/admportal-sdnc-image:1.3-STAGING-latest
+image: onap/admportal-sdnc-image:1.4-STAGING-latest
pullPolicy: Always
# flag to enable debugging - application support required
@@ -37,6 +37,7 @@ debugEnabled: false
config:
mysqlChartName: sdnc-db
dbRootPassword: openECOMP1.0
+ dbSdnctlPassword: gamma
sdncChartName: sdnc
configDir: /opt/onap/sdnc/data/properties
odlPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
@@ -50,7 +51,7 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 60
+ initialDelaySeconds: 180
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
diff --git a/kubernetes/sdnc/charts/ueb-listener/values.yaml b/kubernetes/sdnc/charts/ueb-listener/values.yaml
index 12f675c0e4..de9bd27291 100644
--- a/kubernetes/sdnc/charts/ueb-listener/values.yaml
+++ b/kubernetes/sdnc/charts/ueb-listener/values.yaml
@@ -27,7 +27,7 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/sdnc-ueb-listener-image:1.3-STAGING-latest
+image: onap/sdnc-ueb-listener-image:1.4-STAGING-latest
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/sdnc/requirements.yaml b/kubernetes/sdnc/requirements.yaml
index c458755a85..09348140be 100644
--- a/kubernetes/sdnc/requirements.yaml
+++ b/kubernetes/sdnc/requirements.yaml
@@ -21,4 +21,8 @@ dependencies:
repository: '@local'
- name: dgbuilder
version: ~2.0.0
- repository: '@local' \ No newline at end of file
+ repository: '@local'
+ - name: sdnc-prom
+ version: ~2.0.0
+ repository: '@local'
+ condition: config.geoEnabled
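
The condition field makes the sdnc-prom dependency optional: it is rendered only when config.geoEnabled is true in the sdnc values, so single-site deployments are unaffected. From the umbrella chart the flag can be flipped at install or upgrade time, for example (release name dev, as used elsewhere in this change):

    helm upgrade --set sdnc.config.geoEnabled=true dev local/onap --namespace onap
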
diff --git a/kubernetes/sdnc/resources/config/bin/installSdncDb.sh b/kubernetes/sdnc/resources/config/bin/installSdncDb.sh
index e574bd7aec..ab0bc35880 100644
--- a/kubernetes/sdnc/resources/config/bin/installSdncDb.sh
+++ b/kubernetes/sdnc/resources/config/bin/installSdncDb.sh
@@ -53,3 +53,6 @@ done
# Create VNIs 100-199
${SDNC_HOME}/bin/addVnis.sh 100 199
+
+# Drop FK_NETWORK_MODEL foreign key as workaround for SDNC-291.
+${SDNC_HOME}/bin/rmForeignKey.sh NETWORK_MODEL FK_NETWORK_MODEL \ No newline at end of file
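
rmForeignKey.sh itself is not shown here; given the arguments above it presumably drops the named constraint from the sdnctl schema, roughly as follows (the mysql invocation is an assumption, not taken from this change):

    # hypothetical equivalent of: rmForeignKey.sh NETWORK_MODEL FK_NETWORK_MODEL
    mysql -u sdnctl -p"${MYSQL_PASSWORD}" sdnctl \
      -e "ALTER TABLE NETWORK_MODEL DROP FOREIGN KEY FK_NETWORK_MODEL;"
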
diff --git a/kubernetes/sdnc/resources/config/bin/startODL.sh b/kubernetes/sdnc/resources/config/bin/startODL.sh
index 86f45d3fbd..9d1ab768fb 100755
--- a/kubernetes/sdnc/resources/config/bin/startODL.sh
+++ b/kubernetes/sdnc/resources/config/bin/startODL.sh
@@ -27,8 +27,10 @@ function enable_odl_cluster(){
fi
echo "Installing Opendaylight cluster features"
- ${ODL_HOME}/bin/client feature:install odl-mdsal-clustering
- ${ODL_HOME}/bin/client feature:install odl-jolokia
+ mv $ODL_HOME/etc/org.apache.karaf.features.cfg $ODL_HOME/etc/org.apache.karaf.features.cfg.orig
+ cat $ODL_HOME/etc/org.apache.karaf.features.cfg.orig | sed -e "\|featuresBoot=config|s|$|,odl-mdsal-clustering,odl-jolokia|" > $ODL_HOME/etc/org.apache.karaf.features.cfg
+ #${ODL_HOME}/bin/client feature:install odl-mdsal-clustering
+ #${ODL_HOME}/bin/client feature:install odl-jolokia
echo "Update cluster information statically"
hm=$(hostname)
@@ -102,13 +104,10 @@ then
${SDNC_HOME}/bin/installSdncDb.sh
echo "Installing SDN-C keyStore"
${SDNC_HOME}/bin/addSdncKeyStore.sh
- echo "Starting OpenDaylight"
- ${CCSDK_HOME}/bin/installOdlHostKey.sh
- ${ODL_HOME}/bin/start
- echo "Waiting ${SLEEP_TIME} seconds for OpenDaylight to initialize"
- sleep ${SLEEP_TIME}
- echo "Installing SDN-C platform features"
- ${SDNC_HOME}/bin/installFeatures.sh
+
+ # No longer needed (this was a workaround for a bug in Nitrogen)
+ #${CCSDK_HOME}/bin/installOdlHostKey.sh
+
if [ -x ${SDNC_HOME}/svclogic/bin/install.sh ]
then
echo "Installing directed graphs"
@@ -117,14 +116,8 @@ then
if $ENABLE_ODL_CLUSTER ; then enable_odl_cluster ; fi
- echo "Restarting OpenDaylight"
- ${ODL_HOME}/bin/stop
-
- echo "Waiting 60 seconds for OpenDaylight stop to complete"
- sleep 60
-
echo "Installed at `date`" > ${SDNC_HOME}/.installed
fi
-exec ${ODL_HOME}/bin/karaf
+exec ${ODL_HOME}/bin/karaf server
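
Rather than starting OpenDaylight, waiting, and installing features through the Karaf client, the cluster features are now appended to the featuresBoot list in org.apache.karaf.features.cfg before karaf is exec'd in the foreground. On a typical single-line featuresBoot entry (abbreviated and illustrative) the sed above yields:

    # before
    featuresBoot=config,standard,region,package,kar,ssh,management
    # after s|$|,odl-mdsal-clustering,odl-jolokia| on the matching line
    featuresBoot=config,standard,region,package,kar,ssh,management,odl-mdsal-clustering,odl-jolokia
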
diff --git a/kubernetes/sdnc/resources/config/conf/dblib.properties b/kubernetes/sdnc/resources/config/conf/dblib.properties
index 362726a6d8..dd2bcabcc5 100644
--- a/kubernetes/sdnc/resources/config/conf/dblib.properties
+++ b/kubernetes/sdnc/resources/config/conf/dblib.properties
@@ -22,7 +22,7 @@ org.onap.ccsdk.sli.jdbc.url=jdbc:mysql://{{.Values.mysql.service.name}}.{{.Relea
org.onap.ccsdk.sli.jdbc.driver=org.mariadb.jdbc.Driver
org.onap.ccsdk.sli.jdbc.database=sdnctl
org.onap.ccsdk.sli.jdbc.user=sdnctl
-org.onap.ccsdk.sli.jdbc.password=gamma
+org.onap.ccsdk.sli.jdbc.password={{.Values.config.dbSdnctlPassword}}
org.onap.ccsdk.sli.jdbc.connection.name=sdnctldb01
org.onap.ccsdk.sli.jdbc.connection.timeout=50
org.onap.ccsdk.sli.jdbc.request.timeout=100
diff --git a/kubernetes/sdnc/resources/config/conf/lcm-dg.properties b/kubernetes/sdnc/resources/config/conf/lcm-dg.properties
index 625cf63e03..9a39d0fd72 100644
--- a/kubernetes/sdnc/resources/config/conf/lcm-dg.properties
+++ b/kubernetes/sdnc/resources/config/conf/lcm-dg.properties
@@ -11,8 +11,8 @@ lcm.upgrade-software.playbookname=ansible_upgradesw
restapi.templateDir=/opt/onap/sdnc/restapi/templates
restapi.lcm.dmaap.publish.templatefile=lcm-dmaap-publish-template.json
lcm.dmaap.url=http://message-router.{{.Release.Namespace}}:{{.Values.config.dmaapPort}}/events/SDNC-LCM-WRITE
-lcm.dmaap.user=admin
-lcm.dmaap.password=admin
+lcm.dmaap.user=
+lcm.dmaap.password=
lcm.dmaap.version=1.0
-lcm.dmaap.partition=SDNC-LCM-WRITE
+lcm.dmaap.partition=MSO
lcm.dmaap.type=response \ No newline at end of file
diff --git a/kubernetes/sdnc/resources/config/conf/svclogic.properties b/kubernetes/sdnc/resources/config/conf/svclogic.properties
index 99f6cf84b7..e564012c8f 100644
--- a/kubernetes/sdnc/resources/config/conf/svclogic.properties
+++ b/kubernetes/sdnc/resources/config/conf/svclogic.properties
@@ -23,5 +23,5 @@ org.onap.ccsdk.sli.dbtype = jdbc
org.onap.ccsdk.sli.jdbc.url = jdbc:mysql://{{.Values.mysql.service.name}}.{{.Release.Namespace}}:{{.Values.mysql.service.internalPort}}/sdnctl
org.onap.ccsdk.sli.jdbc.database = sdnctl
org.onap.ccsdk.sli.jdbc.user = sdnctl
-org.onap.ccsdk.sli.jdbc.password = gamma
+org.onap.ccsdk.sli.jdbc.password = {{.Values.config.dbSdnctlPassword}}
diff --git a/kubernetes/sdnc/resources/env.yaml b/kubernetes/sdnc/resources/env.yaml
new file mode 100644
index 0000000000..2ad42f79a6
--- /dev/null
+++ b/kubernetes/sdnc/resources/env.yaml
@@ -0,0 +1,19 @@
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SDNC_GEO_ENABLED: "{{ .Values.config.geoEnabled }}"
+SDNC_IS_PRIMARY_CLUSTER: "{{ .Values.config.isPrimaryCluster }}"
+SDNC_ODL_COUNT: "{{ .Values.replicaCount }}"
+SDNC_LOCAL_K8S_CLUSTER_MASTER: "{{ .Values.config.myODLCluster }}"
+SDNC_REMOTE_K8S_CLUSTER_MASTER: "{{ .Values.config.peerODLCluster }}"
diff --git a/kubernetes/sdnc/resources/geo/bin/sdnc.cluster b/kubernetes/sdnc/resources/geo/bin/sdnc.cluster
index d59718fa27..87cdeffe89 100755
--- a/kubernetes/sdnc/resources/geo/bin/sdnc.cluster
+++ b/kubernetes/sdnc/resources/geo/bin/sdnc.cluster
@@ -1,6 +1,18 @@
#!/bin/bash
-OOM_HOME=${OOM_HOME:-$HOME}
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
if ! [ "$(command -v jq)" ]; then
echo "Error: jq is not installed."
@@ -8,7 +20,8 @@ if ! [ "$(command -v jq)" ]; then
exit 1
fi
-IS_PRIMARY_CLUSTER=`./sdnc.isPrimaryCluster`
+dir=$( dirname $0 )
+IS_PRIMARY_CLUSTER=$( $dir/sdnc.isPrimaryCluster )
case $IS_PRIMARY_CLUSTER in
true)
@@ -18,21 +31,30 @@ false)
MEMBER_NUMBER=4
;;
*)
- echo "Error: isPrimaryODLCluster not defined in ${OOM_HOME}/oom/kubernetes/sdnc/values.yaml."
+ echo "Error: isPrimaryCluster not defined in $dir/../../../values.yaml."
exit 1
;;
esac
+USERNAME=admin
+PASSWORD=admin
+
for pod_number in {0..2}
do
- curl "http://localhost:3026$((${pod_number} + 1))" > /dev/null 2>&1
- if [ "$?" = "7" ]; then
+
+ response=`curl -s -u $USERNAME:$PASSWORD -H "Content-Type: application/json" -H "Accept: application/json" -X GET http://localhost:3026$((${pod_number} + 1))/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-$((${MEMBER_NUMBER} + ${pod_number}))-shard-default-config,type=DistributedConfigDatastore`
+
+ if [ $? -ne 0 ]; then
continue
fi
- VOTING_RESULT=`curl -u admin:admin -H "Content-Type: application/json" -H "Accept: application/json" -X GET http://localhost:3026$((${pod_number} + 1))/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-$((${MEMBER_NUMBER} + ${pod_number}))-shard-default-config,type=DistributedConfigDatastore 2>/dev/null | jq '.value.Voting'`
+ status=$( echo -E "$response" | jq -r ".status" )
+ if [ "$status" != "200" ]; then
+ continue
+ fi
- case $VOTING_RESULT in
+ voting=$( echo -E "$response" | jq -r ".value.Voting" )
+ case $voting in
true)
echo "active"
exit 0
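
The reworked loop derives the cluster role from the shard's Voting flag over Jolokia instead of merely probing the port. The same query can be reproduced by hand against one member (ports 30261-30263 and the member numbering as in the script; admin/admin are the defaults used above):

    response=$(curl -s -u admin:admin -H "Accept: application/json" \
      "http://localhost:30261/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")
    echo -E "$response" | jq -r '.status'        # expect 200
    echo -E "$response" | jq -r '.value.Voting'  # true => active, false => standby
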
diff --git a/kubernetes/sdnc/resources/geo/bin/sdnc.failover b/kubernetes/sdnc/resources/geo/bin/sdnc.failover
deleted file mode 100755
index 961a5cb5cf..0000000000
--- a/kubernetes/sdnc/resources/geo/bin/sdnc.failover
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/perl -s
-use strict;
-
-my $keyWord_standby = "standby";
-my $keyWord_active = "active";
-my $keyWord_true = "true";
-my $keyWord_false = "false";
-my $keyWord_success = "success";
-my $keyWord_failure = "failure";
-my $file_cluster = "sdnc.cluster";
-my $file_switchVoting = "switchVoting.sh";
-my $file_isPrimaryCluster = "sdnc.isPrimaryCluster";
-
-if ((!(-e $file_cluster)) || (!(-e $file_switchVoting))|| (!(-e $file_isPrimaryCluster))) {
- # file not exist.
- print qq|$keyWord_failure\n|;
- exit 1;
-}
-
-my $roleRes = qx("./$file_isPrimaryCluster");
-my $clusterRes = qx("./$file_cluster");
-
-if ( index ($clusterRes, $keyWord_standby) != -1) {
- # We are at standby side
- if ( index ($roleRes, $keyWord_false) != -1) {
- # We are at Secondary cluster
- sub_activate_secondary();
- } elsif ( index ($roleRes, $keyWord_true) != -1) {
- # We are at Primary cluster
- sub_activate_primary();
- } else {
- # Error.
- print qq|$keyWord_failure\n|;
- exit 1;
- }
-} elsif ( index ($clusterRes, $keyWord_active) != -1) {
- # We are at active side
- if ( index ($roleRes, $keyWord_false) != -1) {
- # We are at Secondary cluster
- sub_activate_primary();
- } elsif ( index ($roleRes, $keyWord_true) != -1) {
- # We are at Primary cluster
- sub_activate_secondary();
- } else {
- # Error.
- print qq|$keyWord_failure\n|;
- exit 1;
- }
-} else {
- # Error.
- print qq|$keyWord_failure\n|;
- exit 1;
-}
-
-sub sub_activate_primary {
- #Switching voting in Primary cluster
- system("./$file_switchVoting primary");
- print qq|$keyWord_success\n|;
-}
-
-sub sub_activate_secondary {
- #Switching voting in secondary cluster
- system("./$file_switchVoting secondary");
- print qq|$keyWord_success\n|;
-}
diff --git a/kubernetes/sdnc/resources/geo/bin/sdnc.isPrimaryCluster b/kubernetes/sdnc/resources/geo/bin/sdnc.isPrimaryCluster
index 77fc65fe39..7a4f6a7dd0 100755
--- a/kubernetes/sdnc/resources/geo/bin/sdnc.isPrimaryCluster
+++ b/kubernetes/sdnc/resources/geo/bin/sdnc.isPrimaryCluster
@@ -1,8 +1,22 @@
#!/bin/bash
-OOM_HOME=${OOM_HOME:-$HOME}
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-IS_PRIMARY_CLUSTER=`awk '/isPrimaryCluster/ {print $2}' ${OOM_HOME}/oom/kubernetes/sdnc/values.yaml`
+dir=$( dirname $0 )
+
+IS_PRIMARY_CLUSTER=`awk '/isPrimaryCluster/ {print $2}' $dir/../../../values.yaml`
if [ "$?" -eq "2" ]; then
echo "Make sure you are ubuntu user." >&2
diff --git a/kubernetes/sdnc/resources/geo/bin/sdnc.makeActive b/kubernetes/sdnc/resources/geo/bin/sdnc.makeActive
new file mode 100755
index 0000000000..76eca48af5
--- /dev/null
+++ b/kubernetes/sdnc/resources/geo/bin/sdnc.makeActive
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# -lt 1 ];then
+ echo "Usage: makeactive <release> [namespace]"
+ exit 1
+fi
+
+RELEASE=$1
+NAMESPACE=onap
+if [ -n "$2" ];then
+ NAMESPACE=$2
+fi
+
+dir=$(dirname $0)
+isPrimary=$( $dir/sdnc.isPrimaryCluster)
+if [ "$isPrimary" = "true" ];then
+ SITE_NAME="sdnc01"
+elif [ "$isPrimary" = "false" ];then
+ SITE_NAME="sdnc02"
+else
+ echo "sdnc.isPrimaryCluster returned unexpected value \"$isPrimary\""
+ exit 1
+fi
+
+pod=$( kubectl -n $NAMESPACE get pods -l app=sdnc-prom,release=$RELEASE | grep Running | cut -f1 -d' ' )
+if [ -z "$pod" ];then
+ echo "prom pod not found - is prom running?"
+ exit 1
+fi
+
+kubectl -n $NAMESPACE exec $pod -- /app/promoverride.py --id $SITE_NAME --config /app/config/config.json
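
sdnc.makeActive resolves the local site name (sdnc01 or sdnc02) from sdnc.isPrimaryCluster and then runs the PROM override inside the running sdnc-prom pod, forcing this site active. Typical usage for a release named dev:

    ./sdnc.makeActive dev         # namespace defaults to onap
    ./sdnc.makeActive dev onap    # namespace given explicitly
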
diff --git a/kubernetes/config/onap-parameters.yaml b/kubernetes/sdnc/resources/geo/bin/sdnc.monitor
index cd5154f09c..b14bd7325d 100644..100755
--- a/kubernetes/config/onap-parameters.yaml
+++ b/kubernetes/sdnc/resources/geo/bin/sdnc.monitor
@@ -1,4 +1,6 @@
-# Copyright © 2017 Amdocs, Bell Canada
+#!/bin/sh
+
+# Copyright © 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,18 +14,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-OPENSTACK_UBUNTU_14_IMAGE: ""
-OPENSTACK_PUBLIC_NET_ID: ""
-OPENSTACK_OAM_NETWORK_ID: ""
-OPENSTACK_OAM_SUBNET_ID: ""
-OPENSTACK_OAM_NETWORK_CIDR: ""
-OPENSTACK_USERNAME: ""
-OPENSTACK_API_KEY: ""
-OPENSTACK_TENANT_NAME: ""
-OPENSTACK_TENANT_ID: ""
-OPENSTACK_REGION: ""
-OPENSTACK_KEYSTONE_URL: ""
-OPENSTACK_FLAVOUR_MEDIUM: ""
-OPENSTACK_SERVICE_TENANT_NAME: ""
-DMAAP_TOPIC: ""
-DEMO_ARTIFACTS_VERSION: ""
+if [ $# -lt 1 ];then
+ echo "Usage: $(basename $0) [--debug] <release> [namespace]"
+ exit 1
+fi
+
+if [ "$1" = "--debug" -o "$1" = "-debug" -o "$1" = "-d" ];then
+ DEBUG="--debug"
+ shift
+fi
+
+RELEASE=$1
+NAMESPACE=onap
+if [ -n "$2" ];then
+ NAMESPACE=$2
+fi
+
+pod=$( kubectl -n $NAMESPACE get pods -l app=sdnc-prom,release=$RELEASE | grep Running | cut -f1 -d' ' )
+if [ -z "$pod" ];then
+ echo "prom pod not found - is prom running?"
+ exit 1
+fi
+
+kubectl -n $NAMESPACE exec $pod -- /app/bin/sdnc.monitor $DEBUG
diff --git a/kubernetes/sdnc/resources/geo/bin/switchVoting.sh b/kubernetes/sdnc/resources/geo/bin/switchVoting.sh
index 27e4ead99d..7a1c193492 100755
--- a/kubernetes/sdnc/resources/geo/bin/switchVoting.sh
+++ b/kubernetes/sdnc/resources/geo/bin/switchVoting.sh
@@ -1,5 +1,19 @@
#!/bin/bash
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
function usage()
{
echo usage: switchVoting.sh primary\|secondary
@@ -10,18 +24,26 @@ if [ $# -ne 1 ]; then
usage
fi
-partition=$1
+dir=$( dirname $0 )
+USERNAME=admin
+PASSWORD=`awk '/odlPassword/ {print $2}' $dir/../../../values.yaml | head -1`
-if [ "$partition" == "primary" ]; then
- curl -u admin:{{.Values.config.odlPassword}} -H "Content-Type: application/json" -H "Accept: application/json" -X POST http://localhost:30202/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards -d '{ "input" : { "member-voting-state" : [ { "member-name" : "member-1", "voting":true}, { "member-name" : "member-2", "voting":true}, { "member-name" : "member-3", "voting":true},{ "member-name" : "member-4", "voting":false},{ "member-name" : "member-5", "voting":false},{ "member-name" : "member-6", "voting":false}] } }' > switch_voting_resp.json 2>/dev/null
- echo "" >> switch_voting_resp.json
- exit 0
-fi
+case "$1" in
-if [ "$partition" == "secondary" ]; then
- curl -u admin:{{.Values.config.odlPassword}} -H "Content-Type: application/json" -H "Accept: application/json" -X POST http://localhost:30202/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards -d '{ "input" : { "member-voting-state" : [ { "member-name" : "member-1", "voting":false}, { "member-name" : "member-2", "voting":false}, { "member-name" : "member-3", "voting":false},{ "member-name" : "member-4", "voting":true},{ "member-name" : "member-5", "voting":true},{ "member-name" : "member-6", "voting":true}] } }' > switch_voting_resp.json 2>/dev/null
- echo "" >> switch_voting_resp.json
- exit 0
-fi
+primary)
+ status=$(curl -u $USERNAME:$PASSWORD -o /dev/null -H "Content-Type: application/json" -H "Accept: application/json" -X POST http://localhost:30202/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards -d '{ "input" : { "member-voting-state" : [ { "member-name" : "member-1", "voting":true}, { "member-name" : "member-2", "voting":true}, { "member-name" : "member-3", "voting":true},{ "member-name" : "member-4", "voting":false},{ "member-name" : "member-5", "voting":false},{ "member-name" : "member-6", "voting":false}] } }' -w "%{http_code}\n" $url 2> /dev/null)
+;;
-usage
+secondary)
+ status=$(curl -u $USERNAME:$PASSWORD -o /dev/null -H "Content-Type: application/json" -H "Accept: application/json" -X POST http://localhost:30202/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards -d '{ "input" : { "member-voting-state" : [ { "member-name" : "member-1", "voting":false}, { "member-name" : "member-2", "voting":false}, { "member-name" : "member-3", "voting":false},{ "member-name" : "member-4", "voting":true},{ "member-name" : "member-5", "voting":true},{ "member-name" : "member-6", "voting":true}] } }' -w "%{http_code}\n" $url 2> /dev/null)
+;;
+
+*)
+ usage
+esac
+
+if [ $status -ne 200 ];then
+ echo "failure"
+else
+ echo "success"
+fi
diff --git a/kubernetes/config/values.yaml b/kubernetes/sdnc/sdnc-prom/Chart.yaml
index 9fbbbfa3ea..3c678e2b92 100644
--- a/kubernetes/config/values.yaml
+++ b/kubernetes/sdnc/sdnc-prom/Chart.yaml
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright © 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Default values for config.
-nsPrefix: onap
-dockerSharePath: /dockerdata-nfs
-image:
- repository: oomk8s/config-init:2.0.0-SNAPSHOT
- pullPolicy: Always
+apiVersion: v1
+description: ONAP SDNC Policy Driven Ownership Management
+name: sdnc-prom
+version: 2.0.0
diff --git a/kubernetes/sdnc/sdnc-prom/requirements.yaml b/kubernetes/sdnc/sdnc-prom/requirements.yaml
new file mode 100644
index 0000000000..37545fe2e0
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+ - name: common
+ version: ~2.0.0
+ repository: '@local'
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncActive.sh b/kubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncActive.sh
new file mode 100755
index 0000000000..fb24653129
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncActive.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+debugLog(){
+ if [ "$enableDebugLogging" == true ]; then
+ if [ $# -eq 0 ]; then
+ echo "" >> $LOGFILE
+ else
+ echo $( date ) $@ >> $LOGFILE
+ fi
+ fi
+}
+
+failover(){
+ lockFile=/tmp/sdnc.failover.lock
+ # make sure that no failover is currently running
+ if [ -e ${lockFile} ] && kill -0 $(cat ${lockFile}) 2> /dev/null; then
+ debugLog "Currently running sdnc and dns failover"
+ return
+ fi
+ trap "rm -f ${lockFile}" INT TERM RETURN
+ echo $BASHPID > ${lockFile}
+
+ # perform takeover
+ debugLog "Started executing sdnc.failover for $SITE_NAME"
+ takeoverResult=$( /app/bin/sdnc.failover )
+ debugLog "Completed executing sdnc.failover. takeoverResult is: $takeoverResult"
+ if [ "success" = "$takeoverResult" ]; then
+ # update CoreDNS upon successful execution of sdnc.failover script
+ debugLog "Executing sdnc.dnsswitch"
+ /app/bin/sdnc.dnsswitch
+ rc=$?
+ debugLog "Completed executing sdnc.dnsswitch for $SITE_NAME. rc=$rc"
+ else
+ debugLog "Cluster takeover current status: $takeoverResult on $SITE_NAME."
+ rc=1
+ fi
+
+ if [ $rc -ne 0 ];then
+ takeoverResult="failure"
+ fi
+
+ data="{\
+\"type\": \"failover\",\
+\"status\": \"$takeoverResult\",\
+\"site\": \"$SITE_NAME\",\
+\"deployment\": \"{{.Values.config.deployment}}\",\
+\"timestamp\": \"$(date '+%F %T')\"\
+}"
+
+ # notifications are best-effort - ignore any failures
+ curl -H "Content-Type: application/json" -X POST --data "$data" http://$message_router/events/$topic >/dev/null 2>&1
+
+}
+
+LOGFILE="/app/geo.log"
+enableDebugLogging=true
+message_router=message-router:3904
+topic={{.Values.config.messageRouterTopic}}
+SITE_NAME="sdnc01"
+if [ "$SDNC_IS_PRIMARY_CLUSTER" = "false" ];then
+ SITE_NAME="sdnc02"
+fi
+
+debugLog
+debugLog "Executing ensureSdncActive"
+
+# query SDN-C cluster status
+debugLog "Started executing sdnc.cluster"
+clusterStatus=$( /app/bin/sdnc.cluster )
+debugLog "Completed executing sdnc.cluster. Cluster status is: $clusterStatus"
+
+if [ "active" = "$clusterStatus" ]; then
+ # perform health-check
+ debugLog "Started executing sdnc.monitor"
+ health=$( /app/bin/sdnc.monitor )
+ debugLog "Completed executing sdnc.monitor. Cluster is: $health"
+
+ if [ "healthy" = "$health" ]; then
+ # Cluster is ACTIVE and HEALTHY
+ exit 0
+ fi
+ exit 1
+
+elif [ "standby" = "$clusterStatus" ]; then
+ # Run failover in background process and allow PROM to continue
+ ( failover & )
+ exit 0
+fi
+
+# Unknown cluster status
+exit 1
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncStandby.sh b/kubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncStandby.sh
new file mode 100755
index 0000000000..8dd84bd3ea
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/ensureSdncStandby.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+debugLog(){
+ if [ "$enableDebugLogging" == true ]; then
+ if [ $# -eq 0 ]; then
+ echo "" >> $LOGFILE
+ else
+ echo $( date ) $@ >> $LOGFILE
+ fi
+ fi
+}
+
+LOGFILE="/app/geo.log"
+enableDebugLogging=true
+
+debugLog
+debugLog "Executing ensureSdncStandby"
+
+# query SDN-C cluster status
+debugLog "Started executing sdnc.cluster"
+clusterStatus=$( /app/bin/sdnc.cluster )
+debugLog "Completed executing sdnc.cluster. Cluster status is: $clusterStatus"
+
+if [ "active" = "$clusterStatus" ]; then
+ # assume transient error as other side transitions to ACTIVE
+ debugLog "Cluster status: $clusterStatus. exit 0"
+ exit 0
+
+elif [ "standby" = "$clusterStatus" ]; then
+ # check that standby cluster is healthy
+ debugLog "Started executing sdnc.monitor. Cluster status is: $clusterStatus"
+ health=$( /app/bin/sdnc.monitor )
+ debugLog "Completed executing sdnc.monitor. Cluster is: $health"
+ if [ "failure" = "$health" ];then
+ # Backup site is unhealthy - can't accept traffic!
+ exit 1
+ fi
+ # Cluster is standing by
+ exit 0
+fi
+
+debugLog "Unknown cluster status: $clusterStatus"
+# Unknown cluster status
+exit 1
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/prom.sh b/kubernetes/sdnc/sdnc-prom/resources/bin/prom.sh
new file mode 100755
index 0000000000..c93ba24bd7
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/prom.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "${SDNC_IS_PRIMARY_CLUSTER:-true}" = "true" ];then
+ id=sdnc01
+else
+ id=sdnc02
+fi
+
+# should PROM start as passive?
+state=$( bin/sdnc.cluster )
+if [ "$state" == "standby" ]; then
+ echo "Starting PROM in passive mode"
+ passive="-p"
+fi
+
+# start PROM as foreground process
+java -jar prom.jar --id $id $passive --config config
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.cluster b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.cluster
new file mode 100755
index 0000000000..76603410d4
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.cluster
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# query ODL cluster state
+USERNAME="{{.Values.odl.jolokia.username}}"
+PASSWORD="{{.Values.odl.jolokia.password}}"
+
+count=${SDNC_ODL_COUNT:-1}
+memberStart=0
+if [ "${SDNC_IS_PRIMARY_CLUSTER:-true}" != "true" ];then
+ memberStart=$(( $memberStart + $count ))
+fi
+
+for instance in $(seq $count);do
+ shard=member-$(( $memberStart + $instance ))-shard-default-config
+ mbean=Category=Shards,name=$shard,type=DistributedConfigDatastore
+ url=http://{{.Release.Name}}-sdnc-$(( $instance-1 )).sdnc-cluster.{{.Release.Namespace}}:8181/jolokia/read/org.opendaylight.controller:$mbean
+
+ response=$( curl -s -u $USERNAME:$PASSWORD $url )
+ rc=$?
+ if [ $rc -ne 0 ];then
+ # failed to contact SDN-C instance - try another
+ continue
+ fi
+ status=$( echo -E "$response" | jq -r ".status" )
+ if [ "$status" != "200" ];then
+ # query failed, try another instance
+ continue
+ fi
+
+ voting=$( echo -E "$response" | jq -r ".value.Voting" )
+ case $voting in
+ true)
+ echo "active"
+ exit 0
+ ;;
+ false)
+ echo "standby"
+ exit 0
+ ;;
+ *)
+ echo "Error: Voting status could not be determined."
+ exit 1
+ ;;
+ esac
+done
+echo "Error: Voting status could not be determined."
+exit 1
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.dnsswitch b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.dnsswitch
new file mode 100755
index 0000000000..209352c4e3
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.dnsswitch
@@ -0,0 +1,22 @@
+#! /bin/bash
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+####################################################################################################
+# sdncDnsSwitchWrapper.bash: Wrapper script to invoke SDNC DNS Switch for domain: sdnc.example.com #
+####################################################################################################
+ssh -i {{.Values.coreDNS.sshKeyFile}} -o StrictHostKeyChecking=no {{.Values.coreDNS.sshUser}}@{{.Values.coreDNS.host}} "{{.Values.coreDNS.switchScript}} $SDNC_LOCAL_K8S_CLUSTER_MASTER {{.Values.config.deployment}}"
+
+exit $?
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.failover b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.failover
new file mode 100755
index 0000000000..e78b7eeee3
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.failover
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOGFILE="/app/geo.log"
+enableDebugLogging=true
+message_router=message-router:3904
+topic={{.Values.config.messageRouterTopic}}
+KEYWORD_success="success"
+KEYWORD_failure="failure"
+SITE_NAME="sdnc01"
+if [ "$SDNC_IS_PRIMARY_CLUSTER" = "false" ];then
+ SITE_NAME="sdnc02"
+fi
+
+APP_BIN=/app/bin
+
+debugLog(){
+ if [ "$enableDebugLogging" == true ]; then
+ if [ $# -eq 0 ]; then
+ echo "" >> $LOGFILE
+ else
+ echo $( date ) $@ >> $LOGFILE
+ fi
+ fi
+}
+
+EXC_SIMPLE_FAILOVER=`${APP_BIN}/switchVoting.sh`
+
+if [ "$EXC_SIMPLE_FAILOVER" == "success" ]; then
+ debugLog "Simple failover success. SDNC failover completed."
+else
+ # Simple failover failed. Trying catastrophic failover ...
+ debugLog "Simple failover failed. Trying catastrophic failover for $SITE_NAME ..."
+
+ # Notify Dmaap before executing catastrophic failover, because all connections will be reset.
+ data="{\
+ \"type\": \"Catastrophic failover\",\
+ \"reason\": \"Simple failover failed\",\
+ \"message_router\": \"$message_router\",\
+ \"topic\": \"$topic\",\
+ \"site\": \"$SITE_NAME\",\
+ \"deployment\": \"{{.Values.config.deployment}}\",\
+ \"timestamp\": \"$(date '+%F %T')\"\
+ }"
+
+ debugLog "$data"
+
+ # notifications to Dmaap
+ curl -H "Content-Type: application/json" -X POST --data "$data" http://$message_router/events/$topic >/dev/null 2>&1
+
+ # We're going to kill prom, so we need to do dnsswitch now
+
+ debugLog "Executing sdnc.dnsswitch"
+
+ /app/bin/sdnc.dnsswitch > /dev/null 2>&1
+ rc=$?
+ if [ $rc -ne 0 ];then
+ debugLog "sdnc.dnsswitch FAILED"
+ echo $KEYWORD_failure
+ exit 0
+ fi
+
+ # Now do catastrophic failure
+
+ debugLog "Catastrophic failover in progress"
+
+ ssh -o StrictHostKeyChecking=no -i /app/config/coredns/master.key root@$SDNC_LOCAL_K8S_CLUSTER_MASTER "su - ubuntu bash -c 'helm upgrade --set sdnc.config.geoEnabled=false dev local/onap --namespace onap; kubectl -n onap delete pods -l app=sdnc'" > /dev/null 2>&1
+
+ # Sleep here so prom can die without us passing control back to ensureSDNCActive
+ sleep 300
+fi
+
+echo $KEYWORD_success
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.monitor b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.monitor
new file mode 100755
index 0000000000..0042ac368a
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/sdnc.monitor
@@ -0,0 +1,125 @@
+#!/usr/bin/env python2
+# encoding: utf-8
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import os
+import json
+import requests
+from datetime import datetime
+
+consul_server = "consul-server:8500"
+message_router = "message-router:3904"
+topic = '{{.Values.config.messageRouterTopic}}'
+log_file='/app/monitor.log'
+status_file='/app/.health'
+logEnabled=False
+
+siteName='sdnc01'
+if os.environ.get('SDNC_IS_PRIMARY_CLUSTER', 'true') == 'false':
+ siteName='sdnc02'
+
+debug=False
+if len(sys.argv) > 1 and sys.argv[1] == '--debug':
+ debug=True
+
+def get_state(healthcheck):
+ response = requests.get("http://" + consul_server + "/v1/health/checks/" + healthcheck)
+ if response.status_code != 200:
+ raise RuntimeError("HTTP " + str(response.status_code))
+ data = response.json()
+ if len(data) == 0:
+ raise RuntimeError(healthcheck + " not found")
+ if len(data) > 1:
+ raise RuntimeError("Multiple states for " + healthcheck + " found")
+
+ return data[0]
+
+
+def log(message):
+ if logEnabled:
+ with open(log_file, 'a') as f:
+ f.write(str(datetime.now()) + " " + message + "\n")
+
+def healthcheck(checks, failFirst=True):
+ if len(checks) == 0:
+ return True
+
+ for check in checks:
+ if type(check) is list:
+ passing = healthcheck(check, False)
+ else:
+ state = get_state(check)
+ status = state['Status']
+ passing = status == "passing" or status == "warning"
+ log(check + " " + status)
+ if debug:
+ if status == "passing":
+ color = "\033[32m" # green
+ elif status == "warning":
+ color = "\033[33m" # yellow
+ else:
+ color = "\033[31m" # red
+ print check, color + status + "\033[0m"
+ if not passing:
+ print "\tCause:", state['Output']
+
+
+ if passing:
+ if not failFirst:
+ # found a passing check so can stop here
+ return True
+ else:
+ if failFirst:
+ # found a failing check so can stop here
+ return False
+
+ return failFirst
+
+
+try:
+ with open("/app/config/healthchecks.json") as f:
+ checks = json.load(f)
+
+ try:
+ with open(status_file) as f:
+ previous_result = f.read()
+ except IOError:
+ # file doesn't exist
+ previous_result = 'unknown'
+
+ if healthcheck(checks):
+ result = "healthy"
+ else:
+ result = "unhealthy"
+
+ print result
+
+ # save current result to file
+ with open(status_file, 'w') as f:
+ f.write(result)
+
+ if previous_result != 'unknown' and result != previous_result:
+ payload = { 'type' : 'health-change', 'status': result, 'site': siteName, 'deployment': '{{.Values.config.deployment}}', 'timestamp': str(datetime.now()) }
+ log("Posting event " + str(payload))
+ try:
+ requests.post("http://" + message_router + "/events/" + topic, data=json.dumps(payload), headers={ 'Content-Type' : 'application/json' } )
+ except Exception:
+ # events are best-effort
+ pass
+
+except Exception as e:
+ sys.exit(str(e))
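
healthcheck() treats the top level of healthchecks.json as an AND over Consul check names (passing or warning both count), while a nested list is an OR group where any one passing member suffices. The rendered file therefore looks something like the sketch below; the check names are illustrative, not taken from this change:

    [
      "sdnc_Health_Check",
      [ "SDNC ODL Cluster member-1", "SDNC ODL Cluster member-2" ]
    ]
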
diff --git a/kubernetes/sdnc/sdnc-prom/resources/bin/switchVoting.sh b/kubernetes/sdnc/sdnc-prom/resources/bin/switchVoting.sh
new file mode 100755
index 0000000000..f13196e7e8
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/bin/switchVoting.sh
@@ -0,0 +1,110 @@
+#!/bin/sh
+
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+primary=${SDNC_IS_PRIMARY_CLUSTER:-true}
+
+url=http://sdnc:8282/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards
+username="${ODL_USERNAME:-{{.Values.odl.restconf.username}}}"
+password="${ODL_PASSWORD:-{{.Values.odl.restconf.password}}}"
+LOGFILE="/app/geo.log"
+enableDebugLogging=true
+
+debugLog(){
+ if [ "$enableDebugLogging" == true ]; then
+ if [ $# -eq 0 ]; then
+ echo "" >> $LOGFILE
+ else
+ echo $( date ) $@ >> $LOGFILE
+ fi
+ fi
+}
+
+
+if [ "$primary" = "true" ]; then
+ votingState='
+{
+ "input": {
+ "member-voting-state": [
+ {
+ "member-name": "member-1",
+ "voting": true
+ },
+ {
+ "member-name": "member-2",
+ "voting": true
+ },
+ {
+ "member-name": "member-3",
+ "voting": true
+ },
+ {
+ "member-name": "member-4",
+ "voting": false
+ },
+ {
+ "member-name": "member-5",
+ "voting": false
+ },
+ {
+ "member-name": "member-6",
+ "voting": false
+ }
+ ]
+ }
+}'
+else
+ votingState='
+{
+ "input": {
+ "member-voting-state": [
+ {
+ "member-name": "member-1",
+ "voting": false
+ },
+ {
+ "member-name": "member-2",
+ "voting": false
+ },
+ {
+ "member-name": "member-3",
+ "voting": false
+ },
+ {
+ "member-name": "member-4",
+ "voting": true
+ },
+ {
+ "member-name": "member-5",
+ "voting": true
+ },
+ {
+ "member-name": "member-6",
+ "voting": true
+ }
+ ]
+ }
+}'
+fi
+
+status=$(curl -s -u $username:$password -o /dev/null -H "Content-Type: application/json" -H "Accept: application/json" -X POST -d "$votingState" -w "%{http_code}\n" $url 2> /dev/null)
+if [ "$status" -ne 200 ]; then
+ debugLog "Switch voting failed. status: $status ,username: $username ,password: $password ,votingState: $votingState ,url:$url "
+ echo "failure"
+else
+ echo "success"
+fi
+
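Editor's note: the script can also be run by hand inside the sdnc-prom container to force the ODL voting change; the environment variable names below are the ones the script itself consumes, and the restconf password placeholder is an assumption:

  SDNC_IS_PRIMARY_CLUSTER=false ODL_USERNAME=admin ODL_PASSWORD=<restconf-password> /app/bin/switchVoting.sh
  tail /app/geo.log   # debug log written by debugLog(), including the HTTP status of the RPC

It prints "success" only when the change-member-voting-states-for-all-shards RPC returns HTTP 200.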
diff --git a/kubernetes/sdnc/sdnc-prom/resources/config/config.json b/kubernetes/sdnc/sdnc-prom/resources/config/config.json
new file mode 100644
index 0000000000..54f95c140c
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/config/config.json
@@ -0,0 +1,19 @@
+{
+ "app-name": "{{.Values.config.deployment}}",
+ "aid": "{{.Values.config.aid}}",
+ "namespace": "{{.Values.config.deployment}}",
+ "userid": "{{.Values.config.deployment}}",
+ "password": "{{.Values.config.password}}",
+ "prom-timeout": "{{.Values.config.promTimeout}}",
+ "core-monitor-sleep-time": "{{.Values.config.coreMonitorSleepTime}}",
+ "no-of-retry-attempts": "{{.Values.config.noOfRetryAttempts}}",
+ "restart-backoff-time": "{{.Values.config.restartBackoffTime}}",
+ "replica-id-list": [ "sdnc01", "sdnc02" ],
+ "ensure-active-sdnc01": "/app/bin/ensureSdncActive.sh",
+ "ensure-active-sdnc02": "/app/bin/ensureSdncActive.sh",
+ "ensure-passive-sdnc01": "/app/bin/ensureSdncStandby.sh",
+ "ensure-passive-sdnc02": "/app/bin/ensureSdncStandby.sh",
+ "music-connection-timeout-ms": "{{.Values.config.musicConnectionTimeoutMs}}",
+ "music-location": {{.Values.config.musicLocation|toJson}},
+ "music-version": "2"
+}
diff --git a/kubernetes/sdnc/sdnc-prom/resources/config/healthchecks.json b/kubernetes/sdnc/sdnc-prom/resources/config/healthchecks.json
new file mode 100644
index 0000000000..ea8ceccc0c
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/resources/config/healthchecks.json
@@ -0,0 +1 @@
+{{.Values.config.healthChecks|toJson}}
diff --git a/kubernetes/vid/charts/mariadb/templates/secrets.yaml b/kubernetes/sdnc/sdnc-prom/templates/configmap.yaml
index 36096925f5..cf4332334b 100644
--- a/kubernetes/vid/charts/mariadb/templates/secrets.yaml
+++ b/kubernetes/sdnc/sdnc-prom/templates/configmap.yaml
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright © 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,16 +13,17 @@
# limitations under the License.
apiVersion: v1
-kind: Secret
+kind: ConfigMap
metadata:
- name: {{ include "common.fullname" . }}
+ name: {{ include "common.fullname" . }}-configmap
namespace: {{ include "common.namespace" . }}
- labels:
- app: {{ include "common.name" . }}
- chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-type: Opaque
data:
- db-root-password: {{ .Values.config.mariadbRootPassword | b64enc | quote }}
- db-password: {{ .Values.config.mariadbPassword | b64enc | quote }}
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-scripts
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/bin/*").AsConfig . | indent 2 }}
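Editor's note: both ConfigMaps above are populated directly from the chart's resources tree via .Files.Glob ... AsConfig, so every file under resources/config and resources/bin becomes a key in the rendered object. A quick way to verify the rendering; the object name is an assumption based on the usual common.fullname convention:

  helm template kubernetes/sdnc/sdnc-prom | grep -A5 "kind: ConfigMap"
  kubectl -n onap get configmap <release>-sdnc-prom-scripts -o yaml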
diff --git a/kubernetes/sdnc/sdnc-prom/templates/deployment.yaml b/kubernetes/sdnc/sdnc-prom/templates/deployment.yaml
new file mode 100644
index 0000000000..76f722c65f
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/templates/deployment.yaml
@@ -0,0 +1,95 @@
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - sdnc
+ - --container-name
+ - consul
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ name: {{ include "common.name" . }}-readiness
+ containers:
+ - name: {{ include "common.name" . }}
+ image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+ envFrom:
+ - configMapRef:
+ name: {{ .Release.Name }}-sdnc-env
+ workingDir: "/app"
+ command: [ "bin/prom.sh" ]
+ volumeMounts:
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: prom-config
+ mountPath: /app/config
+ - name: prom-scripts
+ mountPath: /app/bin
+ - name: core-dns-keyfile
+ mountPath: /app/config/coredns
+
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+ {{- end -}}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+ {{- end }}
+ volumes:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: prom-config
+ configMap:
+ name: {{ include "common.fullname" . }}-configmap
+ - name: prom-scripts
+ configMap:
+ name: {{ include "common.fullname" . }}-scripts
+ defaultMode: 0755
+ - name: core-dns-keyfile
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
+ imagePullSecrets:
+      - name: {{ include "common.namespace" . }}-docker-registry-key
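Editor's note: because the scripts ConfigMap is mounted at /app/bin with defaultMode 0755, the health-check and switch-voting scripts arrive in the container already executable, alongside the config files at /app/config. A minimal sanity check once the pod is running (pod name and namespace are placeholders):

  kubectl -n onap exec <sdnc-prom-pod> -- ls -l /app/bin /app/config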
diff --git a/kubernetes/sdnc/sdnc-prom/values.yaml b/kubernetes/sdnc/sdnc-prom/values.yaml
new file mode 100644
index 0000000000..fbee9b9d96
--- /dev/null
+++ b/kubernetes/sdnc/sdnc-prom/values.yaml
@@ -0,0 +1,102 @@
+# Copyright © 2018 Amdocs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 302
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+ persistence:
+ mountPath: /dockerdata-nfs
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+# application image
+repository: nexus3.onap.org:10001
+pullPolicy: Always
+image: onap/music/prom:1.0.5-latest
+
+# application configuration
+config:
+ # generate aid from onboarding your app in MUSIC
+ aid: "aid_for_your_app"
+ deployment: "test_onap"
+ password: "onap123"
+ musicLocation:
+ - "1.2.3.4"
+ - "1.2.3.5"
+ - "1.2.3.6"
+ musicConnectionTimeoutMs: "1000"
+ promTimeout: "35000"
+ coreMonitorSleepTime: "15000"
+ noOfRetryAttempts: "2"
+ restartBackoffTime: "15000"
+ healthChecks:
+ # All top-level checks must pass
+ - "Health Check: SDNC - SDN Host"
+ - "Health Check: SDNC"
+ - "Health Check: SDNC ODL Cluster"
+ - "Health Check: SDNC Portal"
+ # Within nested lists, only one must pass
+ - - "Health Check: SDNC-SDN-CTL-DB-01"
+ - "Health Check: SDNC-SDN-CTL-DB-02"
+ messageRouterTopic: "SDNC-GEO-REDUNDANCY"
+
+odl:
+ jolokia:
+ username: "admin"
+ password: "admin"
+ restconf:
+ username: "admin"
+ password: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
+
+coreDNS:
+ host: 1.2.3.7
+ sshUser: root
+ sshKeyFile: /app/config/coredns/coredns.key
+ switchScript: /home/ubuntu/dnsSwitch.bash
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 90
+ periodSeconds: 90
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+persistence:
+ enabled: true
+ accessMode: ReadWriteOnce
+ size: 1Gi
+ mountPath: /dockerdata-nfs
+ mountSubPath: coredns
+
+ingress:
+ enabled: false
+
+resources: {}
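Editor's note: a sketch of installing the chart standalone with site-specific overrides, assuming the chart's common dependency is available locally; the MUSIC endpoints and deployment name are placeholders, and list values use helm's brace syntax:

  helm install kubernetes/sdnc/sdnc-prom --name sdnc-prom-site-a --namespace onap \
    --set config.deployment=site-a \
    --set config.musicLocation='{10.0.0.1,10.0.0.2,10.0.0.3}'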
diff --git a/kubernetes/sdnc/templates/configmap.yaml b/kubernetes/sdnc/templates/configmap.yaml
index e9498cb8dc..6f4ee2285e 100644
--- a/kubernetes/sdnc/templates/configmap.yaml
+++ b/kubernetes/sdnc/templates/configmap.yaml
@@ -43,3 +43,11 @@ metadata:
namespace: {{ include "common.namespace" . }}
data:
{{ tpl (.Files.Glob "resources/config/conf/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-env
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Get "resources/env.yaml") . | indent 2 }}
diff --git a/kubernetes/sdnc/templates/secrets.yaml b/kubernetes/sdnc/templates/secrets.yaml
index 754f117e38..a900132c31 100644
--- a/kubernetes/sdnc/templates/secrets.yaml
+++ b/kubernetes/sdnc/templates/secrets.yaml
@@ -25,3 +25,17 @@ metadata:
type: Opaque
data:
odl-password: {{ .Values.config.odlPassword | b64enc | quote }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.fullname" . }}-sdnctl
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+type: Opaque
+data:
+ db-sdnctl-password: {{ .Values.config.dbSdnctlPassword | b64enc | quote }}
diff --git a/kubernetes/sdnc/templates/statefulset.yaml b/kubernetes/sdnc/templates/statefulset.yaml
index 69816dffb4..03ae8800bd 100644
--- a/kubernetes/sdnc/templates/statefulset.yaml
+++ b/kubernetes/sdnc/templates/statefulset.yaml
@@ -74,6 +74,11 @@ spec:
secretKeyRef:
name: {{ template "common.fullname" . }}-odl
key: odl-password
+ - name: SDNC_DB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "common.fullname" . }}-sdnctl
+ key: db-sdnctl-password
- name: SDNC_CONFIG_DIR
value: "{{ .Values.config.configDir }}"
- name: ENABLE_ODL_CLUSTER
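Editor's note: the new db-sdnctl-password entry follows the same pattern as the existing odl-password secret: the value is base64-encoded by the template and injected into the pod as SDNC_DB_PASSWORD via secretKeyRef. To read it back for troubleshooting (the secret name assumes the usual <release>-sdnc fullname):

  kubectl -n onap get secret <release>-sdnc-sdnctl -o jsonpath='{.data.db-sdnctl-password}' | base64 -d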
diff --git a/kubernetes/sdnc/values.yaml b/kubernetes/sdnc/values.yaml
index 89d6f7cc72..607fd05400 100644
--- a/kubernetes/sdnc/values.yaml
+++ b/kubernetes/sdnc/values.yaml
@@ -31,7 +31,7 @@ global:
# application images
repository: nexus3.onap.org:10001
pullPolicy: Always
-image: onap/sdnc-image:1.3-STAGING-latest
+image: onap/sdnc-image:1.4-STAGING-latest
# flag to enable debugging - application support required
debugEnabled: false
@@ -40,6 +40,7 @@ debugEnabled: false
config:
odlPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
dbRootPassword: openECOMP1.0
+ dbSdnctlPassword: gamma
enableClustering: true
binDir: /opt/onap/sdnc/bin
geoEnabled: false
@@ -47,7 +48,7 @@ config:
# if geoEnabled is set to true the following 3 values must be set to their proper values
myODLCluster: 127.0.0.1
peerODLCluster: 127.0.0.1
- isPrimaryCluster: false
+ isPrimaryCluster: true
configDir: /opt/onap/sdnc/data/properties
dmaapTopic: SUCCESS
dmaapPort: 3904
@@ -56,7 +57,6 @@ config:
ansibleServiceName: sdnc-ansible-server
ansiblePort: 8000
-
# dependency / sub-chart configuration
dmaap-listener:
nameOverride: sdnc-dmaap-listener
@@ -83,6 +83,7 @@ sdnc-portal:
mysqlChartName: sdnc-db
configDir: /opt/onap/sdnc/data/properties
dbRootPassword: openECOMP1.0
+ dbSdnctlPassword: gamma
odlPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
sdnc-ansible-server:
@@ -112,6 +113,8 @@ dgbuilder:
dbPodName: sdnc-db
dbServiceName: sdnc-dbhost
dbRootPassword: openECOMP1.0
+ dbSdnctlPassword: gamma
+ dgUserPassword: cc03e747a6afbbcbf8be7668acfebee5
service:
name: sdnc-dgbuilder
nodePort: "03"
diff --git a/kubernetes/sniro-emulator/values.yaml b/kubernetes/sniro-emulator/values.yaml
index 970578d063..e81481da2d 100644
--- a/kubernetes/sniro-emulator/values.yaml
+++ b/kubernetes/sniro-emulator/values.yaml
@@ -20,7 +20,7 @@ global: # global defaults
# application image
repository: nexus3.onap.org:10001
-image: onap/sniroemulator:latest
+image: onap/sniroemulator:1.0.0
pullPolicy: IfNotPresent
# flag to enable debugging - application support required
diff --git a/kubernetes/so/charts/mariadb/values.yaml b/kubernetes/so/charts/mariadb/values.yaml
index 186f825af7..693e3dd689 100644
--- a/kubernetes/so/charts/mariadb/values.yaml
+++ b/kubernetes/so/charts/mariadb/values.yaml
@@ -32,7 +32,7 @@ ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
config:
mariadbRootPassword: password
# gerrit branch where the latest heat code is checked in
- gerritBranch: master
+ gerritBranch: 2.0.0-ONAP
# gerrit project where the latest heat code is checked in
gerritProject: http://gerrit.onap.org/r/so/docker-config.git
@@ -46,14 +46,14 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 90
+ initialDelaySeconds: 450
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 90
+ initialDelaySeconds: 450
periodSeconds: 10
## Persist data to a persitent volume
diff --git a/kubernetes/so/resources/config/mso/mso-docker.json b/kubernetes/so/resources/config/mso/mso-docker.json
index 8276d25a7c..167a8edc09 100755
--- a/kubernetes/so/resources/config/mso/mso-docker.json
+++ b/kubernetes/so/resources/config/mso/mso-docker.json
@@ -191,10 +191,10 @@
"sdncTimeoutFirewall": "20",
"callbackRetryAttempts": "30",
"callbackRetrySleepTime": "1000",
- "appcClientTopicRead": "APPC-LCM-READ",
- "appcClientTopicWrite": "APPC-LCM-WRITE",
- "appcClientTopicSdncRead": "SDNC-LCM-READ",
- "appcClientTopicSdncWrite": "SDNC-LCM-WRITE",
+ "appcClientTopicRead": "APPC-LCM-WRITE",
+ "appcClientTopicWrite": "APPC-LCM-READ",
+ "appcClientTopicSdncRead": "SDNC-LCM-WRITE",
+ "appcClientTopicSdncWrite": "SDNC-LCM-READ",
"appcClientTopicReadTimeout": "360000",
"appcClientResponseTime": "360000",
"appcClientPoolMembers": "{{.Values.config.messagerouter.serviceName}}:{{.Values.config.messagerouter.port}}",
diff --git a/kubernetes/so/values.yaml b/kubernetes/so/values.yaml
index b8b120c0d3..d7a769a9d5 100644
--- a/kubernetes/so/values.yaml
+++ b/kubernetes/so/values.yaml
@@ -27,7 +27,7 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: openecomp/mso:1.2.1
+image: openecomp/mso:1.2.2
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/uui/charts/uui-server/values.yaml b/kubernetes/uui/charts/uui-server/values.yaml
index 55ae2d48b0..25a9538866 100644
--- a/kubernetes/uui/charts/uui-server/values.yaml
+++ b/kubernetes/uui/charts/uui-server/values.yaml
@@ -23,7 +23,7 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: onap/usecase-ui/usecase-ui-server:latest
+image: onap/usecase-ui/usecase-ui-server:1.1.1
pullPolicy: Always
# application configuration
@@ -43,14 +43,14 @@ affinity: {}
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 120
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 60
periodSeconds: 10
service:
diff --git a/kubernetes/uui/values.yaml b/kubernetes/uui/values.yaml
index 6ee7fb957d..66b277837e 100644
--- a/kubernetes/uui/values.yaml
+++ b/kubernetes/uui/values.yaml
@@ -23,7 +23,7 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: onap/usecase-ui:latest
+image: onap/usecase-ui:1.1.1
pullPolicy: Always
# application configuration
diff --git a/kubernetes/vfc/charts/vfc-catalog/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-catalog/templates/deployment.yaml
index 084c320e31..9fbebb47b1 100644
--- a/kubernetes/vfc/charts/vfc-catalog/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-catalog/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-catalog/values.yaml b/kubernetes/vfc/charts/vfc-catalog/values.yaml
index 465a0ae964..70a0dac265 100644
--- a/kubernetes/vfc/charts/vfc-catalog/values.yaml
+++ b/kubernetes/vfc/charts/vfc-catalog/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/catalog:1.1.0-STAGING-latest
+image: onap/vfc/catalog:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
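Editor's note: the same istioSidecar flag and sidecar.istio.io/inject pod annotation are added to every VF-C component in the hunks that follow, so injection can be switched off per component at deploy time. A hedged example, assuming the vfc umbrella chart exposes the sub-charts under their chart names:

  helm upgrade <release> kubernetes/vfc --set vfc-catalog.istioSidecar=false

  # confirm the rendered annotation on a running pod
  kubectl -n onap get pod -l app=vfc-catalog -o jsonpath='{.items[0].metadata.annotations}'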
diff --git a/kubernetes/vfc/charts/vfc-ems-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-ems-driver/templates/deployment.yaml
index 287f0a7d3e..9949157923 100644
--- a/kubernetes/vfc/charts/vfc-ems-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-ems-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-ems-driver/values.yaml b/kubernetes/vfc/charts/vfc-ems-driver/values.yaml
index cfdc8fa026..1fed3d8376 100644
--- a/kubernetes/vfc/charts/vfc-ems-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-ems-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/emsdriver:1.1.0-STAGING-latest
+image: onap/vfc/emsdriver:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-generic-vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-generic-vnfm-driver/templates/deployment.yaml
index 825bdcec63..e8f085802e 100644
--- a/kubernetes/vfc/charts/vfc-generic-vnfm-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-generic-vnfm-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-generic-vnfm-driver/values.yaml b/kubernetes/vfc/charts/vfc-generic-vnfm-driver/values.yaml
index b383a945b1..ea532e8a76 100644
--- a/kubernetes/vfc/charts/vfc-generic-vnfm-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-generic-vnfm-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/gvnfmdriver:1.1.0-STAGING-latest
+image: onap/vfc/gvnfmdriver:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/templates/deployment.yaml
index 28b5950b48..ff307eb035 100644
--- a/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/values.yaml b/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/values.yaml
index 407837b37e..4e3ce4e2c0 100644
--- a/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-huawei-vnfm-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/nfvo/svnfm/huawei:1.1.0-STAGING-latest
+image: onap/vfc/nfvo/svnfm/huawei:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-juju-vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-juju-vnfm-driver/templates/deployment.yaml
index 1d1a5d7702..7f7a6e897b 100644
--- a/kubernetes/vfc/charts/vfc-juju-vnfm-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-juju-vnfm-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-juju-vnfm-driver/values.yaml b/kubernetes/vfc/charts/vfc-juju-vnfm-driver/values.yaml
index a27ee6ae50..1a2b9ff5a0 100644
--- a/kubernetes/vfc/charts/vfc-juju-vnfm-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-juju-vnfm-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/jujudriver:1.1.0-STAGING-latest
+image: onap/vfc/jujudriver:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-multivim-proxy/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-multivim-proxy/templates/deployment.yaml
index ba33ec587d..0bb774abd7 100644
--- a/kubernetes/vfc/charts/vfc-multivim-proxy/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-multivim-proxy/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-multivim-proxy/values.yaml b/kubernetes/vfc/charts/vfc-multivim-proxy/values.yaml
index 10eee7f0cb..4dd6229c6e 100644
--- a/kubernetes/vfc/charts/vfc-multivim-proxy/values.yaml
+++ b/kubernetes/vfc/charts/vfc-multivim-proxy/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/multivimproxy:1.0.0-STAGING-latest
+image: onap/vfc/multivimproxy:1.0.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/templates/deployment.yaml
index ba33ec587d..0bb774abd7 100644
--- a/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/values.yaml b/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/values.yaml
index b20446f34e..7019ecf1bf 100644
--- a/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/nfvo/svnfm/nokiav2:1.1.0-STAGING-latest
+image: onap/vfc/nfvo/svnfm/nokiav2:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml
index 9c4437ee0c..cab32301ad 100644
--- a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/values.yaml b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/values.yaml
index e4a29d96ec..d31707592d 100644
--- a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/nfvo/svnfm/nokia:1.1.0-STAGING-latest
+image: onap/vfc/nfvo/svnfm/nokia:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-nslcm/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-nslcm/templates/deployment.yaml
index b2481c8f33..adc44a4dc7 100644
--- a/kubernetes/vfc/charts/vfc-nslcm/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-nslcm/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-nslcm/values.yaml b/kubernetes/vfc/charts/vfc-nslcm/values.yaml
index 313b5a9c91..1b76d0c6c7 100644
--- a/kubernetes/vfc/charts/vfc-nslcm/values.yaml
+++ b/kubernetes/vfc/charts/vfc-nslcm/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/nslcm:1.1.0-STAGING-latest
+image: onap/vfc/nslcm:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-resmgr/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-resmgr/templates/deployment.yaml
index 27d5fcdd43..c42d22e463 100644
--- a/kubernetes/vfc/charts/vfc-resmgr/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-resmgr/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-resmgr/values.yaml b/kubernetes/vfc/charts/vfc-resmgr/values.yaml
index c1e7a63a15..d7de044521 100644
--- a/kubernetes/vfc/charts/vfc-resmgr/values.yaml
+++ b/kubernetes/vfc/charts/vfc-resmgr/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/resmanagement:1.1.0-STAGING-latest
+image: onap/vfc/resmanagement:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-vnflcm/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-vnflcm/templates/deployment.yaml
index ccea328c0c..17aaad731a 100644
--- a/kubernetes/vfc/charts/vfc-vnflcm/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-vnflcm/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-vnflcm/values.yaml b/kubernetes/vfc/charts/vfc-vnflcm/values.yaml
index 6afc34d44a..d4aad543b2 100644
--- a/kubernetes/vfc/charts/vfc-vnflcm/values.yaml
+++ b/kubernetes/vfc/charts/vfc-vnflcm/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/vnflcm:1.1.0-STAGING-latest
+image: onap/vfc/vnflcm:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-vnfmgr/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-vnfmgr/templates/deployment.yaml
index 9d31443088..0bfdf9955e 100644
--- a/kubernetes/vfc/charts/vfc-vnfmgr/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-vnfmgr/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-vnfmgr/values.yaml b/kubernetes/vfc/charts/vfc-vnfmgr/values.yaml
index 244e6f2b9f..227142e88a 100644
--- a/kubernetes/vfc/charts/vfc-vnfmgr/values.yaml
+++ b/kubernetes/vfc/charts/vfc-vnfmgr/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/vnfmgr:1.1.0-STAGING-latest
+image: onap/vfc/vnfmgr:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-vnfres/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-vnfres/templates/deployment.yaml
index 52e807abe5..10a5e7a9ff 100644
--- a/kubernetes/vfc/charts/vfc-vnfres/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-vnfres/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-vnfres/values.yaml b/kubernetes/vfc/charts/vfc-vnfres/values.yaml
index fec9089160..35bebec96e 100644
--- a/kubernetes/vfc/charts/vfc-vnfres/values.yaml
+++ b/kubernetes/vfc/charts/vfc-vnfres/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/vnfres:1.1.0-STAGING-latest
+image: onap/vfc/vnfres:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-workflow-engine/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-workflow-engine/templates/deployment.yaml
index adb077a518..a25cace8e7 100644
--- a/kubernetes/vfc/charts/vfc-workflow-engine/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-workflow-engine/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-workflow-engine/values.yaml b/kubernetes/vfc/charts/vfc-workflow-engine/values.yaml
index 5a7503472e..f17d4a546f 100644
--- a/kubernetes/vfc/charts/vfc-workflow-engine/values.yaml
+++ b/kubernetes/vfc/charts/vfc-workflow-engine/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/wfengine-activiti:latest
+image: onap/vfc/wfengine-activiti:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-workflow/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-workflow/templates/deployment.yaml
index e2b3d0bc96..0bbfd75074 100644
--- a/kubernetes/vfc/charts/vfc-workflow/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-workflow/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-workflow/values.yaml b/kubernetes/vfc/charts/vfc-workflow/values.yaml
index 94d8673bed..d59800006d 100644
--- a/kubernetes/vfc/charts/vfc-workflow/values.yaml
+++ b/kubernetes/vfc/charts/vfc-workflow/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/wfengine-mgrservice:latest
+image: onap/vfc/wfengine-mgrservice:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-zte-sdnc-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-zte-sdnc-driver/templates/deployment.yaml
index ba33ec587d..0bb774abd7 100644
--- a/kubernetes/vfc/charts/vfc-zte-sdnc-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-zte-sdnc-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-zte-sdnc-driver/values.yaml b/kubernetes/vfc/charts/vfc-zte-sdnc-driver/values.yaml
index 4a736e58f9..e319a07125 100644
--- a/kubernetes/vfc/charts/vfc-zte-sdnc-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-zte-sdnc-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/ztesdncdriver:1.1.0-STAGING-latest
+image: onap/vfc/ztesdncdriver:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vfc/charts/vfc-zte-vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-zte-vnfm-driver/templates/deployment.yaml
index 7a0f4a1132..8125215840 100644
--- a/kubernetes/vfc/charts/vfc-zte-vnfm-driver/templates/deployment.yaml
+++ b/kubernetes/vfc/charts/vfc-zte-vnfm-driver/templates/deployment.yaml
@@ -29,6 +29,8 @@ spec:
labels:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
+ annotations:
+ sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
spec:
initContainers:
#Example init container for dependency checking
diff --git a/kubernetes/vfc/charts/vfc-zte-vnfm-driver/values.yaml b/kubernetes/vfc/charts/vfc-zte-vnfm-driver/values.yaml
index 751e99eaf1..a9ad0a18d6 100644
--- a/kubernetes/vfc/charts/vfc-zte-vnfm-driver/values.yaml
+++ b/kubernetes/vfc/charts/vfc-zte-vnfm-driver/values.yaml
@@ -27,9 +27,12 @@ global:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/vfc/ztevnfmdriver:1.1.0-STAGING-latest
+image: onap/vfc/ztevnfmdriver:1.1.0
pullPolicy: Always
+#Istio sidecar injection policy
+istioSidecar: true
+
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-my.cnf b/kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-my.cnf
deleted file mode 100644
index 472bf4698e..0000000000
--- a/kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-my.cnf
+++ /dev/null
@@ -1,192 +0,0 @@
-# MariaDB database server configuration file.
-#
-# You can copy this file to one of:
-# - "/etc/mysql/my.cnf" to set global options,
-# - "~/.my.cnf" to set user-specific options.
-#
-# One can use all long options that the program supports.
-# Run program with --help to get a list of available options and with
-# --print-defaults to see which it would actually understand and use.
-#
-# For explanations see
-# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
-
-# This will be passed to all mysql clients
-# It has been reported that passwords should be enclosed with ticks/quotes
-# escpecially if they contain "#" chars...
-# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
-[client]
-port = 3306
-socket = /var/run/mysqld/mysqld.sock
-
-# Here is entries for some specific programs
-# The following values assume you have at least 32M ram
-
-# This was formally known as [safe_mysqld]. Both versions are currently parsed.
-[mysqld_safe]
-socket = /var/run/mysqld/mysqld.sock
-nice = 0
-
-[mysqld]
-lower_case_table_names = 1
-skip-host-cache
-skip-name-resolve
-#
-# * Basic Settings
-#
-#user = mysql
-pid-file = /var/run/mysqld/mysqld.pid
-socket = /var/run/mysqld/mysqld.sock
-port = 3306
-basedir = /usr
-datadir = /var/lib/mysql
-tmpdir = /tmp
-lc_messages_dir = /usr/share/mysql
-lc_messages = en_US
-skip-external-locking
-#
-# Instead of skip-networking the default is now to listen only on
-# localhost which is more compatible and is not less secure.
-#bind-address = 127.0.0.1
-#
-# * Fine Tuning
-#
-max_connections = 100
-connect_timeout = 5
-wait_timeout = 600
-max_allowed_packet = 16M
-thread_cache_size = 128
-sort_buffer_size = 4M
-bulk_insert_buffer_size = 16M
-tmp_table_size = 32M
-max_heap_table_size = 32M
-#
-# * MyISAM
-#
-# This replaces the startup script and checks MyISAM tables if needed
-# the first time they are touched. On error, make copy and try a repair.
-myisam_recover_options = BACKUP
-key_buffer_size = 128M
-#open-files-limit = 2000
-table_open_cache = 400
-myisam_sort_buffer_size = 512M
-concurrent_insert = 2
-read_buffer_size = 2M
-read_rnd_buffer_size = 1M
-#
-# * Query Cache Configuration
-#
-# Cache only tiny result sets, so we can fit more in the query cache.
-query_cache_limit = 128K
-query_cache_size = 64M
-# for more write intensive setups, set to DEMAND or OFF
-#query_cache_type = DEMAND
-#
-# * Logging and Replication
-#
-# Both location gets rotated by the cronjob.
-# Be aware that this log type is a performance killer.
-# As of 5.1 you can enable the log at runtime!
-#general_log_file = /var/log/mysql/mysql.log
-#general_log = 1
-#
-# Error logging goes to syslog due to /etc/mysql/conf.d/mysqld_safe_syslog.cnf.
-#
-# we do want to know about network errors and such
-#log_warnings = 2
-#
-# Enable the slow query log to see queries with especially long duration
-#slow_query_log[={0|1}]
-slow_query_log_file = /var/log/mysql/mariadb-slow.log
-long_query_time = 10
-#log_slow_rate_limit = 1000
-#log_slow_verbosity = query_plan
-
-#log-queries-not-using-indexes
-#log_slow_admin_statements
-#
-# The following can be used as easy to replay backup logs or for replication.
-# note: if you are setting up a replication slave, see README.Debian about
-# other settings you may need to change.
-#server-id = 1
-#report_host = master1
-#auto_increment_increment = 2
-#auto_increment_offset = 1
-#log_bin = /var/log/mysql/mariadb-bin
-#log_bin_index = /var/log/mysql/mariadb-bin.index
-# not fab for performance, but safer
-#sync_binlog = 1
-expire_logs_days = 10
-max_binlog_size = 100M
-# slaves
-#relay_log = /var/log/mysql/relay-bin
-#relay_log_index = /var/log/mysql/relay-bin.index
-#relay_log_info_file = /var/log/mysql/relay-bin.info
-#log_slave_updates
-#read_only
-#
-# If applications support it, this stricter sql_mode prevents some
-# mistakes like inserting invalid dates etc.
-#sql_mode = NO_ENGINE_SUBSTITUTION,TRADITIONAL
-#
-# * InnoDB
-#
-# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
-# Read the manual for more InnoDB related options. There are many!
-default_storage_engine = InnoDB
-# you can't just change log file size, requires special procedure
-#innodb_log_file_size = 50M
-innodb_buffer_pool_size = 256M
-innodb_log_buffer_size = 8M
-innodb_file_per_table = 1
-innodb_open_files = 400
-innodb_io_capacity = 400
-innodb_flush_method = O_DIRECT
-#
-# * Security Features
-#
-# Read the manual, too, if you want chroot!
-# chroot = /var/lib/mysql/
-#
-# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
-#
-# ssl-ca=/etc/mysql/cacert.pem
-# ssl-cert=/etc/mysql/server-cert.pem
-# ssl-key=/etc/mysql/server-key.pem
-
-#
-# * Galera-related settings
-#
-[galera]
-# Mandatory settings
-#wsrep_on=ON
-#wsrep_provider=
-#wsrep_cluster_address=
-#binlog_format=row
-#default_storage_engine=InnoDB
-#innodb_autoinc_lock_mode=2
-#
-# Allow server to accept connections on all interfaces.
-#
-#bind-address=0.0.0.0
-#
-# Optional setting
-#wsrep_slave_threads=1
-#innodb_flush_log_at_trx_commit=0
-
-[mysqldump]
-quick
-quote-names
-max_allowed_packet = 16M
-
-[mysql]
-#no-auto-rehash # faster start of mysql but no tab completion
-
-[isamchk]
-key_buffer = 16M
-
-#
-# * IMPORTANT: Additional settings that can override those from this file!
-# The files must end with '.cnf', otherwise they'll be ignored.
-#
-!includedir /etc/mysql/conf.d/
diff --git a/kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-pre-init.sql b/kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-pre-init.sql
deleted file mode 100644
index 8323a4ae86..0000000000
--- a/kubernetes/vid/charts/mariadb/resources/config/lf_config/vid-pre-init.sql
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright © 2017 AT&T, Amdocs, Bell Canada
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-CREATE TABLE IF NOT EXISTS `vid_openecomp_epsdk`.`schema_info` (
- `SCHEMA_ID` VARCHAR(25) NOT NULL,
- `SCHEMA_DESC` VARCHAR(75) NOT NULL,
- `DATASOURCE_TYPE` VARCHAR(100) NULL DEFAULT NULL,
- `CONNECTION_URL` VARCHAR(200) NOT NULL,
- `USER_NAME` VARCHAR(45) NOT NULL,
- `PASSWORD` VARCHAR(45) NULL DEFAULT NULL,
- `DRIVER_CLASS` VARCHAR(100) NOT NULL,
- `MIN_POOL_SIZE` INT(11) NOT NULL,
- `MAX_POOL_SIZE` INT(11) NOT NULL,
- `IDLE_CONNECTION_TEST_PERIOD` INT(11) NOT NULL)
- ENGINE = InnoDB
- DEFAULT CHARACTER SET = utf8;
diff --git a/kubernetes/vid/charts/mariadb/templates/NOTES.txt b/kubernetes/vid/charts/mariadb/templates/NOTES.txt
deleted file mode 100644
index 75f0a7a6b3..0000000000
--- a/kubernetes/vid/charts/mariadb/templates/NOTES.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
-{{- range .Values.ingress.hosts }}
- http://{{ . }}
-{{- end }}
- export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
- echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
- export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ .Chart.Name }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
- echo "Visit http://127.0.0.1:8080 to use your application"
- kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
-{{- end }}
diff --git a/kubernetes/vid/charts/vid-galera/Chart.yaml b/kubernetes/vid/charts/vid-galera/Chart.yaml
new file mode 100644
index 0000000000..85f36dc6fb
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+description: Chart for MariaDB Galera cluster
+name: mariadb-galera
+version: 2.0.0
+keywords:
+ - mariadb
+ - mysql
+ - database
+ - sql
+ - galera
+  - cluster
\ No newline at end of file
diff --git a/kubernetes/vid/charts/vid-galera/templates/NOTES.txt b/kubernetes/vid/charts/vid-galera/templates/NOTES.txt
new file mode 100644
index 0000000000..3dd25ac4b7
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/NOTES.txt
@@ -0,0 +1,12 @@
+MariaDB-Galera service can be accessed via port 3306 on the following DNS name from within your cluster:
+{{ include "common.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+To connect to your database:
+
+1. Run a pod that you can use as a client:
+
+ kubectl run {{ include "common.fullname" . }}-client --rm --tty -i --image mariadb --command -- bash
+
+2. Connect using the mysql cli (the root password is taken from config.mariadbRootPassword when set):
+  $ mysql -h {{ include "common.fullname" . }} {{- if .Values.config.mariadbRootPassword }} -p{{ .Values.config.mariadbRootPassword }}{{- end -}}
+
diff --git a/kubernetes/vid/charts/vid-galera/templates/configmap.yaml b/kubernetes/vid/charts/vid-galera/templates/configmap.yaml
new file mode 100644
index 0000000000..ea90cd3e45
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/configmap.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-confd
+ namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/conf.d/*").AsConfig . | indent 2 }}
+---
+{{- if .Values.externalConfig }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-externalconfig
+ namespace: {{ include "common.namespace" . }}
+data:
+ my_extra.cnf: |-
+ [mysqld]
+ lower_case_table_names = 1
+
+#{{ toYaml .Values.externalConfig | indent 4 }}
+#{{- end -}}
diff --git a/kubernetes/vid/charts/vid-galera/templates/pv.yaml b/kubernetes/vid/charts/vid-galera/templates/pv.yaml
new file mode 100644
index 0000000000..f682196931
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/pv.yaml
@@ -0,0 +1,37 @@
+{{/*
+# Copyright 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
+spec:
+ capacity:
+ storage: {{ .Values.persistence.size}}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+ hostPath:
+ path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
+{{- end -}}
diff --git a/kubernetes/vid/charts/vid-galera/templates/pvc.yaml b/kubernetes/vid/charts/vid-galera/templates/pvc.yaml
new file mode 100644
index 0000000000..c3de6e8150
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/pvc.yaml
@@ -0,0 +1,48 @@
+{{/*
+# Copyright 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+ annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+ selector:
+ matchLabels:
+ name: {{ include "common.fullname" . }}
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+{{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
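Editor's note: the PV and PVC above are deliberately paired, with the claim's selector.matchLabels binding it to the manually created hostPath volume rather than to a dynamically provisioned one; both render only when persistence.enabled is true and no existingClaim is supplied. A quick check of the binding (namespace is a placeholder):

  kubectl get pv | grep vid
  kubectl -n onap get pvc | grep vid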
diff --git a/kubernetes/vid/charts/vid-galera/templates/secrets.yaml b/kubernetes/vid/charts/vid-galera/templates/secrets.yaml
new file mode 100644
index 0000000000..101a7eba79
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/secrets.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+type: Opaque
+data:
+ db-root-password: {{ .Values.config.mariadbRootPassword | b64enc | quote }}
+  user-password: {{ default "" .Values.config.userPassword | b64enc | quote }}
\ No newline at end of file
diff --git a/kubernetes/vid/charts/vid-galera/templates/service.yaml b/kubernetes/vid/charts/vid-galera/templates/service.yaml
new file mode 100644
index 0000000000..348baa9ddc
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+# name: {{ include "common.servicename" . }}
+ name: {{ .Values.service.name }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+ ports:
+ - name: {{ .Values.service.portName }}
+ port: {{ .Values.service.internalPort }}
+ clusterIP: None
+ selector:
+ app: {{ include "common.fullname" . }}
diff --git a/kubernetes/vid/charts/vid-galera/templates/statefulset.yaml b/kubernetes/vid/charts/vid-galera/templates/statefulset.yaml
new file mode 100644
index 0000000000..5470fdcc1c
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/templates/statefulset.yaml
@@ -0,0 +1,120 @@
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.fullname" . }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+spec:
+# serviceName: {{ include "common.fullname" . }}
+ serviceName: {{ .Values.service.name }}
+ replicas: {{ .Values.replicaCount }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.fullname" . }}
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ volumes:
+ {{- if .Values.externalConfig }}
+ - name: config
+ configMap:
+ name: {{ include "common.fullname" . }}-externalconfig
+ {{- end}}
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ imagePullSecrets:
+ - name: {{ include "common.namespace" . }}-docker-registry-key
+ containers:
+ - name: {{ include "common.fullname" . }}
+ image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy | quote}}
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: MYSQL_USER
+ value: {{ default "" .Values.config.userName | quote }}
+ - name: MYSQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "common.fullname" . }}
+ key: user-password
+ - name: MYSQL_DATABASE
+ value: {{ default "" .Values.config.mysqlDatabase | quote }}
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "common.fullname" . }}
+ key: db-root-password
+ ports:
+ - containerPort: {{ .Values.service.internalPort }}
+ name: {{ .Values.service.name }}
+ - containerPort: {{ .Values.service.sstPort }}
+ name: {{ .Values.service.sstName }}
+ - containerPort: {{ .Values.service.replicationPort }}
+ name: {{ .Values.service.replicationName }}
+ - containerPort: {{ .Values.service.istPort }}
+ name: {{ .Values.service.istName }}
+ readinessProbe:
+ exec:
+ command:
+ - /usr/share/container-scripts/mysql/readiness-probe.sh
+ initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readiness.periodSeconds }}
+ {{- if eq .Values.liveness.enabled true }}
+ livenessProbe:
+ exec:
+ command: ["mysqladmin", "ping"]
+ initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.liveness.periodSeconds }}
+ timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+ {{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ volumeMounts:
+ {{- if .Values.externalConfig }}
+ - mountPath: /etc/config
+ name: config
+ {{- end}}
+ - mountPath: /etc/localtime
+ name: localtime
+ readOnly: true
+{{- if .Values.persistence.enabled }}
+ - mountPath: /var/lib/mysql
+ name: {{ include "common.fullname" . }}-data
+ subPath: data
+ initContainers:
+ - name: mariadb-galera-prepare
+ image: "{{ include "common.repository" . }}/{{ .Values.imageInit }}"
+ command: ["sh", "-c", "chown -R 27:27 /var/lib/mysql"]
+ volumeMounts:
+ - name: {{ include "common.fullname" . }}-data
+ mountPath: /var/lib/mysql
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ include "common.fullname" . }}-data
+ annotations:
+ {{- if .Values.persistence.storageClass }}
+ volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }}
+ {{- else }}
+ volume.alpha.kubernetes.io/storage-class: default
+ {{- end }}
+ spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- end }}
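Editor's note: once the StatefulSet is up, Galera membership can be confirmed from any member pod; the pod name is a placeholder and the root password comes from the chart's secret:

  kubectl -n onap exec -it <vid-galera-pod> -- \
    mysql -uroot -p<db-root-password> -e "SHOW STATUS LIKE 'wsrep_cluster_size';"

With the default replicaCount of 1 the reported cluster size is 1.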
diff --git a/kubernetes/vid/charts/vid-galera/values.yaml b/kubernetes/vid/charts/vid-galera/values.yaml
new file mode 100644
index 0000000000..31955753df
--- /dev/null
+++ b/kubernetes/vid/charts/vid-galera/values.yaml
@@ -0,0 +1,118 @@
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 302
+ persistence: {}
+ repository: nexus3.onap.org:10001
+
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+
+#repository: mysql
+repository: nexus3.onap.org:10001
+image: adfinissygroup/k8s-mariadb-galera-centos:v002
+imageInit: busybox
+pullPolicy: IfNotPresent
+
+# application configuration
+config:
+ mariadbRootPassword: secretpassword
+# userName: my-user
+# userPassword: my-password
+# mysqlDatabase: my-database
+ userName: vidadmin
+ userPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
+ mysqlDatabase: vid_openecomp_epsdk
+
+
+# default number of instances in the StatefulSet
+# keep in mind that if this number is increased, you must also update vid-galera-config-job.yaml so that the job knows to wait for all pods.
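+# For example (illustrative), with replicaCount: 3 the job would need to wait for the
+# ...-mariadb-galera-0, -1 and -2 pods by passing additional --container-name arguments.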
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: false
+
+readiness:
+ initialDelaySeconds: 15
+ periodSeconds: 10
+
+## Persist data to a persistent volume
+persistence:
+ enabled: false
+
+ ## A manually managed Persistent Volume and Claim
+ ## Requires persistence.enabled: true
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+ volumeReclaimPolicy: Retain
+
+ ## database data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ accessMode: ReadWriteOnce
+ size: 2Gi
+
+
+service:
+ internalPort: 3306
+ name: vid-galera
+ portName: vid-galera
+ sstPort: 4444
+ sstName: sst
+ replicationPort: 4567
+ replicationName: replication
+ istPort: 4568
+ istName: ist
+
+ingress:
+ enabled: false
+
+
+## Configure MariaDB-Galera with a custom my.cnf file
+## ref: https://mariadb.com/kb/en/mariadb/configuring-mariadb-with-mycnf/#example-of-configuration-file
+##
+#externalConfig: {}
+externalConfig: |-
+ lower_case_table_names = 1
+#resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases the chances that charts run in environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ #
+ # Example:
+ # Configure resource requests and limits
+ # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ # Minimum resources for development are 2 CPU cores and 4GB of memory
+ # Minimum resources for production are 4 CPU cores and 8GB of memory
+resources:
+ limits:
+ cpu: 2
+ memory: 4Gi
+ requests:
+ cpu: 2
+ memory: 4Gi
+
+# Name for the mariadb-galera cluster - should be unique across all projects and other clusters
+nameOverride: vid-galera
+
+# DNS name for the mariadb-galera cluster - should be unique across all projects and other clusters
+#dnsnameOverride: mariadb-galera
diff --git a/kubernetes/vid/templates/check-job-completion-configmap.yaml b/kubernetes/vid/templates/check-job-completion-configmap.yaml
new file mode 100644
index 0000000000..b9c4488338
--- /dev/null
+++ b/kubernetes/vid/templates/check-job-completion-configmap.yaml
@@ -0,0 +1,83 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-check-job-completion
+ namespace: {{ include "common.namespace" . }}
+data:
+ vid_check_job_completion.py: |
+ #!/usr/bin/python
+ from __future__ import print_function
+ import time, argparse, logging, sys, os
+ import kubernetes.client
+ from kubernetes import client, config
+ from pprint import pprint
+
+ #extract env variables.
+ namespace = os.environ['NAMESPACE']
+ cert = os.environ['CERT']
+ host = os.environ['KUBERNETES_SERVICE_HOST']
+ token_path = os.environ['TOKEN']
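+ # NAMESPACE is set by the pod spec (see deployment.yaml); CERT and TOKEN are assumed to point at
+ # the pod's service-account CA bundle and token (typically /var/run/secrets/kubernetes.io/serviceaccount/)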
+
+ with open(token_path, 'r') as token_file:
+ token = token_file.read().replace('\n', '')
+
+ client.configuration.api_key['authorization'] = token
+ client.configuration.api_key_prefix['authorization'] = 'Bearer'
+ client.configuration.host = "https://" + str(host)
+ client.configuration.ssl_ca_cert = cert
+
+ api_instance = client.BatchV1Api()
+
+ #setup logging
+ log = logging.getLogger(__name__)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+ handler.setLevel(logging.INFO)
+ log.addHandler(handler)
+ log.setLevel(logging.INFO)
+
+
+ def is_ready(job_name):
+ log.info("Checking if " + job_name + " has completed")
+ pretty = True
+ job_status = False
+
+ try:
+ api_response = api_instance.read_namespaced_job_status(job_name, namespace, pretty=pretty)
+ except Exception as e:
+ print("Exception when calling BatchV1Api->read_namespaced_job_status: %s\n" % e)
+ # return early so api_response is not referenced when the call failed; the caller will retry
+ return job_status
+
+ pprint(api_response)
+ if api_response.status.succeeded == 1:
+ job_status_type = api_response.status.conditions[0].type
+ if job_status_type == "Complete":
+ job_status = True
+
+ print("[DBG] jobStatus: " + unicode(job_status))
+ return job_status
+
+
+ def main(args):
+ for job_name in args:
+ timeout = time.time() + 60 * 10
+ while True:
+ ready = is_ready(job_name)
+ if ready is True :
+ break
+ elif time.time() > timeout:
+ log.warning("timed out waiting for '" + job_name + "' to complete")
+ exit(1)
+ else:
+ time.sleep(5)
+
+
+ if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Wait for the named Kubernetes Jobs to complete.')
+ parser.add_argument('--job-name', action='append', required=True, help='A job name')
+ args = parser.parse_args()
+ arg_dict = vars(args)
+
+ for arg in arg_dict.itervalues():
+ main(arg)
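+ # invoked from the VID deployment's readiness init container as, for example:
+ #   python vid_check_job_completion.py --job-name vid-config-galera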
+
+
diff --git a/kubernetes/vid/templates/cluster-ready-configmap.yaml b/kubernetes/vid/templates/cluster-ready-configmap.yaml
new file mode 100644
index 0000000000..296db335a7
--- /dev/null
+++ b/kubernetes/vid/templates/cluster-ready-configmap.yaml
@@ -0,0 +1,89 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-cluster-ready-configmap
+ namespace: {{ include "common.namespace" . }}
+data:
+ vid_ready.py : |-
+ #!/usr/bin/python
+ from kubernetes import client, config
+ import time, argparse, logging, sys, os
+
+ #extract env variables.
+ namespace = os.environ['NAMESPACE']
+ cert = os.environ['CERT']
+ host = os.environ['KUBERNETES_SERVICE_HOST']
+ token_path = os.environ['TOKEN']
+
+ with open(token_path, 'r') as token_file:
+ token = token_file.read().replace('\n', '')
+
+ client.configuration.host = "https://" + host
+ client.configuration.ssl_ca_cert = cert
+ client.configuration.api_key['authorization'] = token
+ client.configuration.api_key_prefix['authorization'] = 'Bearer'
+
+ #setup logging
+ log = logging.getLogger(__name__)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+ handler.setLevel(logging.INFO)
+ log.addHandler(handler)
+ log.setLevel(logging.INFO)
+
+
+ def is_ready(container_name):
+ log.info( "Checking if " + container_name + " is ready")
+ # config.load_kube_config() # for local testing
+ # namespace='onap-sdc' # for local testing
+ v1 = client.CoreV1Api()
+
+ ready = False
+
+ try:
+ response = v1.list_namespaced_pod(namespace=namespace, watch=False)
+
+ for i in response.items:
+ #log.info(i.metadata.name)
+ for s in i.status.container_statuses:
+ #log.info(s.name)
+ if i.metadata.name == container_name:
+ ready = s.ready
+ if not ready:
+ log.info( container_name + " is not ready.")
+ else:
+ log.info( container_name + " is ready!")
+ else:
+ continue
+ return ready
+ except Exception as e:
+ log.error("Exception when calling list_namespaced_pod: %s\n" % e)
+
+
+ def main(args):
+ # args are a list of container names
+ for container_name in args:
+ # 10 min, TODO: make configurable
+ timeout = time.time() + 60 * 10
+ while True:
+ ready = is_ready(container_name)
+ if ready is True:
+ break
+ elif time.time() > timeout:
+ log.warning( "timed out waiting for '" + container_name + "' to be ready")
+ exit(1)
+ else:
+ time.sleep(5)
+
+
+ if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Wait for the named containers to become ready.')
+ parser.add_argument('--container-name', action='append', required=True, help='A container name')
+ args = parser.parse_args()
+ arg_dict = vars(args)
+
+ for arg in arg_dict.itervalues():
+ main(arg)
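+ # invoked from the init container in vid-galera-config-job.yaml as, for example:
+ #   python /root/vid_ready.py --container-name <release-fullname>-mariadb-galera-0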
+
+
+
diff --git a/kubernetes/vid/templates/dbcmd-configmap.yaml b/kubernetes/vid/templates/dbcmd-configmap.yaml
new file mode 100644
index 0000000000..7c06e748f4
--- /dev/null
+++ b/kubernetes/vid/templates/dbcmd-configmap.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-dbcmd-configmap
+ namespace: {{ include "common.namespace" . }}
+data:
+ db_cmd.sh : |-
+ #!/bin/sh
+ #mysql -uroot -p${MYSQL_ROOT_PASSWORD} -h${MYSQL_HOST} -P3306 < /db-config/vid-pre-init.sql
+ mysql -uvidadmin -p${MYSQL_PASSWORD} -h${MYSQL_HOST} -P3306 < /db-config/vid-pre-init.sql
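+ # MYSQL_PASSWORD and MYSQL_HOST are injected by vid-galera-config-job.yaml; vid-pre-init.sql is
+ # supplied by the galera-sql ConfigMap mounted at /db-config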
+
diff --git a/kubernetes/vid/templates/deployment.yaml b/kubernetes/vid/templates/deployment.yaml
index 6da8dd2e3f..c8545df689 100644
--- a/kubernetes/vid/templates/deployment.yaml
+++ b/kubernetes/vid/templates/deployment.yaml
@@ -31,11 +31,18 @@ spec:
release: {{ .Release.Name }}
spec:
initContainers:
+#dd775k: This init container checks whether the job that waits for all DB instances to come up and initializes the DB has finished.
+# - command:
+# - /bin/sh
+# args:
+# - "-c"
+# - "sleep 1000000000m"
- command:
- - /root/ready.py
+ - python
args:
- - --container-name
- - {{ .Values.mariadb.nameOverride }}
+ - /tmp/vid-check-job-completion/vid_check_job_completion.py
+ - --job-name
+ - vid-config-galera
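+ # vid-config-galera is the Job defined in vid-galera-config-job.yaml; vid_check_job_completion.py
+ # is supplied by the check-job-completion ConfigMap mounted below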
env:
- name: NAMESPACE
valueFrom:
@@ -45,6 +52,9 @@ spec:
image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
name: {{ include "common.name" . }}-readiness
+ volumeMounts:
+ - mountPath: /tmp/vid-check-job-completion
+ name: vid-check-job-completion
containers:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
@@ -107,10 +117,9 @@ spec:
- name: VID_MYSQL_USER
value: "{{ .Values.config.vidmysqluser }}"
- name: VID_MYSQL_PASS
- valueFrom:
- secretKeyRef:
- name: {{ template "common.fullname" . }}
- key: vid-password
+ value: "{{ .Values.config.vidmysqlpassword }}"
+ #valueFrom:
+ # secretKeyRef: {name: {{ include "common.fullname" . }}, key: vid-password}
- name: VID_MYSQL_MAXCONNECTIONS
value: "{{ .Values.config.vidmysqlmaxconnections }}"
volumeMounts:
@@ -158,5 +167,10 @@ spec:
- name: vid-logback
configMap:
name: {{ include "common.fullname" . }}-log-configmap
+ - name: vid-check-job-completion
+ configMap:
+ name: {{ include "common.fullname" . }}-check-job-completion
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
+
+
diff --git a/kubernetes/vid/templates/galera-sql-configmap.yaml b/kubernetes/vid/templates/galera-sql-configmap.yaml
new file mode 100644
index 0000000000..ccda497887
--- /dev/null
+++ b/kubernetes/vid/templates/galera-sql-configmap.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "common.fullname" . }}-galera-sql-configmap
+ namespace: "{{ include "common.namespace" . }}"
+data:
+ vid-pre-init.sql: |-
+ CREATE TABLE IF NOT EXISTS `vid_openecomp_epsdk`.`schema_info` (
+ `SCHEMA_ID` VARCHAR(25) NOT NULL,
+ `SCHEMA_DESC` VARCHAR(75) NOT NULL,
+ `DATASOURCE_TYPE` VARCHAR(100) NULL DEFAULT NULL,
+ `CONNECTION_URL` VARCHAR(200) NOT NULL,
+ `USER_NAME` VARCHAR(45) NOT NULL,
+ `PASSWORD` VARCHAR(45) NULL DEFAULT NULL,
+ `DRIVER_CLASS` VARCHAR(100) NOT NULL,
+ `MIN_POOL_SIZE` INT(11) NOT NULL,
+ `MAX_POOL_SIZE` INT(11) NOT NULL,
+ `IDLE_CONNECTION_TEST_PERIOD` INT(11) NOT NULL)
+ ENGINE = InnoDB
+ DEFAULT CHARACTER SET = utf8;
+
diff --git a/kubernetes/vid/templates/vid-galera-config-job.yaml b/kubernetes/vid/templates/vid-galera-config-job.yaml
new file mode 100644
index 0000000000..b02d5b4913
--- /dev/null
+++ b/kubernetes/vid/templates/vid-galera-config-job.yaml
@@ -0,0 +1,70 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: vid-config-galera
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: vid-config-galera
+ release: {{ .Release.Name }}
+spec:
+ template:
+ metadata:
+ name: vid-galera-init
+ spec:
+ initContainers:
+#dd775k: This init container checks that all Galera instances are up before the database is initialized.
+ - name: vid-init-galera-readiness
+ image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+ imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+# - /bin/sh
+# args:
+# - "-c"
+# - "sleep 1000000000m"
+ command:
+ - python
+ args:
+ - /root/vid_ready.py
+ - --container-name
+ - {{ include "common.fullname" . }}-mariadb-galera-0
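+ # ...-mariadb-galera-0 is the first pod of the Galera StatefulSet; if replicaCount is increased in
+ # the vid-galera values.yaml, additional --container-name arguments should be added here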
+ env:
+ - name: NAMESPACE
+ value: {{ include "common.namespace" . }}
+ volumeMounts:
+ - name: init-config
+ mountPath: /root/
+ containers:
+ - name: vid-config-galeradb
+ image: {{ .Values.mariadb_image }}
+ imagePullPolicy: "{{ .Values.pullPolicy }}"
+ volumeMounts:
+ - name: vid-db-config
+ mountPath: /db-config
+ - name: dbcmd-config
+ mountPath: /dbcmd-config
+ command:
+ - /bin/sh
+ args:
+ - -x
+ - /dbcmd-config/db_cmd.sh
+ env:
+ - name: MYSQL_PASSWORD
+ value: "{{ .Values.config.vidmysqlpassword }}"
+# valueFrom:
+# secretKeyRef:
+# name: {{ template "common.fullname" . }}
+# key: vid-password
+ - name: MYSQL_HOST
+ value: "{{ .Values.config.vidmysqlhost }}"
+ volumes:
+ - name: vid-db-config
+ configMap:
+ name: {{ include "common.fullname" . }}-galera-sql-configmap
+ - name: dbcmd-config
+ configMap:
+ name: {{ include "common.fullname" . }}-dbcmd-configmap
+ - name: init-config
+ configMap:
+ name: {{ include "common.fullname" . }}-cluster-ready-configmap
+ restartPolicy: Never
+
+
diff --git a/kubernetes/vid/values.yaml b/kubernetes/vid/values.yaml
index 3cb1a43b4e..8487743177 100644
--- a/kubernetes/vid/values.yaml
+++ b/kubernetes/vid/values.yaml
@@ -17,8 +17,9 @@
# Declare variables to be passed into your templates.
global:
nodePortPrefix: 302
+ repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
readinessRepository: oomk8s
- readinessImage: readiness-check:2.0.0
+ readinessImage: readiness-check:1.0.0
loggingRepository: docker.elastic.co
loggingImage: beats/filebeat:5.5.0
@@ -27,11 +28,11 @@ subChartsOnly:
# application image
repository: nexus3.onap.org:10001
-image: onap/vid:1.2.1
+image: onap/vid:2.0.0
pullPolicy: Always
-# flag to enable debugging - application support required
-debugEnabled: false
+# mariadb image used for database initialization
+mariadb_image: library/mariadb:10
# application configuration
config:
@@ -44,7 +45,7 @@ config:
vidmsopass: OBF:1ih71i271vny1yf41ymf1ylz1yf21vn41hzj1icz
msodme2serverurl: http://localhost:8081
vidcontactuslink: https://todo_contact_us_link.com
- vidmysqlhost: vid-mariadb
+ vidmysqlhost: vid-galera
vidmysqlport: "3306"
vidmysqldbname: vid_openecomp_epsdk
vidmysqluser: vidadmin
@@ -52,10 +53,9 @@ config:
logstashServiceName: log-ls
logstashPort: 5044
-
# subchart configuration
-mariadb:
- nameOverride: vid-mariadb
+mariadb-galera:
+ nameOverride: vid-mariadb-galera
# default number of instances
replicaCount: 1
diff --git a/kubernetes/vid/charts/mariadb/Chart.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/Chart.yaml
index 1f7de32fdf..db7201d7c6 100644
--- a/kubernetes/vid/charts/mariadb/Chart.yaml
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/Chart.yaml
@@ -13,6 +13,6 @@
# limitations under the License.
apiVersion: v1
-description: VID MariaDB Service
-name: mariadb
+description: ONAP VNFSDK Postgres Database
+name: vnfsdk-postgres
version: 2.0.0
diff --git a/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml
new file mode 100644
index 0000000000..88d6238f35
--- /dev/null
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/deployment.yaml
@@ -0,0 +1,44 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: {{ include "common.fullname" . }}
+ namespace: {{ include "common.namespace" . }}
+ labels:
+ app: {{ include "common.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ include "common.name" . }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "common.name" . }}
+ release: {{ .Release.Name }}
+ name: {{ include "common.name" . }}
+ spec:
+ hostname: {{ include "common.name" . }}
+ containers:
+ - args:
+ image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+ imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
+ name: {{ include "common.name" . }}
+ imagePullSecrets:
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/vid/charts/mariadb/templates/service.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml
index 35333c441e..f6208cf965 100644
--- a/kubernetes/vid/charts/mariadb/templates/service.yaml
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/templates/service.yaml
@@ -23,9 +23,16 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
+ type: {{ .Values.service.type }}
ports:
+ {{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.internalPort }}
- name: {{ .Values.service.portName }}
+ nodePort: {{ .Values.global.nodePortPrefix | default "302" }}{{ .Values.service.nodePort }}
+ {{- else -}}
+ - port: {{ .Values.service.externalPort }}
+ targetPort: {{ .Values.service.internalPort }}
+ {{- end}}
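+ # with NodePort the rendered port is the global prefix (default "302") concatenated with
+ # .Values.service.nodePort, e.g. a suffix of "71" would yield 30271 (illustrative; this chart
+ # defaults to ClusterIP and defines no nodePort suffix)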
+ name: {{ .Values.service.portName | default "http" }}
selector:
app: {{ include "common.name" . }}
release: {{ .Release.Name }}
diff --git a/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml b/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml
new file mode 100644
index 0000000000..38edd145aa
--- /dev/null
+++ b/kubernetes/vnfsdk/charts/vnfsdk-postgres/values.yaml
@@ -0,0 +1,63 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 302
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+# application image
+repository: nexus3.onap.org:10001
+image: onap/vnfsdk/refrepo/postgres:1.1.1
+pullPolicy: Always
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ # necessary to disable liveness probe when setting breakpoints
+ # in debugger so K8s doesn't restart unresponsive container
+ enabled: true
+
+readiness:
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+service:
+ type: ClusterIP
+ name: vnfsdk-postgres
+ portName: vnfsdk-postgres
+ internalPort: 5432
+ externalPort: 5432
+
+ingress:
+ enabled: false
diff --git a/kubernetes/vnfsdk/requirements.yaml b/kubernetes/vnfsdk/requirements.yaml
index 3b2a02a480..ce82a2f838 100644
--- a/kubernetes/vnfsdk/requirements.yaml
+++ b/kubernetes/vnfsdk/requirements.yaml
@@ -16,6 +16,3 @@ dependencies:
- name: common
version: ~2.0.0
repository: '@local'
- - name: postgres
- version: ~2.0.0
- repository: '@local'
diff --git a/kubernetes/vnfsdk/templates/deployment.yaml b/kubernetes/vnfsdk/templates/deployment.yaml
index d8a3d360df..8220553cf6 100644
--- a/kubernetes/vnfsdk/templates/deployment.yaml
+++ b/kubernetes/vnfsdk/templates/deployment.yaml
@@ -38,7 +38,7 @@ spec:
- /root/ready.py
args:
- --container-name
- - "{{ .Values.postgres.nameOverride }}"
+ - "{{ .Values.vnfsdkpostgres.nameOverride }}"
env:
- name: NAMESPACE
valueFrom:
@@ -52,6 +52,9 @@ spec:
- image: "{{ include "common.repository" . }}/{{ .Values.image }}"
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
name: {{ include "common.name" . }}
+ env:
+ - name: POSTGRES_SERVICE_HOST
+ value: "$(VNFSDK_POSTGRES_SERVICE_HOST)"
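+ # VNFSDK_POSTGRES_SERVICE_HOST is injected automatically by Kubernetes for the vnfsdk-postgres
+ # Service; the $(...) syntax expands that variable when the container starts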
readinessProbe:
tcpSocket:
port: {{ .Values.service.internalPort }}
diff --git a/kubernetes/vnfsdk/values.yaml b/kubernetes/vnfsdk/values.yaml
index 200821c434..edca2ece6c 100644
--- a/kubernetes/vnfsdk/values.yaml
+++ b/kubernetes/vnfsdk/values.yaml
@@ -31,22 +31,9 @@ repository: nexus3.onap.org:10001
image: onap/vnfsdk/refrepo:1.1.1
pullPolicy: Always
-postgres:
+#subchart name
+vnfsdkpostgres:
nameOverride: vnfsdk-postgres
- service:
- name: vnfsdk-dbset
- name2: vnfsdk-dbpri
- name3: vnfsdk-dbrep
- nfsprovisionerPrefix: vnfsdk
- persistence:
- mountSubPath: vnfsdk/data
- mountInitPath: vnfsdk
- enabled: true
- disableNfsProvisioner: true
- container:
- name:
- primary: vnfsdk-dbpri
- replica: vnfsdk-dbrep
# flag to enable debugging - application support required
debugEnabled: false
diff --git a/message-router-blueprint.yaml b/message-router-blueprint.yaml
deleted file mode 100644
index 98a283020b..0000000000
--- a/message-router-blueprint.yaml
+++ /dev/null
@@ -1,532 +0,0 @@
-tosca_definitions_version: cloudify_dsl_1_3
-
-description: >
- This example deploys the OOM Message Router application. Each service/deployment pair is associated with a single Kubernetes node.
- Node template naming convention: PROVISIONINGAPI_RESOURCETYPE_APPLICATIONCOMPONENT
- The following resources are created:
- - Security Group
- - openstack_port_zookeeper - NIC that connects to the Openstack Server
- - openstack_port_kafka - NIC that connects to the Openstack Server
- - openstack_port_dmaap - NIC that connects to the Openstack Server
- - openstack_server_zookeeper - a VM that a Kubernetes Node is installed on.
- - openstack_server_kafka - a VM that a Kubernetes Node is installed on.
- - openstack_server_dmaap - a VM that a Kubernetes Node is installed on.
- - kubernetes_node_zookeeper - a Kubernetes node that will join the Kubernetes cluster.
- - kubernetes_node_kafka - a Kubernetes node that will join the Kubernetes cluster.
- - kubernetes_node_dmaap - a Kubernetes node that will join the Kubernetes cluster.
- - kubernetes_deployment_zookeeper - a Kubernetes deployment.
- - kubernetes_deployment_kafka - a Kubernetes deployment.
- - kubernetes_deployment_dmaap - a Kubernetes deployment.
- - kubernetes_service_zookeeper - a Kubernetes service.
- - kubernetes_service_kafka - a Kubernetes service.
- - kubernetes_service_dmaap - a Kubernetes service.
- The following pre-setup steps are assumed, but not required:
- - Create Cloudify Example Environment: https://github.com/cloudify-examples/cloudify-environment-setup.
- - Create Kubernetes Cluster: https://github.com/cloudify-examples/simple-kubernetes-blueprint.
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- # Plugin required: https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-openstack-plugin/2.2.0/plugin.yaml
- # Plugin required: https://github.com/cloudify-incubator/cloudify-utilities-plugin/releases/download/1.2.5/cloudify_utilities_plugin-1.2.5-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-incubator/cloudify-utilities-plugin/1.2.5/plugin.yaml
- # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.0/cloudify_kubernetes_plugin-1.2.0-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.0/plugin.yaml
- # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-diamond-plugin/1.3.5/cloudify_diamond_plugin-1.3.5-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-diamond-plugin/1.3.5/plugin.yaml
- # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-fabric-plugin/1.5/cloudify_fabric_plugin-1.5-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-fabric-plugin/1.5/plugin.yaml
- - cloudify/types/kubernetes.yaml
-
-inputs:
-
- configuration_file_content:
- type: string
-
- NS:
- default: oom
-
- image:
- description: Image to be used when launching agent VMs
- default: { get_secret: centos_core_image }
-
- flavor:
- description: Flavor of the agent VMs
- default: { get_secret: large_image_flavor }
-
- agent_user:
- description: >
- User for connecting to agent VMs
- default: centos
-
- app_name:
- default: message-router
-
- security_group:
- default: { concat: [ 'secgrp_', { get_input: app_name } ] }
-
-dsl_definitions:
-
- openstack_config: &openstack_config
- username: { get_secret: keystone_username }
- password: { get_secret: keystone_password }
- tenant_name: { get_secret: keystone_tenant_name }
- auth_url: { get_secret: keystone_url }
- region: { get_secret: region }
-
-groups:
-
- openstack_server_port_group_zookeeper:
- members:
- - openstack_server_zookeeper
- - openstack_port_zookeeper
-
- openstack_server_port_group_kafka:
- members:
- - openstack_server_kafka
- - openstack_port_kafka
-
- openstack_server_port_group_dmaap:
- members:
- - openstack_server_dmaap
- - openstack_port_dmaap
-
-policies:
-
- openstack_server_port_policies_scaling:
- type: cloudify.policies.scaling
- properties:
- default_instances: 1
- targets:
- - openstack_server_port_group_zookeeper
- - openstack_server_port_group_kafka
- - openstack_server_port_group_dmaap
-
-node_templates:
-
- kubernetes_service_zookeeper:
- type: cloudify.kubernetes.resources.Service
- properties:
- definition:
- apiVersion: v1
- kind: Service
- metadata:
- name: zookeeper
- labels:
- app: zookeeper
- spec:
- ports:
- - name: zookeeper1
- port: 2181
- selector:
- app: zookeeper
- clusterIP: None
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: k8s
- - type: cloudify.relationships.depends_on
- target: kubernetes_deployment_zookeeper
-
- kubernetes_deployment_zookeeper:
- type: cloudify.kubernetes.resources.Deployment
- properties:
- definition:
- file:
- resource_path: kubernetes/message-router/message-router-zookeeper.yaml
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: k8s
- - type: cloudify.relationships.depends_on
- target: kubernetes_node_zookeeper
-
- kubernetes_node_zookeeper:
- type: cloudify.nodes.Kubernetes.Node
- relationships:
- - type: cloudify.relationships.contained_in
- target: openstack_server_zookeeper
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_task
- inputs:
- task_properties:
- hostname: { get_attribute: [ SELF, hostname ] }
- labels:
- app: zookeeper
-
- openstack_server_zookeeper:
- type: cloudify.openstack.nodes.Server
- properties: &openstack_server_properties
- openstack_config: *openstack_config
- agent_config:
- user: { get_input: agent_user }
- install_method: remote
- port: 22
- key: { get_secret: agent_key_private }
- server:
- key_name: ''
- image: ''
- flavor: ''
- management_network_name: { get_property: [ private_network, resource_id ] }
- relationships:
- - type: cloudify.relationships.contained_in
- target: k8s_node_scaling_tier
- - target: openstack_port_zookeeper
- type: cloudify.openstack.server_connected_to_port
- - type: cloudify.relationships.depends_on
- target: cloud_init_openstack_server
- interfaces: &openstack_server_interfaces
- cloudify.interfaces.lifecycle:
- create:
- inputs:
- args:
- image: { get_input: image }
- flavor: { get_input: flavor }
- userdata: { get_attribute: [ cloud_init_openstack_server, cloud_config ] }
- cloudify.interfaces.monitoring_agent:
- install:
- implementation: diamond.diamond_agent.tasks.install
- inputs:
- diamond_config:
- interval: 1
- start: diamond.diamond_agent.tasks.start
- stop: diamond.diamond_agent.tasks.stop
- uninstall: diamond.diamond_agent.tasks.uninstall
- cloudify.interfaces.monitoring:
- start:
- implementation: diamond.diamond_agent.tasks.add_collectors
- inputs:
- collectors_config:
- CPUCollector: {}
- MemoryCollector: {}
- LoadAverageCollector: {}
- DiskUsageCollector:
- config:
- devices: x?vd[a-z]+[0-9]*$
- NetworkCollector: {}
- ProcessResourcesCollector:
- config:
- enabled: true
- unit: B
- measure_collector_time: true
- cpu_interval: 0.5
- process:
- hyperkube:
- name: hyperkube
-
- openstack_port_zookeeper:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_config
- relationships: &openstack_port_relationships
- - type: cloudify.relationships.contained_in
- target: k8s_node_scaling_tier
- - type: cloudify.relationships.connected_to
- target: private_network
- - type: cloudify.relationships.depends_on
- target: private_subnet
- - type: cloudify.openstack.port_connected_to_security_group
- target: security_group
-
- kubernetes_service_kafka:
- type: cloudify.kubernetes.resources.Service
- properties:
- definition:
- apiVersion: v1
- kind: Service
- metadata:
- name: global-kafka
- labels:
- app: global-kafka
- spec:
- ports:
- - name: kafka1
- port: 9092
- selector:
- app: global-kafka
- clusterIP: None
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: k8s
- - type: cloudify.relationships.depends_on
- target: kubernetes_deployment_kafka
-
- kubernetes_deployment_kafka:
- type: cloudify.kubernetes.resources.Deployment
- properties:
- definition:
- file:
- resource_path: kubernetes/message-router/message-router-kafka.yaml
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: k8s
- - type: cloudify.relationships.depends_on
- target: kubernetes_node_kafka
- - type: cloudify.relationships.depends_on
- target: kubernetes_deployment_zookeeper
-
- kubernetes_node_kafka:
- type: cloudify.nodes.Kubernetes.Node
- relationships:
- - type: cloudify.relationships.contained_in
- target: openstack_server_kafka
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_task
- inputs:
- task_properties:
- hostname: { get_attribute: [ SELF, hostname ] }
- labels:
- app: global-kafka
-
- openstack_server_kafka:
- type: cloudify.openstack.nodes.Server
- properties: *openstack_server_properties
- relationships:
- - type: cloudify.relationships.contained_in
- target: k8s_node_scaling_tier
- - target: openstack_port_kafka
- type: cloudify.openstack.server_connected_to_port
- - type: cloudify.relationships.depends_on
- target: cloud_init_openstack_server
- interfaces: *openstack_server_interfaces
-
- openstack_port_kafka:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_config
- relationships: *openstack_port_relationships
-
- kubernetes_service_dmaap:
- type: cloudify.kubernetes.resources.Service
- properties:
- definition:
- apiVersion: v1
- kind: Service
- metadata:
- name: dmaap
- labels:
- app: dmaap
- version: 1.0.0
- spec:
- ports:
- - name: mr1
- port: 3904
- nodePort: 30227
- - name: mr2
- port: 3905
- nodePort: 30226
- selector:
- app: dmaap
- type: NodePort
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: k8s
- - type: cloudify.relationships.depends_on
- target: kubernetes_deployment_dmaap
-
- kubernetes_deployment_dmaap:
- type: cloudify.kubernetes.resources.Deployment
- properties:
- definition:
- file:
- resource_path: kubernetes/message-router/message-router-dmaap.yaml
- relationships:
- - type: cloudify.kubernetes.relationships.managed_by_master
- target: k8s
- - type: cloudify.relationships.depends_on
- target: kubernetes_node_dmaap
- - type: cloudify.relationships.depends_on
- target: kubernetes_deployment_zookeeper
-
- kubernetes_node_dmaap:
- type: cloudify.nodes.Kubernetes.Node
- relationships:
- - type: cloudify.relationships.contained_in
- target: openstack_server_dmaap
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- implementation: fabric.fabric_plugin.tasks.run_task
- inputs:
- task_properties:
- hostname: { get_attribute: [ SELF, hostname ] }
- labels:
- app: global-dmaap
-
- openstack_server_dmaap:
- type: cloudify.openstack.nodes.Server
- properties: *openstack_server_properties
- relationships:
- - type: cloudify.relationships.contained_in
- target: k8s_node_scaling_tier
- - target: openstack_port_dmaap
- type: cloudify.openstack.server_connected_to_port
- - type: cloudify.relationships.depends_on
- target: cloud_init_openstack_server
- interfaces: *openstack_server_interfaces
-
- openstack_port_dmaap:
- type: cloudify.openstack.nodes.Port
- properties:
- openstack_config: *openstack_config
- relationships: *openstack_port_relationships
-
- security_group:
- type: cloudify.openstack.nodes.SecurityGroup
- properties:
- openstack_config: *openstack_config
- security_group:
- name: { get_input: security_group }
- description: kubernetes master security group
- rules:
- rules:
- - remote_ip_prefix: 0.0.0.0/0
- port: 22
- - remote_ip_prefix: 0.0.0.0/0
- port: 53
- - remote_ip_prefix: 0.0.0.0/0
- port: 53
- protocol: udp
- - remote_ip_prefix: 0.0.0.0/0
- port: 80
- - remote_ip_prefix: 0.0.0.0/0
- port: 443
- - remote_ip_prefix: 0.0.0.0/0
- port: 2379
- - remote_ip_prefix: 0.0.0.0/0
- port: 4001
- - remote_ip_prefix: 0.0.0.0/0
- port: 4789
- - remote_ip_prefix: 0.0.0.0/0
- port: 6443
- - remote_ip_prefix: 0.0.0.0/0
- port: 6783
- protocol: tcp
- - remote_ip_prefix: 0.0.0.0/0
- port: 6783
- protocol: udp
- - remote_ip_prefix: 0.0.0.0/0
- port: 6784
- protocol: tcp
- - remote_ip_prefix: 0.0.0.0/0
- port: 6784
- protocol: udp
- - remote_ip_prefix: 0.0.0.0/0
- port: 8000
- - remote_ip_prefix: 0.0.0.0/0
- port: 8080
- - remote_ip_prefix: 0.0.0.0/0
- port: 9090
- - remote_ip_prefix: 0.0.0.0/0
- port: 10250
- - remote_ip_prefix: 0.0.0.0/0
- port: 2181
- - remote_ip_prefix: 0.0.0.0/0
- port: 9092
- - remote_ip_prefix: 0.0.0.0/0
- port: 3904
- - remote_ip_prefix: 0.0.0.0/0
- port: 30227
- - remote_ip_prefix: 0.0.0.0/0
- port: 3905
- - remote_ip_prefix: 0.0.0.0/0
- port: 30226
-
- private_subnet:
- type: cloudify.openstack.nodes.Subnet
- properties:
- openstack_config: *openstack_config
- use_external_resource: true
- resource_id: { get_secret: private_subnet_name }
- relationships:
- - target: private_network
- type: cloudify.relationships.contained_in
-
- private_network:
- type: cloudify.openstack.nodes.Network
- properties:
- openstack_config: *openstack_config
- use_external_resource: true
- resource_id: { get_secret: private_network_name }
-
- external_network:
- type: cloudify.openstack.nodes.Network
- properties:
- openstack_config: *openstack_config
- use_external_resource: true
- resource_id: { get_secret: external_network_name }
-
- cloud_init_openstack_server:
- type: cloudify.nodes.CloudInit.CloudConfig
- properties:
- resource_config:
- groups:
- - docker
- users:
- - name: { get_input: agent_user }
- primary-group: wheel
- groups: docker
- shell: /bin/bash
- sudo: ['ALL=(ALL) NOPASSWD:ALL']
- ssh-authorized-keys:
- - { get_secret: agent_key_public }
- write_files:
- - path: /etc/yum.repos.d/kubernetes.repo
- owner: root:root
- permissions: '0444'
- content: |
- # installed by cloud-init
- [kubernetes]
- name=Kubernetes
- baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
- enabled=1
- gpgcheck=1
- repo_gpgcheck=1
- gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
- https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- packages:
- - [epel-release]
- - [gcc]
- - [python-dev]
- - [python-wheel]
- - [python-setuptools]
- - [libffi-devel]
- - [python-devel]
- - [openssl-devel]
- - [docker, 1.12.6-28.git1398f24.el7.centos]
- - [kubelet, 1.6.4-0]
- - [kubeadm, 1.6.4-0]
- - [kubectl, 1.6.4-0]
- - [kubernetes-cni, 0.5.1-0]
- - [git]
- - [wget]
- runcmd:
- - systemctl enable docker
- - systemctl start docker
- - systemctl enable kubelet
- - systemctl start kubelet
- - yum install -y python-pip
- - pip install --upgrade pip
- - pip install docker-compose
- - pip install backports.ssl_match_hostname --upgrade
- - mkdir -p /tmp/oom/
- - git clone https://gerrit.onap.org/r/oom.git /tmp/oom
- - sleep 15
- - chmod 755 /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
- - sed -i -e "s/\.\/docker_files/\/tmp\/oom\/kubernetes\/config\/docker\/init\/src\/config\/message-router\/dcae-startup-vm-message-router\/docker_files/g" /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
- - sed -i -e "s/\/opt\/docker\/docker-compose/\/bin\/docker-compose/g" /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
- - mv /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/__docker-compose.yml /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/docker-compose.yml
- - mv /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/__MsgRtrApi.properties /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/MsgRtrApi.properties
- - sh -c /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
-
- k8s_node_scaling_tier:
- type: cloudify.nodes.Root
-
- k8s:
- type: cloudify.kubernetes.nodes.Master
- properties:
- configuration:
- file_content: { get_input: configuration_file_content }
-