| author    | Hong Guan <hg4105@att.com>                       | 2017-08-14 13:28:23 +0000 |
| committer | Gerrit Code Review <gerrit@onap.org>             | 2017-08-14 13:28:23 +0000 |
| commit    | ca98432389ae8b432bafdf53ffa9b6b077883b91 (patch)  | |
| tree      | bdf2d381dd640c53a526296ae0aa04b4b7c949a4          | |
| parent    | 5197e2e88144a536068fb75e912f91b20e39db2e (diff)   | |
| parent    | 5e1853a28f9ca1d13280db06fdb9edaf33fd7354 (diff)   | |
Merge "[OOM-71] Message Router Kubernetes-Tosca Templates"
| -rw-r--r-- | cloudify/inputs/message-router-blueprint.yaml.example | 25  |
| -rw-r--r-- | cloudify/scripts/configure_node.py                    | 49  |
| -rw-r--r-- | cloudify/scripts/create.py                            | 72  |
| -rw-r--r-- | cloudify/scripts/tasks.py                             | 24  |
| -rw-r--r-- | cloudify/types/kubernetes.yaml                        | 91  |
| -rw-r--r-- | message-router-blueprint.yaml                         | 532 |
6 files changed, 793 insertions, 0 deletions
diff --git a/cloudify/inputs/message-router-blueprint.yaml.example b/cloudify/inputs/message-router-blueprint.yaml.example
new file mode 100644
index 0000000000..ecab0eca2c
--- /dev/null
+++ b/cloudify/inputs/message-router-blueprint.yaml.example
@@ -0,0 +1,25 @@
+join_command: kubeadm join --token f66aad.cb001cc90bd69b38 192.168.120.6:6443
+kubernetes_master_ip: 192.168.120.6
+flavor: 3
+configuration_file_content:
+  apiVersion: v1
+  clusters:
+  - cluster:
+      certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01EZ3dNekEzTXpJek4xb1hEVEkzTURnd01UQTNNekl6TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTUF4Ckxzdmkyek1ZU0pjaG5QWjVDUkJQTnBLbklHTDlHY1FYRFZnQjNEc0FuaTVpc2VadDlmeENtOURxSS94NkkrRGoKSlA5ZkNNbEo5a3Z1OGgvZFF4dWJFbHhaSmZkdkFqY3p0RlVWdGpaVGREcTFDTk81UENOcnNRSkdQVS9HWDNzagpRWmlHYVNPYmJJOGQ0d2Z0bkI5dE51ZDNXMnZDZmZJUzNCNU9YMVRVMzBjVE1xVnJjZ0FLT2EvR2FUK01WV3c2CkVHZDErWmVoYWZBUWJDeG1jbHRpMlJHSUNVakpLc2xqUFRUS3JTNXJVMkwxUjdYSFd3SUhyWWtuZ05SQllwTkQKaXk3UjlCZy93S1dkMVNYVVpUODU3eE8xdjB0aU9ucFJML0tGS2IrcHBKUnVITDVORE9TbTJZSzR1OFI3MjFudgpyYVNOSTk2K0VoVGhWL2U4VWU4Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFOUFhbWQzL0JmRVAyRitSeXJRdXp2TGQzSWEKbGZoR3Fab1JqZWFId1pnanVwQVh0VXdzd0JiYkFhZm5XMXJDd3VDVldRYXVYVWhyZ1VNelcvbEQ2blBYYWtUcgpwWTJ6NG83ZG90dlZSekVtN0dmWllMUUs2cW9Wczk4TTRjS3RhdjVlL3VVcXFGckY2NVYzUE1QV3M1NGp2Q1U5CklFTDJ0ZmQ1TzFrMGlEcXFtdWdBVjgxblNOdHlnK0FZN3o5SVdXRFhKcTNUQ1RHQnZLQmxCdzNWSDVBbnQxblEKSFNrSmJ0ZGhpaFA0KzU0emlKZEhPNFcxekFGam4zUVpIZVZDNU8rSkdSOWNZWW5aTHc4ZC92YmxZeXRpTWZPVwoyN3VzcW1RbmtPZDliNXozaTlvRDBvUUYyY1RObk85NzJkeTBuTmhiK0VMclpGNEpKUS9XVjB0Z083ST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+      server: https://192.168.120.6:6443
+    name: kubernetes
+  contexts:
+  - context:
+      cluster: kubernetes
+      user: kubernetes-admin
+    name: kubernetes-admin@kubernetes
+  current-context: kubernetes-admin@kubernetes
+  kind: Config
+  preferences: {}
+  users:
+  - name: kubernetes-admin
+    user:
+      client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJSm9EQWNpYWVkSVF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4TnpBNE1ETXdOek15TXpkYUZ3MHhPREE0TURNd056TXlNemxhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQW1xd3duNlU0TFVFQkphMWUKQzIrUjM0K0oyZ3BBYTJ0aDVOZXdnS2NhUWQwaE5BODNjNE1IMDFVUjU3b3ByNUNFczFQVmVwMkZtczlpaFRITwo0SUpINjkxUVQvTUVJZE5iWTl0RXdDV21ia1lMbFBjc09yclErYTl5VGdxYm5IWjBONnJOdUZ4dDB2alRPSUR1CmRDMnBQR3dFMW5kaHd1VVB3UUFxeS9SVjN6MTgzRnoyOWZuVHg3UXdWR0J4Rk84Z0JxRTFRYTVYenhIZ0lTQ2sKSkJka2FtRUFhSjl6NHgwZjFmbHQ4MG4wZ3RHRitkbUZuMThkbGwzZmoreGpNOGxqS21QZnRNdlc4MXF0bkVnZApoU1I3bWdMODlUckx3SmFtNkxmVmZhN29CWWJvWUMyT2gvKytZMkpwOXRpRkMyZ1ExeVBXSHJBMVZJTVBQUWdkCk8yTGNuUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIZ2ZjRVd6R08yQ1p0cEJFbUxzbllXWTJmdGlSOU1BNHY5OQpXVFhBUzNzZ3VJTm43WktUUElSeTVyTmVmSTVhS1ltMWMyU0w5ZzJlM0JpeFZUUHRsYmRWczVBanMxWnVWRGRkClhmYk93blozcnBQbDZoenpxSVh2VmxsNzI4VC9hZDRJbmZ6SFVtT1o3YSs4enBIUS9EREZKLzR1aDYrSVlnSFkKVzBBQmFXMXpOc3lQSzNhK3paV0ROSVFvNS8yTVFJYkNwN1ZQOHhobUUxZ1diY1BxVmJ1YVZJY09IZjkvUVhqeQpKZTdoK2tzSEJPNUFZczRZOFZBYXlVb0t4bTJZbmNkZHJGTWl4STRKNEkrSUp5aGRPdE5TNG1lTmcyMXIwN3U2ClZkL2E2SGt6ekxFcmdqWkxzVktIK0RUMTVhTWNSZGg3OVE1YXo1ckh1Qm5vZ0RYejFtVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+      client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbXF3d242VTRMVUVCSmExZUMyK1IzNCtKMmdwQWEydGg1TmV3Z0tjYVFkMGhOQTgzCmM0TUgwMVVSNTdvcHI1Q0VzMVBWZXAyRm1zOWloVEhPNElKSDY5MVFUL01FSWROYlk5dEV3Q1dtYmtZTGxQY3MKT3JyUSthOXlUZ3FibkhaME42ck51Rnh0MHZqVE9JRHVkQzJwUEd3RTFuZGh3dVVQd1FBcXkvUlYzejE4M0Z6Mgo5Zm5UeDdRd1ZHQnhGTzhnQnFFMVFhNVh6eEhnSVNDa0pCZGthbUVBYUo5ejR4MGYxZmx0ODBuMGd0R0YrZG1GCm4xOGRsbDNmait4ak04bGpLbVBmdE12VzgxcXRuRWdkaFNSN21nTDg5VHJMd0phbTZMZlZmYTdvQllib1lDMk8KaC8rK1kySnA5dGlGQzJnUTF5UFdIckExVklNUFBRZ2RPMkxjblFJREFRQUJBb0lCQUhxbjMrdEo5ekdUNGhnQgowcGxaQWFINnp3TzBxMzlENlo2ekdNbjlPY3BQVkp4WEVNOHVjbzg1WC9pV1hhWlhBWlMvLzRPNzFRNStOUStRCi94QjA0Qm9BS0VjdVhQR0NEWEF6bXVLUk9Oa3IvTlZGNmJJdElibFBVMkxsOEo3MEpKZGNnTVVacnhIbHRvS1IKWkFlSGlqUmJLTDcyYnZWQjl1dERlYXpCZHpPTzhHbG5VaU5WTWRoaVowazRNbEFobmV0ZjNNazFHbXFjbHJyNApISjIwbElSR2NWTWRqZm1OaThFVG5LckRwWWNvRUZ5QnozMVN2RHVTaU1GVm9sUWpZMkU1N2kyd1RVdDlSU1NjCk5oRlpEM2s1dkxwMFNIcjZtSXRURW1jY0w2VDdzTDh0UXNGLzhaZG9aUXpoRzRXUU5IZ00yUldsdEN4eklCNy8KT3czUk5OVUNnWUVBelcvNVdkWk5QV2hsRXR2VGQ4a1FjbUF3VkVYaGgrU2NvajhpVGdHbW5GNXhsSGhWVjZUdwpVYzRtRmhGU0JBSGpRWlN5Vm1NTDkwMWU1UE1aOXVRQ05Xb0pWVzU4cUI0VDJsRXNKRjJkdXdRSVZDL2g4QkhiClJ4TVZLaDJhdHZKR2dHbWsxME5tblZTYmxQVVpDVVBRWFN4R1B5VXh0UStSSmRUNHVPSm43QXNDZ1lFQXdMMnIKNUlQeFRvTHplZ254b0I5Z0RnbnFBazB3b3NicHg3V2pJY2RpdnlWNGpib2U3TmlYbEpQZXJ3MmExd2M2Ky96VgpSeVpkUjN2U1lrUnczNnp4Q1N0UHZhRFVMT053eDhtSjVRVVIwYXdReEQ4R1ZneHZmVTBhYzdqeW04L2laZWpjCkk5V1UxOXo0eEk3akIvMXNYOFpFTWFtb1RXOGVUM0I4aWNPUEd2Y0NnWUVBcWtOZmVlRnU2cklXOHVaV0FUVVcKK0hSWUdjQkJCd3VsOWFJMW9Fa2wrUHNkVDF2Yi8yT24rV1RObEFTTzROdGZxZjYvUDNHZmZUc1dwdElFZHViSwpIZExnSVhvTXZwa1BBeVc3Vy9ocXZaQytCbWdZN1lzZkhXem5ZWnhmbWJoNlRmdEFyMWdoTjh2amxqVDhwdjBaCk45OTE2T2UrcHIxY0l1cTdxUitiMmJrQ2dZQUxMYlQvZnV1SzZ5dGw0NWZBK3JEZWY1S3o2WGd0cUsyOGFIdDYKcFE3RUdVOUJvUTdVRzhmRzFVQ3dGSERya2I3SkNLUHlDWGFWZzhmeTdSZEMwY3YxQlM4Tngzc20wMVlpTUQwdwpMRGdaV2dwcTUyRGRzc0R3bW4welF3SEhLYXB1MEsrYjRISk9oc0ZpM1FxcjF2WG5KQittWmJtZUxCaXM4TkE0ClNVQk9od0tCZ0drTkJhaTFWVU9RaGVYU3Mrb3JPVWxDNDNsenlBanJZa2dod0lRd25LTWpqK2lrak9oNmtqc3IKL1lDTHVRcWNBYWNKVEF2VmZOVGcyZldyUUJTODlwVjlFRVBnV0JIQmt4a1JsNnY0WTFQZVRqOVVzeVdzaHljYQpXRkFHSkpDMXg1NWg2OWdFWSsyR1NpUEQ0MzNrQUZUd3NBUEhPbmRwdmlOTVBLek9KTldnCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
+
+
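The inputs example above bundles the kubeadm join command, the master IP, and a full admin kubeconfig (cluster CA plus client certificate/key) under configuration_file_content. The following is a minimal sketch, not part of this change, of how such an inputs file could be assembled from an existing admin kubeconfig; the file paths, the placeholder join command and master IP, and the PyYAML dependency are assumptions made for illustration.

import os

import yaml  # PyYAML, assumed to be available

# Placeholders -- substitute the real kubeadm join command and master address.
JOIN_COMMAND = 'kubeadm join --token <token> <master-ip>:6443'
MASTER_IP = '<master-ip>'

# Reuse the cluster's admin kubeconfig as configuration_file_content.
with open(os.path.expanduser('~/.kube/config')) as f:
    kubeconfig = yaml.safe_load(f)

inputs = {
    'join_command': JOIN_COMMAND,
    'kubernetes_master_ip': MASTER_IP,
    'flavor': 3,
    'configuration_file_content': kubeconfig,
}

# Write an inputs file shaped like the committed example.
with open('cloudify/inputs/message-router-blueprint.yaml', 'w') as f:
    yaml.safe_dump(inputs, f, default_flow_style=False)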
diff --git a/cloudify/scripts/configure_node.py b/cloudify/scripts/configure_node.py
new file mode 100644
index 0000000000..9cfa206b54
--- /dev/null
+++ b/cloudify/scripts/configure_node.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+import subprocess
+from cloudify import ctx
+from cloudify.state import ctx_parameters as inputs
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    join_command = inputs['join_command']
+    join_command = 'sudo {0} --skip-preflight-checks'.format(join_command)
+    execute_command(join_command)
+
+    # Install weave-related utils
+    execute_command('sudo curl -L git.io/weave -o /usr/local/bin/weave')
+    execute_command('sudo chmod a+x /usr/local/bin/weave')
+    execute_command('sudo curl -L git.io/scope -o /usr/local/bin/scope')
+    execute_command('sudo chmod a+x /usr/local/bin/scope')
+    execute_command('/usr/local/bin/scope launch')
+
+    hostname = execute_command('hostname')
+    ctx.instance.runtime_properties['hostname'] = hostname.rstrip('\n')
diff --git a/cloudify/scripts/create.py b/cloudify/scripts/create.py
new file mode 100644
index 0000000000..eb362a4558
--- /dev/null
+++ b/cloudify/scripts/create.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+
+
+def check_command(command):
+
+    try:
+        process = subprocess.Popen(
+            command.split()
+        )
+    except OSError:
+        return False
+
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(command))
+        return False
+
+    return True
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    docker_command = 'docker ps'
+
+    if not check_command(docker_command):
+        raise OperationRetry('Waiting for docker to be installed.')
+
+    finished = False
+    ps = execute_command('ps -ef')
+    for line in ps.split('\n'):
+        if '/usr/bin/python /usr/bin/cloud-init modules' in line:
+            ctx.logger.error('in line')
+            raise OperationRetry('Waiting for Cloud Init to finish.')
+
+    ctx.logger.info('Docker is ready and Cloud Init finished.')
diff --git a/cloudify/scripts/tasks.py b/cloudify/scripts/tasks.py
new file mode 100644
index 0000000000..035a780cb3
--- /dev/null
+++ b/cloudify/scripts/tasks.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+from fabric.api import run
+
+
+def label_node(labels, hostname):
+    if labels:
+        label_list = []
+        for key, value in labels.items():
+            label_pair_string = '%s=%s' % (key, value)
+            label_list.append(label_pair_string)
+        label_string = ' '.join(label_list)
+        command = 'kubectl label nodes %s %s' % (hostname, label_string)
+        run(command)
+
+
+def stop_node(hostname):
+    command = 'kubectl drain %s' % (hostname)
+    run(command)
+
+
+def delete_node(hostname):
+    command = 'kubectl delete no %s' % (hostname)
+    run(command)
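Both configure_node.py and create.py rely on the same execute_command() pattern: split the command string, run it through subprocess.Popen, log stdout/stderr, and return False (or the captured output) based on the return code. The sketch below is illustrative only and not part of the commit; it shows one way that shared helper could be hardened, with shlex-based splitting and an optional logger parameter as assumed additions.

import shlex
import subprocess


def execute_command(command, logger=None):
    # Run `command`, returning its stdout on success or None on failure.
    args = shlex.split(command)  # respects quoting, unlike str.split()
    process = subprocess.Popen(args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    output, error = process.communicate()
    if logger:
        logger.debug('command=%r rc=%s stderr=%r'
                     % (command, process.returncode, error))
    if process.returncode:
        return None
    return output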
diff --git a/cloudify/types/kubernetes.yaml b/cloudify/types/kubernetes.yaml
new file mode 100644
index 0000000000..1698aa210e
--- /dev/null
+++ b/cloudify/types/kubernetes.yaml
@@ -0,0 +1,91 @@
+inputs:
+
+  join_command:
+    type: string
+
+  labels:
+    default:
+      app: { get_input: app_name }
+
+  kubernetes_master_ip:
+    type: string
+
+  kubernetes_master_agent_user:
+    default: { get_input: agent_user }
+
+node_types:
+
+  cloudify.nodes.Kubernetes.Node:
+    derived_from: cloudify.nodes.Root
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          implementation: cloudify/scripts/create.py
+        configure:
+          implementation: cloudify/scripts/configure_node.py
+          inputs:
+            join_command:
+              default: { get_input: join_command }
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: cloudify/scripts/tasks.py
+            task_name:
+              default: label_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+                labels: { get_input: labels }
+            fabric_env:
+              default:
+                host_string: { get_input: kubernetes_master_ip }
+                user: { get_input: kubernetes_master_agent_user }
+                key: { get_secret: agent_key_private }
+#        stop:
+#          implementation: fabric.fabric_plugin.tasks.run_task
+#          inputs:
+#            tasks_file:
+#              default: cloudify/scripts/tasks.py
+#            task_name:
+#              default: stop_node
+#            task_properties:
+#              default:
+#                hostname: { get_attribute: [ SELF, hostname ] }
+#            fabric_env:
+#              default:
+#                host_string: { get_input: kubernetes_master_ip }
+#                user: { get_input: kubernetes_master_agent_user }
+#                key: { get_secret: agent_key_private }
+        delete:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: cloudify/scripts/tasks.py
+            task_name:
+              default: delete_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+            fabric_env:
+              default:
+                host_string: { get_input: kubernetes_master_ip }
+                user: { get_input: kubernetes_master_agent_user }
+                key: { get_secret: agent_key_private }
+
+  cloudify.kubernetes.resources.Namespace:
+    derived_from: cloudify.kubernetes.resources.Main
+    properties:
+      _api_mapping:
+        default:
+          create:
+            api: CoreV1Api
+            method: create_namespace
+            payload: V1Namespace
+          read:
+            api: CoreV1Api
+            method: read_namespace
+          delete:
+            api: CoreV1Api
+            method: delete_namespace
+            payload: V1DeleteOptions
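In the cloudify.nodes.Kubernetes.Node type above, the start operation runs label_node() from cloudify/scripts/tasks.py on the Kubernetes master over the fabric plugin, handing it the node's hostname runtime property and the labels input as task_properties. As a rough local equivalent, the sketch below (illustrative only; the hostname and label values are made up) prints the kubectl command that call boils down to.

def kubectl_label_command(hostname, labels):
    # Mirror of the command string label_node() assembles before fabric runs it
    # on the master; sorted() just makes the output deterministic here.
    pairs = ' '.join('%s=%s' % (key, value)
                     for key, value in sorted(labels.items()))
    return 'kubectl label nodes %s %s' % (hostname, pairs)


print(kubectl_label_command('k8s-node-zookeeper', {'app': 'zookeeper'}))
# kubectl label nodes k8s-node-zookeeper app=zookeeper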
diff --git a/message-router-blueprint.yaml b/message-router-blueprint.yaml
new file mode 100644
index 0000000000..98a283020b
--- /dev/null
+++ b/message-router-blueprint.yaml
@@ -0,0 +1,532 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+  This example deploys the OOM Message Router application. Each service/deployment pair is associated with a single Kubernetes node.
+  Node template naming convention: PROVISIONINGAPI_RESOURCETYPE_APPLICATIONCOMPONENT
+  The following resources are created:
+  - Security Group
+  - openstack_port_zookeeper - NIC that connects to the Openstack Server
+  - openstack_port_kafka - NIC that connects to the Openstack Server
+  - openstack_port_dmaap - NIC that connects to the Openstack Server
+  - openstack_server_zookeeper - a VM that a Kubernetes Node is installed on.
+  - openstack_server_kafka - a VM that a Kubernetes Node is installed on.
+  - openstack_server_dmaap - a VM that a Kubernetes Node is installed on.
+  - kubernetes_node_zookeeper - a Kubernetes node that will join the Kubernetes cluster.
+  - kubernetes_node_kafka - a Kubernetes node that will join the Kubernetes cluster.
+  - kubernetes_node_dmaap - a Kubernetes node that will join the Kubernetes cluster.
+  - kubernetes_deployment_zookeeper - a Kubernetes deployment.
+  - kubernetes_deployment_kafka - a Kubernetes deployment.
+  - kubernetes_deployment_dmaap - a Kubernetes deployment.
+  - kubernetes_service_zookeeper - a Kubernetes service.
+  - kubernetes_service_kafka - a Kubernetes service.
+  - kubernetes_service_dmaap - a Kubernetes service.
+  The following pre-setup steps are assumed, but not required:
+  - Create Cloudify Example Environment: https://github.com/cloudify-examples/cloudify-environment-setup.
+  - Create Kubernetes Cluster: https://github.com/cloudify-examples/simple-kubernetes-blueprint.
+
+imports:
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
+  # Plugin required: https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-openstack-plugin/2.2.0/plugin.yaml
+  # Plugin required: https://github.com/cloudify-incubator/cloudify-utilities-plugin/releases/download/1.2.5/cloudify_utilities_plugin-1.2.5-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-incubator/cloudify-utilities-plugin/1.2.5/plugin.yaml
+  # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.0/cloudify_kubernetes_plugin-1.2.0-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.0/plugin.yaml
+  # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-diamond-plugin/1.3.5/cloudify_diamond_plugin-1.3.5-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-diamond-plugin/1.3.5/plugin.yaml
+  # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-fabric-plugin/1.5/cloudify_fabric_plugin-1.5-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-fabric-plugin/1.5/plugin.yaml
+  - cloudify/types/kubernetes.yaml
+
+inputs:
+
+  configuration_file_content:
+    type: string
+
+  NS:
+    default: oom
+
+  image:
+    description: Image to be used when launching agent VMs
+    default: { get_secret: centos_core_image }
+
+  flavor:
+    description: Flavor of the agent VMs
+    default: { get_secret: large_image_flavor }
+
+  agent_user:
+    description: >
+      User for connecting to agent VMs
+    default: centos
+
+  app_name:
+    default: message-router
+
+  security_group:
+    default: { concat: [ 'secgrp_', { get_input: app_name } ] }
+
+dsl_definitions:
+
+  openstack_config: &openstack_config
+    username: { get_secret: keystone_username }
+    password: { get_secret: keystone_password }
+    tenant_name: { get_secret: keystone_tenant_name }
+    auth_url: { get_secret: keystone_url }
+    region: { get_secret: region }
+
+groups:
+
+  openstack_server_port_group_zookeeper:
+    members:
+      - openstack_server_zookeeper
+      - openstack_port_zookeeper
+
+  openstack_server_port_group_kafka:
+    members:
+      - openstack_server_kafka
+      - openstack_port_kafka
+
+  openstack_server_port_group_dmaap:
+    members:
+      - openstack_server_dmaap
+      - openstack_port_dmaap
+
+policies:
+
+  openstack_server_port_policies_scaling:
+    type: cloudify.policies.scaling
+    properties:
+      default_instances: 1
+    targets:
+      - openstack_server_port_group_zookeeper
+      - openstack_server_port_group_kafka
+      - openstack_server_port_group_dmaap
+
+node_templates:
+
+  kubernetes_service_zookeeper:
+    type: cloudify.kubernetes.resources.Service
+    properties:
+      definition:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: zookeeper
+          labels:
+            app: zookeeper
+        spec:
+          ports:
+          - name: zookeeper1
+            port: 2181
+          selector:
+            app: zookeeper
+          clusterIP: None
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_zookeeper
+
+  kubernetes_deployment_zookeeper:
+    type: cloudify.kubernetes.resources.Deployment
+    properties:
+      definition:
+        file:
+          resource_path: kubernetes/message-router/message-router-zookeeper.yaml
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_node_zookeeper
+
+  kubernetes_node_zookeeper:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: openstack_server_zookeeper
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            task_properties:
+              hostname: { get_attribute: [ SELF, hostname ] }
+              labels:
+                app: zookeeper
+
+  openstack_server_zookeeper:
+    type: cloudify.openstack.nodes.Server
+    properties: &openstack_server_properties
+      openstack_config: *openstack_config
+      agent_config:
+        user: { get_input: agent_user }
+        install_method: remote
+        port: 22
+        key: { get_secret: agent_key_private }
+      server:
+        key_name: ''
+        image: ''
+        flavor: ''
+      management_network_name: { get_property: [ private_network, resource_id ] }
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: openstack_port_zookeeper
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloud_init_openstack_server
+    interfaces: &openstack_server_interfaces
+      cloudify.interfaces.lifecycle:
+        create:
+          inputs:
+            args:
+              image: { get_input: image }
+              flavor: { get_input: flavor }
+              userdata: { get_attribute: [ cloud_init_openstack_server, cloud_config ] }
+      cloudify.interfaces.monitoring_agent:
+        install:
+          implementation: diamond.diamond_agent.tasks.install
+          inputs:
+            diamond_config:
+              interval: 1
+        start: diamond.diamond_agent.tasks.start
+        stop: diamond.diamond_agent.tasks.stop
+        uninstall: diamond.diamond_agent.tasks.uninstall
+      cloudify.interfaces.monitoring:
+        start:
+          implementation: diamond.diamond_agent.tasks.add_collectors
+          inputs:
+            collectors_config:
+              CPUCollector: {}
+              MemoryCollector: {}
+              LoadAverageCollector: {}
+              DiskUsageCollector:
+                config:
+                  devices: x?vd[a-z]+[0-9]*$
+              NetworkCollector: {}
+              ProcessResourcesCollector:
+                config:
+                  enabled: true
+                  unit: B
+                  measure_collector_time: true
+                  cpu_interval: 0.5
+                  process:
+                    hyperkube:
+                      name: hyperkube
+
+  openstack_port_zookeeper:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships: &openstack_port_relationships
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - type: cloudify.relationships.connected_to
+        target: private_network
+      - type: cloudify.relationships.depends_on
+        target: private_subnet
+      - type: cloudify.openstack.port_connected_to_security_group
+        target: security_group
+
+  kubernetes_service_kafka:
+    type: cloudify.kubernetes.resources.Service
+    properties:
+      definition:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: global-kafka
+          labels:
+            app: global-kafka
+        spec:
+          ports:
+          - name: kafka1
+            port: 9092
+          selector:
+            app: global-kafka
+          clusterIP: None
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_kafka
+
+  kubernetes_deployment_kafka:
+    type: cloudify.kubernetes.resources.Deployment
+    properties:
+      definition:
+        file:
+          resource_path: kubernetes/message-router/message-router-kafka.yaml
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_node_kafka
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_zookeeper
+
+  kubernetes_node_kafka:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: openstack_server_kafka
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            task_properties:
+              hostname: { get_attribute: [ SELF, hostname ] }
+              labels:
+                app: global-kafka
+
+  openstack_server_kafka:
+    type: cloudify.openstack.nodes.Server
+    properties: *openstack_server_properties
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: openstack_port_kafka
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloud_init_openstack_server
+    interfaces: *openstack_server_interfaces
+
+  openstack_port_kafka:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships: *openstack_port_relationships
+
+  kubernetes_service_dmaap:
+    type: cloudify.kubernetes.resources.Service
+    properties:
+      definition:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: dmaap
+          labels:
+            app: dmaap
+            version: 1.0.0
+        spec:
+          ports:
+          - name: mr1
+            port: 3904
+            nodePort: 30227
+          - name: mr2
+            port: 3905
+            nodePort: 30226
+          selector:
+            app: dmaap
+          type: NodePort
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_dmaap
+
+  kubernetes_deployment_dmaap:
+    type: cloudify.kubernetes.resources.Deployment
+    properties:
+      definition:
+        file:
+          resource_path: kubernetes/message-router/message-router-dmaap.yaml
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_node_dmaap
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_zookeeper
+
+  kubernetes_node_dmaap:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: openstack_server_dmaap
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            task_properties:
+              hostname: { get_attribute: [ SELF, hostname ] }
+              labels:
+                app: global-dmaap
+
+  openstack_server_dmaap:
+    type: cloudify.openstack.nodes.Server
+    properties: *openstack_server_properties
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: openstack_port_dmaap
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloud_init_openstack_server
+    interfaces: *openstack_server_interfaces
+
+  openstack_port_dmaap:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships: *openstack_port_relationships
+
+  security_group:
+    type: cloudify.openstack.nodes.SecurityGroup
+    properties:
+      openstack_config: *openstack_config
+      security_group:
+        name: { get_input: security_group }
+        description: kubernetes master security group
+      rules:
+      rules:
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 22
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 53
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 53
+        protocol: udp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 80
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 443
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 2379
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 4001
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 4789
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6443
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6783
+        protocol: tcp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6783
+        protocol: udp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6784
+        protocol: tcp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6784
+        protocol: udp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 8000
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 8080
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 9090
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 10250
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 2181
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 9092
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 3904
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 30227
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 3905
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 30226
+
+  private_subnet:
+    type: cloudify.openstack.nodes.Subnet
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: private_subnet_name }
+    relationships:
+      - target: private_network
+        type: cloudify.relationships.contained_in
+
+  private_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: private_network_name }
+
+  external_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: external_network_name }
+
+  cloud_init_openstack_server:
+    type: cloudify.nodes.CloudInit.CloudConfig
+    properties:
+      resource_config:
+        groups:
+          - docker
+        users:
+          - name: { get_input: agent_user }
+            primary-group: wheel
+            groups: docker
+            shell: /bin/bash
+            sudo: ['ALL=(ALL) NOPASSWD:ALL']
+            ssh-authorized-keys:
+              - { get_secret: agent_key_public }
+        write_files:
+          - path: /etc/yum.repos.d/kubernetes.repo
+            owner: root:root
+            permissions: '0444'
+            content: |
+              # installed by cloud-init
+              [kubernetes]
+              name=Kubernetes
+              baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+              enabled=1
+              gpgcheck=1
+              repo_gpgcheck=1
+              gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+                https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+        packages:
+          - [epel-release]
+          - [gcc]
+          - [python-dev]
+          - [python-wheel]
+          - [python-setuptools]
+          - [libffi-devel]
+          - [python-devel]
+          - [openssl-devel]
+          - [docker, 1.12.6-28.git1398f24.el7.centos]
+          - [kubelet, 1.6.4-0]
+          - [kubeadm, 1.6.4-0]
+          - [kubectl, 1.6.4-0]
+          - [kubernetes-cni, 0.5.1-0]
+          - [git]
+          - [wget]
+        runcmd:
+          - systemctl enable docker
+          - systemctl start docker
+          - systemctl enable kubelet
+          - systemctl start kubelet
+          - yum install -y python-pip
+          - pip install --upgrade pip
+          - pip install docker-compose
+          - pip install backports.ssl_match_hostname --upgrade
+          - mkdir -p /tmp/oom/
+          - git clone https://gerrit.onap.org/r/oom.git /tmp/oom
+          - sleep 15
+          - chmod 755 /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+          - sed -i -e "s/\.\/docker_files/\/tmp\/oom\/kubernetes\/config\/docker\/init\/src\/config\/message-router\/dcae-startup-vm-message-router\/docker_files/g" /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+          - sed -i -e "s/\/opt\/docker\/docker-compose/\/bin\/docker-compose/g" /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+          - mv /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/__docker-compose.yml /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/docker-compose.yml
+          - mv /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/__MsgRtrApi.properties /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/MsgRtrApi.properties
+          - sh -c /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+
+  k8s_node_scaling_tier:
+    type: cloudify.nodes.Root
+
+  k8s:
+    type: cloudify.kubernetes.nodes.Master
+    properties:
+      configuration:
+        file_content: { get_input: configuration_file_content }
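The dmaap Service in the blueprint exposes ports 3904/3905 plus NodePorts 30227/30226, and the security_group node opens the matching OpenStack ports. The following is a small sketch, not part of this change, that cross-checks the two once message-router-blueprint.yaml is available locally; PyYAML and the local file path are assumptions.

import yaml  # PyYAML, assumed to be available

with open('message-router-blueprint.yaml') as f:
    blueprint = yaml.safe_load(f)

templates = blueprint['node_templates']

# Ports (and NodePorts) declared on the dmaap Kubernetes Service definition.
spec = templates['kubernetes_service_dmaap']['properties']['definition']['spec']
service_ports = set()
for entry in spec['ports']:
    service_ports.add(entry['port'])
    if 'nodePort' in entry:
        service_ports.add(entry['nodePort'])

# Ports opened by the OpenStack security group rules.
rules = templates['security_group']['properties']['rules'] or []
open_ports = {rule['port'] for rule in rules
              if isinstance(rule, dict) and 'port' in rule}

missing = sorted(service_ports - open_ports)
print('dmaap service ports: %s' % sorted(service_ports))
print('missing from security_group: %s' % (missing if missing else 'none'))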