author    Michael Hwang <mhwang@research.att.com>    2017-08-23 14:26:36 -0400
committer Michael Hwang <mhwang@research.att.com>    2017-08-23 14:31:51 -0400
commit    21af561cafe31681f94479e8c70f157f6e6ecc53 (patch)
tree      f41f69e1419867fd70af1f7697f78e3490d41b87
parent    94cbaca0f5d9447afe9b0f392f248470420422e5 (diff)
Add docker and relationships plugin
Change-Id: I323599ae2965f081f2061b6791635bbeddb09811
Issue-Id: DCAEGEN2-79
Signed-off-by: Michael Hwang <mhwang@research.att.com>
-rw-r--r--  docker/.gitignore                                    68
-rw-r--r--  docker/ChangeLog.md                                  47
-rw-r--r--  docker/LICENSE.txt                                   32
-rw-r--r--  docker/README.md                                    195
-rw-r--r--  docker/docker-node-type.yaml                        244
-rw-r--r--  docker/dockerplugin/__init__.py                      30
-rw-r--r--  docker/dockerplugin/decorators.py                    80
-rw-r--r--  docker/dockerplugin/discovery.py                    206
-rw-r--r--  docker/dockerplugin/exceptions.py                    29
-rw-r--r--  docker/dockerplugin/tasks.py                        563
-rw-r--r--  docker/dockerplugin/utils.py                         28
-rw-r--r--  docker/examples/blueprint-laika-dmaap-pubs.yaml     148
-rw-r--r--  docker/examples/blueprint-laika-dmaap-pubsub.yaml   150
-rw-r--r--  docker/examples/blueprint-laika-dmaap-subs.yaml     156
-rw-r--r--  docker/examples/blueprint-laika.yaml                 89
-rw-r--r--  docker/examples/blueprint-registrator.yaml           47
-rw-r--r--  docker/requirements.txt                               3
-rw-r--r--  docker/setup.py                                      36
-rw-r--r--  docker/tests/test_discovery.py                       40
-rw-r--r--  docker/tests/test_tasks.py                          198
-rw-r--r--  relationships/.gitignore                             93
-rw-r--r--  relationships/LICENSE.txt                            32
-rw-r--r--  relationships/README.md                              54
-rw-r--r--  relationships/example_register_to_blueprint.yaml     24
-rw-r--r--  relationships/relationship-types.yaml                65
-rw-r--r--  relationships/relationshipplugin/__init__.py         23
-rw-r--r--  relationships/relationshipplugin/discovery.py        84
-rw-r--r--  relationships/relationshipplugin/tasks.py           131
-rw-r--r--  relationships/requirements.txt                       12
-rw-r--r--  relationships/setup.py                               35
-rw-r--r--  relationships/tests/test_discovery.py                44
31 files changed, 2986 insertions, 0 deletions
diff --git a/docker/.gitignore b/docker/.gitignore
new file mode 100644
index 0000000..8f0f9ba
--- /dev/null
+++ b/docker/.gitignore
@@ -0,0 +1,68 @@
+cfyhelper
+.cloudify
+*.swp
+*.swn
+*.swo
+.DS_Store
+.project
+.pydevproject
+venv
+
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/docker/ChangeLog.md b/docker/ChangeLog.md
new file mode 100644
index 0000000..673e672
--- /dev/null
+++ b/docker/ChangeLog.md
@@ -0,0 +1,47 @@
+# Change Log
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](http://semver.org/).
+
+## [2.3.0]
+
+* Rip out dockering and use common python-dockering library
+ - Using 1.2.0 of python-dockering supports Docker exec based health checks
+* Support mapping ports and volumes when provided in docker config
+
+## [2.2.0]
+
+* Add `dcae.nodes.DockerContainerForComponentsUsingDmaap` node type and parse streams_publishes and streams_subscribes to be used by the DMaaP plugin.
+ - Handle message router wiring in the create operation for components
+ - Handle data router wiring in the create and in the start operation for components
+* Refactor the create and start operations for components into a functional style to enable better unit test coverage.
+* Add decorators for common cross-cutting functionality
+* Add example blueprints for different dmaap cases
+
+## [2.1.0]
+
+* Add the node type `DockerContainerForPlatforms`, which is intended for platform services that have well-known names and ports
+* Add backdoor for `DockerContainerForComponents` to statically map ports
+* Add hack fix to allow this plugin access to the research nexus
+* Add support for dns through the local Consul agent
+* Free this plugin from the CentOS bondage
+
+## [2.0.0]
+
+* Remove the magic env.ini code. It's no longer needed because we are now running local agents of Consul.
+* Save and use the docker container id
+* `DockerContainer` is now a different node type that is much simpler than `DockerContainerForComponents`. It is targeted at the registrator use case. This involved overhauling the create and start container functionality.
+* Classify connection and docker host not found error as recoverable
+* Apply CONSUL_HOST to point to the local Consul agent
+
+## [1.0.0]
+
+* Implement health checks - expose health checks on the node and register Docker containers with it. Note that health checks are currently optional.
+* Add option to remove images in the stop operation
+* Verify that the container is running and healthy before finishing the start operation
+* Image names passed in are now required to be the fully tagged names including registry
+* Remove references to rework in the code namespaces
+* Application configuration is now a YAML map to accommodate future blueprint generation
+* Update blueprints and cfyhelper.sh
diff --git a/docker/LICENSE.txt b/docker/LICENSE.txt
new file mode 100644
index 0000000..cb8008a
--- /dev/null
+++ b/docker/LICENSE.txt
@@ -0,0 +1,32 @@
+============LICENSE_START=======================================================
+org.onap.dcae
+================================================================================
+Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+
+Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+===================================================================
+Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+you may not use this documentation except in compliance with the License.
+You may obtain a copy of the License at
+ https://creativecommons.org/licenses/by/4.0/
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000..08fb570
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,195 @@
+# docker-cloudify
+
+This repository contains Cloudify artifacts used to orchestrate the deployment of Docker containers. See the example blueprints in the [`examples` directory](examples).
+
+More details about what is expected from Docker components can be found in the DCAE ONAP documentation.
+
+## Input parameters
+
+### start
+
+These input parameters are for the `start` operation of `cloudify.interfaces.lifecycle` and are passed into the variant task operations `create_and_start_container*`.
+
+#### `envs`
+
+A map of environment variables to be forwarded to the Docker container. Example:
+
+```yaml
+envs:
+ EXTERNAL_IP: '10.100.1.99'
+```
+
+These environment variables will be forwarded in addition to the *platform-related* environment variables like `CONSUL_HOST`.
+
+#### `volumes`
+
+List of maps used for setting up Docker volume mounts. Example:
+
+```yaml
+volumes:
+ - host:
+ path: '/var/run/docker.sock'
+ container:
+ bind: '/tmp/docker.sock'
+ mode: 'ro'
+```
+
+This information is passed into the [`docker-py` create container call](http://docker-py.readthedocs.io/en/1.10.6/volumes.html).
+
+key | description
+--- | -----------
+path | Full path to the file or directory on the host machine to be mounted
+bind | Full path to the file or directory in the container where the volume should be mounted to
+mode | Access mode: `ro` (read-only) or `rw` (read-write)
+
+#### `ports`
+
+List of strings used to bind container ports to host ports. Each item has the format `<container port>:<host port>`.
+
+Note that `DockerContainerForPlatforms` has the property pair `host_port` and `container_port`. This pair will be merged with the `ports` input parameter.
+
+```yaml
+ports:
+ - '8000:8000'
+```
+
+Default is `None`.
+
+#### `max_wait`
+
+Integer - the number of seconds to wait for the Docker container to come up healthy before throwing a `NonRecoverableError`.
+
+```yaml
+max_wait: 60
+```
+
+Default is 300 seconds.
+
+### stop
+
+These input parameters are for the `stop` operation of `cloudify.interfaces.lifecycle` and are passed into the task operation `stop_and_remove_container`.
+
+#### `cleanup_image`
+
+Boolean that controls whether to attempt to remove the associated Docker image (true) or not (false).
+
+```yaml
+cleanup_image: true
+```
+
+Default is false.
+
+## Using DMaaP
+
+The node type `dcae.nodes.DockerContainerForComponentsUsingDmaap` is intended to be used by components that use DMaaP and is expected to be connected with the DMaaP node types found in the DMaaP plugin.
+
+### Node properties
+
+The properties `streams_publishes` and `streams_subscribes` are both lists of objects that are used to create additional parameters, which are then passed into the DMaaP plugin.
+
+#### Message router
+
+For message router publishers and subscribers, the objects look like:
+
+```yaml
+name: topic00
+location: mtc5
+client_role: XXXX
+type: message_router
+```
+
+Where `name` is the node name of `dcae.nodes.Topic` or `dcae.nodes.ExistingTopic` that the Docker node is connecting with via the relationships `dcae.relationships.publish_events` for publishing and `dcae.relationships.subscribe_to_events` for subscribing.
+
+#### Data router
+
+For data router publishers, the object looks like:
+
+```yaml
+name: feed00
+location: mtc5
+type: data_router
+```
+
+Where `name` is the node name of `dcae.nodes.Feed` or `dcae.nodes.ExistingFeed` that the Docker node is connecting with via the relationships `dcae.relationships.publish_files`.
+
+For data router subscribers, the object looks like:
+
+```yaml
+name: feed00
+location: mtc5
+type: data_router
+username: king
+password: "123456"
+route: some-path
+scheme: https
+```
+
+Where the relationship to use is `dcae.relationships.subscribe_to_files`.
+
+If `username` and `password` are not provided, then the plugin will generate a username and password pair.
+
+`route` and `scheme` are parameters used in the dynamic construction of the delivery URL, which is passed to the DMaaP plugin when setting up the subscriber to the feed.
+
+`route` is the HTTP path endpoint of the subscriber that will handle files from the associated feed.
+
+`scheme` is either `http` or `https`. If not specified, the plugin defaults to `http`.
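+
+For illustration, the delivery URL is assembled roughly as follows (a sketch of the logic in `dockerplugin/tasks.py`; `subscriber_host` is assumed to have already been resolved from Consul):
+
+```python
+scheme = dr_sub.get("scheme", "http")  # plugin default scheme
+delivery_url = "{scheme}://{host}/{path}".format(
+    scheme=scheme, host=subscriber_host, path=dr_sub["route"])
+```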
+
+### Component configuration
+
+The DMaaP plugin is responsible for provisioning the feed/topic and storing the resulting DMaaP connection details into Consul. Here is an example:
+
+```json
+{
+ "topic00": {
+ "client_role": "XXXX",
+ "client_id": "XXXX",
+ "location": "XXXX",
+ "topic_url": "https://some-topic-url.com/events/abc"
+ }
+}
+```
+
+This is to be merged with the templatized application configuration:
+
+```json
+{
+ "some-param": "Lorem ipsum dolor sit amet",
+ "streams_subscribes": {
+ "topic-alpha": {
+ "type": "message_router",
+ "aaf_username": "user-foo",
+ "aaf_password": "password-bar",
+ "dmaap_info": "<< topic00 >>"
+    }
+ },
+ "streams_publishes": {},
+ "services_calls": {}
+}
+```
+
+To form the application configuration:
+
+```json
+{
+ "some-param": "Lorem ipsum dolor sit amet",
+ "streams_subscribes": {
+ "topic-alpha": {
+ "type": "message_router",
+ "aaf_username": "user-foo",
+ "aaf_password": "password-bar",
+ "dmaap_info": {
+ "client_role": "XXXX",
+ "client_id": "XXXX",
+ "location": "XXXX",
+ "topic_url": "https://some-topic-url.com/events/abc"
+ }
+    }
+ },
+ "streams_publishes": {},
+ "services_calls": {}
+}
+```
+
+This also applies to data router feeds.
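+
+For reference, a minimal sketch of that merge step (the helper below is hypothetical and not part of this plugin's API; it assumes the provisioned entries are keyed by stream name as shown above):
+
+```python
+def merge_dmaap_info(app_config, dmaap_entries):
+    """Replace "<< name >>" placeholders with provisioned connection info."""
+    def resolve(value):
+        if isinstance(value, str) and value.startswith("<<") and value.endswith(">>"):
+            return dmaap_entries[value.strip("<> ")]
+        if isinstance(value, dict):
+            return {key: resolve(val) for key, val in value.items()}
+        return value
+    return resolve(app_config)
+```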
diff --git a/docker/docker-node-type.yaml b/docker/docker-node-type.yaml
new file mode 100644
index 0000000..5fb0e27
--- /dev/null
+++ b/docker/docker-node-type.yaml
@@ -0,0 +1,244 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+
+plugins:
+ docker:
+ executor: 'central_deployment_agent'
+ package_name: dockerplugin
+ package_version: 2.3.0
+
+node_types:
+  # The DockerContainerForComponents node type is to be used for DCAE service components that
+  # are to be run in a Docker container. This node type goes beyond an ordinary Docker
+  # plugin in that it adds DCAE platform specific functionality:
+  #
+  # * Generation of the service component name
+  # * Management of service component configuration information
+  #
+  # The Docker run command arguments are intentionally not exposed. This node type is
+  # not intended to be a generic all-purpose Docker container; rather, it should be thought
+  # of as an interface to how Docker containers are to be run in the DCAE context.
+ dcae.nodes.DockerContainerForComponents:
+ derived_from: cloudify.nodes.Root
+ properties:
+ service_component_type:
+ type: string
+ description: Service component type of the application being run in the container
+
+ service_id:
+ type: string
+ description: Unique id for this DCAE service instance this component belongs to
+
+ location_id:
+ type: string
+ description: Location id of where to run the container
+
+ service_component_name_override:
+ type: string
+ description: >
+ Manually override and set the name for this Docker container node. If this
+ is set, then the name will not be auto-generated. Platform services are the
+ specific use cases for using this parameter because they have static
+ names for example the CDAP broker.
+ default: Null
+
+ application_config:
+ default: {}
+ description: >
+        Application configuration for this Docker component. The data structure is
+ expected to be a complex map (native YAML) and to be constructed and filled
+ by the creator of the blueprint.
+
+ docker_config:
+ default: {}
+ description: >
+        This is the auxiliary portion of the component spec that contains things
+ like healthcheck definitions for the Docker component. Health checks are
+ optional.
+
+ image:
+ type: string
+ description: Full uri of the Docker image
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Generate service component name and populate config into Consul
+ implementation: docker.dockerplugin.create_for_components
+ start:
+ # Create Docker container and start
+ implementation: docker.dockerplugin.create_and_start_container_for_components
+ stop:
+ # Stop and remove Docker container
+ implementation: docker.dockerplugin.stop_and_remove_container
+ delete:
+ # Delete configuration from Consul
+ implementation: docker.dockerplugin.cleanup_discovery
+
+
+ # This node type is intended for DCAE service components that use DMaaP and must use the
+ # DMaaP plugin.
+ dcae.nodes.DockerContainerForComponentsUsingDmaap:
+ derived_from: dcae.nodes.DockerContainerForComponents
+ properties:
+ streams_publishes:
+ description: >
+ List of DMaaP streams used for publishing.
+
+ Message router items look like:
+
+ name: topic00
+ location: mtc5
+ client_role: XXXX
+ type: message_router
+
+ Data router items look like:
+
+ name: feed00
+ location: mtc5
+ type: data_router
+
+        This information is forwarded to the dmaap plugin for provisioning.
+ default: []
+ streams_subscribes:
+ description: >
+ List of DMaaP streams used for subscribing.
+
+ Message router items look like:
+
+ name: topic00
+ location: mtc5
+ client_role: XXXX
+ type: message_router
+
+ Data router items look like:
+
+ name: feed00
+ location: mtc5
+ type: data_router
+ username: king
+ password: 123456
+ route: some-path
+ scheme: https
+
+        Note that username and password are optional. If not provided or null, then the
+ plugin will generate them.
+
+ default: []
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Generate service component name and populate config into Consul
+ implementation: docker.dockerplugin.create_for_components_with_streams
+ start:
+ # Create Docker container and start
+ implementation: docker.dockerplugin.create_and_start_container_for_components_with_streams
+
+
+ # DockerContainerForPlatforms is intended for DCAE platform services. Unlike the components,
+ # platform services have well-known names and well-known ports.
+ dcae.nodes.DockerContainerForPlatforms:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ description: >
+ Container name used to register with Consul
+
+ application_config:
+ default: {}
+ description: >
+        Application configuration for this Docker component. The data structure is
+ expected to be a complex map (native YAML) and to be constructed and filled
+ by the creator of the blueprint.
+
+ docker_config:
+ default: {}
+ description: >
+        This is the auxiliary portion of the component spec that contains things
+ like healthcheck definitions for the Docker component. Health checks are
+ optional.
+
+ image:
+ type: string
+ description: Full uri of the Docker image
+
+ host_port:
+ type: integer
+ description: >
+ Network port that the platform service is expecting to expose on the host
+ default: 0
+
+ container_port:
+ type: integer
+ description: >
+ Network port that the platform service exposes in the container
+ default: 0
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Populate config into Consul
+ implementation: docker.dockerplugin.create_for_platforms
+ start:
+ # Create Docker container and start
+ implementation: docker.dockerplugin.create_and_start_container_for_platforms
+ stop:
+ # Stop and remove Docker container
+ implementation: docker.dockerplugin.stop_and_remove_container
+ delete:
+ # Delete configuration from Consul
+ implementation: docker.dockerplugin.cleanup_discovery
+
+
+ # DockerContainer is intended to be more of an all-purpose Docker container node
+ # for non-componentized applications.
+ dcae.nodes.DockerContainer:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ type: string
+ description: Name of the Docker container to be given
+ image:
+ type: string
+ description: Full uri of the Docker image
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ # Create Docker container and start
+ implementation: docker.dockerplugin.create_and_start_container
+ stop:
+ # Stop and remove Docker container
+ implementation: docker.dockerplugin.stop_and_remove_container
+
+
+ # TODO: Revisit using Docker swarm
+ # The DockerSwarm node type provides the connection information of an available Docker swarm
+  # cluster to be used to run Docker containers given search constraints like location.
+ # This node type is not responsible for instantiating and managing the Docker swarm clusters.
+
+  # The SelectedDockerHost node is responsible for selecting a pre-existing Docker host to run
+ # Docker containers on. It is not responsible for instantiating new Docker hosts or expanding
+ # more resources.
+ dcae.nodes.SelectedDockerHost:
+ derived_from: cloudify.nodes.Root
+ properties:
+ location_id:
+ type: string
+ description: Location id of the Docker host to use
+
+ # REVIEW: This field should really be optional but because there's no functionality
+ # that provides the dynamic solution sought after yet, it has been promoted to be
+ # required.
+ docker_host_override:
+ type: string
+ description: Docker hostname here is used as a manual override
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Provide the Docker host to use for containers
+ implementation: docker.dockerplugin.select_docker_host
+ delete:
+ implementation: docker.dockerplugin.unselect_docker_host
diff --git a/docker/dockerplugin/__init__.py b/docker/dockerplugin/__init__.py
new file mode 100644
index 0000000..ef1bfec
--- /dev/null
+++ b/docker/dockerplugin/__init__.py
@@ -0,0 +1,30 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# REVIEW: Tried to source the version from here but you run into import issues
+# because "tasks" module is loaded. This method seems to be the PEP 396
+# recommended way and is listed #3 here https://packaging.python.org/single_source_version/
+# __version__ = '0.1.0'
+
+from .tasks import create_for_components, create_for_components_with_streams, \
+ create_and_start_container_for_components_with_streams, \
+ create_for_platforms, create_and_start_container, \
+ create_and_start_container_for_components, create_and_start_container_for_platforms, \
+ stop_and_remove_container, cleanup_discovery, select_docker_host, unselect_docker_host
diff --git a/docker/dockerplugin/decorators.py b/docker/dockerplugin/decorators.py
new file mode 100644
index 0000000..089231a
--- /dev/null
+++ b/docker/dockerplugin/decorators.py
@@ -0,0 +1,80 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import copy
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+from dockering import utils as doc
+from dockerplugin import discovery as dis
+from dockerplugin.exceptions import DockerPluginDeploymentError, \
+ DockerPluginDependencyNotReadyError
+
+
+def monkeypatch_loggers(task_func):
+ """Sets up the dependent loggers"""
+
+ def wrapper(**kwargs):
+ # Ouch! Monkeypatch loggers
+ doc.logger = ctx.logger
+ dis.logger = ctx.logger
+
+ return task_func(**kwargs)
+
+ return wrapper
+
+
+def wrap_error_handling_start(task_start_func):
+ """Wrap error handling for the start operations"""
+
+ def wrapper(**kwargs):
+ try:
+ return task_start_func(**kwargs)
+ except DockerPluginDependencyNotReadyError as e:
+            # You are here because something we need, like a working Docker host, is
+            # not available yet, so let Cloudify try again later.
+ raise RecoverableError(e)
+ except DockerPluginDeploymentError as e:
+ # Container failed to come up in the allotted time. This is deemed
+ # non-recoverable.
+ raise NonRecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while starting container: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+ return wrapper
+
+
+def merge_inputs_for_start(task_start_func):
+ """Merge all inputs for start operation into one dict"""
+
+    def wrapper(**kwargs):
+ start_inputs = copy.deepcopy(ctx.instance.runtime_properties)
+ start_inputs.update(kwargs)
+
+ # Apparently kwargs contains "ctx" which is cloudify.context.CloudifyContext
+ # This has to be removed and not copied into runtime_properties else you get
+ # JSON serialization errors.
+ if "ctx" in start_inputs:
+ del start_inputs["ctx"]
+
+ return task_start_func(**start_inputs)
+
+ return wrapper
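+
+
+# For reference, tasks.py stacks these decorators on the start operations like so
+# (with @operation innermost):
+#
+#   @wrap_error_handling_start
+#   @merge_inputs_for_start
+#   @monkeypatch_loggers
+#   @operation
+#   def create_and_start_container_for_components(**start_inputs):
+#       ...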
diff --git a/docker/dockerplugin/discovery.py b/docker/dockerplugin/discovery.py
new file mode 100644
index 0000000..32a8cd0
--- /dev/null
+++ b/docker/dockerplugin/discovery.py
@@ -0,0 +1,206 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from functools import partial
+import json
+import logging
+import uuid
+import requests
+import consul
+
+
+logger = logging.getLogger("discovery")
+
+
+class DiscoveryError(RuntimeError):
+ pass
+
+class DiscoveryConnectionError(RuntimeError):
+ pass
+
+class DiscoveryServiceNotFoundError(RuntimeError):
+ pass
+
+
+def _wrap_consul_call(consul_func, *args, **kwargs):
+ """Wrap Consul call to map errors"""
+ try:
+ return consul_func(*args, **kwargs)
+ except requests.exceptions.ConnectionError as e:
+ raise DiscoveryConnectionError(e)
+
+
+def generate_service_component_name(service_component_type, service_id, location_id):
+ """Generate service component id used to pass into the service component
+ instance and used as the key to the service component configuration.
+
+ Format:
+ <service component id>.<service component type>.<service id>.<location id>.dcae.com
+
+ TODO: The format will evolve.
+ """
+    # Randomly generated
+ service_component_id = str(uuid.uuid4())
+ return "{0}.{1}.{2}.{3}.dcae.com".format(
+ service_component_id, service_component_type, service_id, location_id)
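+
+# Example (the uuid prefix is generated randomly):
+#   generate_service_component_name("laika", "foobar", "mtc5")
+#   => "<uuid4>.laika.foobar.mtc5.dcae.com"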
+
+
+def create_kv_conn(host):
+ """Create connection to key-value store
+
+ Returns a Consul client to the specified Consul host"""
+ try:
+ [hostname, port] = host.split(":")
+ return consul.Consul(host=hostname, port=int(port))
+    except ValueError:
+ return consul.Consul(host=host)
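+
+# e.g. create_kv_conn("localhost:8500") or create_kv_conn("localhost"); the latter
+# lets the Consul client fall back to its default port (8500).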
+
+def push_service_component_config(kv_conn, service_component_name, config):
+ config_string = config if isinstance(config, str) else json.dumps(config)
+ kv_put_func = partial(_wrap_consul_call, kv_conn.kv.put)
+
+ if kv_put_func(service_component_name, config_string):
+ logger.info("Added config for {0}".format(service_component_name))
+ else:
+ raise DiscoveryError("Failed to push configuration")
+
+def remove_service_component_config(kv_conn, service_component_name):
+ kv_delete_func = partial(_wrap_consul_call, kv_conn.kv.delete)
+ kv_delete_func(service_component_name)
+
+
+def _create_rel_key(service_component_name):
+ return "{0}:rel".format(service_component_name)
+
+def store_relationship(kv_conn, source_name, target_name):
+ # TODO: Rel entry may already exist in a one-to-many situation. Need to
+ # support that.
+ rel_key = _create_rel_key(source_name)
+ rel_value = [target_name] if target_name else []
+
+ kv_put_func = partial(_wrap_consul_call, kv_conn.kv.put)
+ kv_put_func(rel_key, json.dumps(rel_value))
+ logger.info("Added relationship for {0}".format(rel_key))
+
+def delete_relationship(kv_conn, service_component_name):
+ rel_key = _create_rel_key(service_component_name)
+ kv_get_func = partial(_wrap_consul_call, kv_conn.kv.get)
+ index, rels = kv_get_func(rel_key)
+
+ if rels:
+ rels = json.loads(rels["Value"].decode("utf-8"))
+ kv_delete_func = partial(_wrap_consul_call, kv_conn.kv.delete)
+ kv_delete_func(rel_key)
+ return rels
+ else:
+ return []
+
+def lookup_service(kv_conn, service_component_name):
+ catalog_get_func = partial(_wrap_consul_call, kv_conn.catalog.service)
+ index, results = catalog_get_func(service_component_name)
+
+ if results:
+ return results
+ else:
+ raise DiscoveryServiceNotFoundError("Failed to find: {0}".format(service_component_name))
+
+
+# TODO: Note these functions have been (for the most part) shamelessly lifted from
+# dcae-cli and should really be shared.
+
+def _is_healthy_pure(get_health_func, instance):
+ """Checks to see if a component instance is running healthy
+
+ Pure function edition
+
+ Args
+ ----
+ get_health_func: func(string) -> complex object
+ Look at unittests in test_discovery to see examples
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ index, resp = get_health_func(instance)
+
+ if resp:
+ def is_passing(instance):
+ return all([check["Status"] == "passing" for check in instance["Checks"]])
+
+ return any([is_passing(instance) for instance in resp])
+ else:
+ return False
+
+def is_healthy(consul_host, instance):
+ """Checks to see if a component instance is running healthy
+
+ Impure function edition
+
+ Args
+ ----
+ consul_host: (string) host string of Consul
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ cons = create_kv_conn(consul_host)
+
+ get_health_func = partial(_wrap_consul_call, cons.health.service)
+ return _is_healthy_pure(get_health_func, instance)
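+
+# e.g. is_healthy("localhost", some_component_name) returns True only if at least
+# one registered instance passes all of its Consul health checks.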
+
+
+def add_to_entry(conn, key, add_name, add_value):
+ """
+ Find 'key' in consul.
+ Treat its value as a JSON string representing a dict.
+ Extend the dict by adding an entry with key 'add_name' and value 'add_value'.
+ Turn the resulting extended dict into a JSON string.
+ Store the string back into Consul under 'key'.
+ Watch out for conflicting concurrent updates.
+
+ Example:
+ Key 'xyz:dmaap' has the value '{"feed00": {"feed_url" : "http://example.com/feeds/999"}}'
+ add_to_entry('xyz:dmaap', 'topic00', {'topic_url' : 'http://example.com/topics/1229'})
+ should result in the value for key 'xyz:dmaap' in consul being updated to
+ '{"feed00": {"feed_url" : "http://example.com/feeds/999"}, "topic00" : {"topic_url" : "http://example.com/topics/1229"}}'
+ """
+ while True: # do until update succeeds
+ (index, val) = conn.kv.get(key) # index gives version of key retrieved
+
+ if val is None: # no key yet
+ vstring = '{}'
+ mod_index = 0 # Use 0 as the cas index for initial insertion of the key
+ else:
+ vstring = val['Value']
+ mod_index = val['ModifyIndex']
+
+ # Build the updated dict
+ # Exceptions just propagate
+ v = json.loads(vstring)
+ v[add_name] = add_value
+ new_vstring = json.dumps(v)
+
+ updated = conn.kv.put(key, new_vstring, cas=mod_index) # if the key has changed since retrieval, this will return false
+ if updated:
+ return v
diff --git a/docker/dockerplugin/exceptions.py b/docker/dockerplugin/exceptions.py
new file mode 100644
index 0000000..0d8a341
--- /dev/null
+++ b/docker/dockerplugin/exceptions.py
@@ -0,0 +1,29 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+class DockerPluginDeploymentError(RuntimeError):
+ pass
+
+
+class DockerPluginDependencyNotReadyError(RuntimeError):
+ """Error to use when something that this plugin depends upon e.g. docker api,
+ consul is not ready"""
+ pass
+
diff --git a/docker/dockerplugin/tasks.py b/docker/dockerplugin/tasks.py
new file mode 100644
index 0000000..a41f143
--- /dev/null
+++ b/docker/dockerplugin/tasks.py
@@ -0,0 +1,563 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# Lifecycle interface calls for DockerContainer
+
+import json, time, copy
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+import dockering as doc
+from dockerplugin import discovery as dis
+from dockerplugin.decorators import monkeypatch_loggers, wrap_error_handling_start, \
+ merge_inputs_for_start
+from dockerplugin.exceptions import DockerPluginDeploymentError, \
+ DockerPluginDependencyNotReadyError
+from dockerplugin import utils
+
+# TODO: Remove this Docker port hardcoding and query for this port instead
+DOCKER_PORT = 2376
+# Always use the local Consul agent for interfacing with Consul from the plugin.
+# It is safe to assume that it's always there.
+CONSUL_HOST = "localhost"
+
+# Used to construct delivery urls for data router subscribers. Data router in FTL
+# requires https, but this author believes that ONAP defaults to http.
+DEFAULT_SCHEME = "http"
+
+# Property keys
+SERVICE_COMPONENT_NAME = "service_component_name"
+SELECTED_CONTAINER_DESTINATION = "selected_container_destination"
+CONTAINER_ID = "container_id"
+
+# Lifecycle interface calls for dcae.nodes.DockerContainer
+
+def _setup_for_discovery(**kwargs):
+ """Setup for config discovery"""
+ try:
+ name = kwargs['name']
+ application_config = kwargs['application_config']
+
+        # NOTE: application_config is no longer a json string; it is now input as a
+        # YAML map, which translates to a dict. We don't have to do any
+        # preprocessing anymore.
+ conn = dis.create_kv_conn(CONSUL_HOST)
+ dis.push_service_component_config(conn, name, application_config)
+ return kwargs
+ except dis.DiscoveryConnectionError as e:
+ raise RecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while pushing configuration: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+def _generate_component_name(**kwargs):
+ """Generate component name"""
+ service_component_type = kwargs['service_component_type']
+ service_id = kwargs['service_id']
+ location_id = kwargs['location_id']
+
+ name_override = kwargs['service_component_name_override']
+
+ kwargs['name'] = name_override if name_override \
+ else dis.generate_service_component_name(service_component_type,
+ service_id, location_id)
+ return kwargs
+
+def _done_for_create(**kwargs):
+ """Wrap up create operation"""
+ name = kwargs['name']
+ kwargs[SERVICE_COMPONENT_NAME] = name
+    # All updates to the runtime_properties happen here. This is safe because the
+    # context is not being mutated by anything else, and it keeps the other
+    # functions pure (pure in the sense of not dealing with CloudifyContext).
+ ctx.instance.runtime_properties.update(kwargs)
+ ctx.logger.info("Done setting up: {0}".format(name))
+ return kwargs
+
+
+@monkeypatch_loggers
+@operation
+def create_for_components(**kwargs):
+ """Create step for Docker containers that are components
+
+    This interface is responsible for:
+
+ 1. Generating service component name
+ 2. Populating config information into Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **_generate_component_name(
+ **ctx.node.properties)))
+
+
+def _parse_streams(**kwargs):
+ """Parse streams and setup for DMaaP plugin"""
+ # The DMaaP plugin requires this plugin to set the runtime properties
+ # keyed by the node name.
+ def setup_publishes(s):
+ kwargs[s["name"]] = s
+
+    # Use a for loop (not map) so the side effects run eagerly on Python 2 and 3
+    for s in kwargs["streams_publishes"]:
+        setup_publishes(s)
+
+ def setup_subscribes(s):
+ if s["type"] == "data_router":
+            # If username and password have not been provided, then generate them.
+            # The DMaaP plugin doesn't generate them for subscribers. The generation
+            # code and the username/password lengths have been lifted from the DMaaP
+            # plugin.
+
+ # Don't want to mutate the source
+ s = copy.deepcopy(s)
+ if not s.get("username", None):
+ s["username"] = utils.random_string(8)
+ if not s.get("password", None):
+ s["password"] = utils.random_string(10)
+
+ kwargs[s["name"]] = s
+
+    # NOTE: The delivery url is constructed and set up in the start operation
+    for s in kwargs["streams_subscribes"]:
+        setup_subscribes(s)
+
+ return kwargs
+
+def _setup_for_discovery_streams(**kwargs):
+ """Setup for discovery of streams
+
+ Specifically, there's a race condition this call addresses for data router
+ subscriber case. The component needs its feed subscriber information but the
+ DMaaP plugin doesn't provide this until after the docker plugin start
+ operation.
+ """
+ dr_subs = [kwargs[s["name"]] for s in kwargs["streams_subscribes"] \
+ if s["type"] == "data_router"]
+
+ if dr_subs:
+ dmaap_kv_key = "{0}:dmaap".format(kwargs["name"])
+ conn = dis.create_kv_conn(CONSUL_HOST)
+
+ def add_feed(dr_sub):
+            # delivery url and subscriber id will be filled in by the dmaap plugin later
+ v = { "location": dr_sub["location"], "delivery_url": None,
+ "username": dr_sub["username"], "password": dr_sub["password"],
+ "subscriber_id": None }
+            return dis.add_to_entry(conn, dmaap_kv_key, dr_sub["name"], v) is not None
+
+ try:
+ if all(map(add_feed, dr_subs)):
+ return kwargs
+ except Exception as e:
+ raise NonRecoverableError(e)
+
+ # You should never get here
+ raise NonRecoverableError("Failure updating feed streams in Consul")
+ else:
+ return kwargs
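+
+# For the data router subscriber example above, the resulting Consul entry looks
+# roughly like this (a sketch; the None fields are filled in later by the DMaaP
+# plugin):
+#
+#   <service_component_name>:dmaap = {"feed00": {"location": "mtc5",
+#       "delivery_url": None, "username": "...", "password": "...",
+#       "subscriber_id": None}}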
+
+
+@monkeypatch_loggers
+@operation
+def create_for_components_with_streams(**kwargs):
+ """Create step for Docker containers that are components that use DMaaP
+
+    This interface is responsible for:
+
+ 1. Generating service component name
+    2. Setting up runtime properties for the DMaaP plugin
+ 3. Populating application config into Consul
+ 4. Populating DMaaP config for data router subscribers in Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **_setup_for_discovery_streams(
+ **_parse_streams(
+ **_generate_component_name(
+ **ctx.node.properties)))))
+
+
+@monkeypatch_loggers
+@operation
+def create_for_platforms(**kwargs):
+ """Create step for Docker containers that are platform components
+
+    This interface is responsible for:
+
+ 1. Populating config information into Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **ctx.node.properties))
+
+
+def _lookup_service(service_component_name, consul_host=CONSUL_HOST,
+ with_port=False):
+ conn = dis.create_kv_conn(consul_host)
+ results = dis.lookup_service(conn, service_component_name)
+
+ if with_port:
+ # Just grab first
+ result = results[0]
+ return "{address}:{port}".format(address=result["ServiceAddress"],
+ port=result["ServicePort"])
+ else:
+ return results[0]["ServiceAddress"]
+
+
+def _verify_container(service_component_name, max_wait, consul_host=CONSUL_HOST):
+ """Verify that the container is healthy
+
+ Args:
+ -----
+    max_wait (integer): limit on how many attempts to make, which translates to
+        seconds because each sleep is one second. 0 means wait forever.
+
+ Return:
+ -------
+ True if component is healthy else a DockerPluginDeploymentError exception
+ will be raised.
+ """
+ num_attempts = 1
+
+ while True:
+ if dis.is_healthy(consul_host, service_component_name):
+ return True
+ else:
+ num_attempts += 1
+
+ if max_wait > 0 and max_wait < num_attempts:
+ raise DockerPluginDeploymentError("Container never became healthy")
+
+ time.sleep(1)
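+
+# e.g. _verify_container(name, 300) polls the local Consul agent roughly once per
+# second and raises DockerPluginDeploymentError after 300 failed health checks.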
+
+
+def _create_and_start_container(container_name, image, docker_host,
+ consul_host=CONSUL_HOST, **kwargs):
+ """Create and start Docker container
+
+    This function does the heavier lifting, including resolving the Docker host to
+    connect to and handling common container setup steps like making sure
+    CONSUL_HOST gets set to the local Docker host ip.
+
+ This method raises DockerPluginDependencyNotReadyError
+ """
+ try:
+ # Setup for Docker operations
+
+ docker_host_ip = _lookup_service(docker_host, consul_host=consul_host)
+
+ client = doc.create_client(docker_host_ip, DOCKER_PORT)
+
+ hcp = doc.add_host_config_params_volumes(volumes=kwargs.get("volumes",
+ None))
+ hcp = doc.add_host_config_params_ports(ports=kwargs.get("ports", None),
+ host_config_params=hcp)
+ hcp = doc.add_host_config_params_dns(docker_host_ip,
+ host_config_params=hcp)
+
+ # NOTE: The critical env variable CONSUL_HOST is being assigned the
+ # docker host ip itself because there should be a local Consul agent. We
+ # want services to register with their local Consul agent.
+ # CONFIG_BINDING_SERVICE is here for backwards compatibility. This is a
+ # well-known name now.
+ platform_envs = { "CONSUL_HOST": docker_host_ip,
+ "CONFIG_BINDING_SERVICE": "config_binding_service" }
+ # NOTE: The order of the envs being passed in is **important**. The
+ # kwargs["envs"] getting passed in last ensures that manual overrides
+ # will override the hardcoded envs.
+ envs = doc.create_envs(container_name, platform_envs, kwargs.get("envs", {}))
+
+ # Do Docker operations
+
+ container = doc.create_container(client, image, container_name, envs, hcp)
+ container_id = doc.start_container(client, container)
+
+ return container_id
+ except (doc.DockerConnectionError, dis.DiscoveryConnectionError,
+ dis.DiscoveryServiceNotFoundError) as e:
+ raise DockerPluginDependencyNotReadyError(e)
+
+
+def _enhance_docker_params(**kwargs):
+ """Setup Docker envs"""
+ docker_config = kwargs.get("docker_config", {})
+
+ envs = kwargs.get("envs", {})
+ # NOTE: Healthchecks are optional until prepared to handle use cases that
+ # don't necessarily use http
+ envs_healthcheck = doc.create_envs_healthcheck(docker_config) \
+ if "healthcheck" in docker_config else {}
+ envs.update(envs_healthcheck)
+ kwargs["envs"] = envs
+
+ def combine_params(key, docker_config, kwargs):
+ v = docker_config.get(key, []) + kwargs.get(key, [])
+ if v:
+ kwargs[key] = v
+ return kwargs
+
+ # Add the lists of ports and volumes unintelligently - meaning just add the
+ # lists together with no deduping.
+ kwargs = combine_params("ports", docker_config, kwargs)
+ kwargs = combine_params("volumes", docker_config, kwargs)
+
+ return kwargs
+
+def _create_and_start_component(**kwargs):
+ """Create and start component (container)"""
+ image = kwargs["image"]
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ docker_host = kwargs[SELECTED_CONTAINER_DESTINATION]
+    # Be picky and manually select the needed pieces because passing the full
+    # kwargs (which contains everything) confuses _create_and_start_container
+    # when duplicate variables exist
+ sub_kwargs = { "volumes": kwargs.get("volumes", []),
+ "ports": kwargs.get("ports", None), "envs": kwargs.get("envs", {}) }
+
+ container_id = _create_and_start_container(service_component_name, image,
+ docker_host, **sub_kwargs)
+ kwargs[CONTAINER_ID] = container_id
+
+ # TODO: Use regular logging here
+ ctx.logger.info("Container started: {0}, {1}".format(container_id,
+ service_component_name))
+
+ return kwargs
+
+def _verify_component(**kwargs):
+ """Verify component (container) is healthy"""
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ # TODO: "Consul doesn't make its first health check immediately upon registration.
+ # Instead it waits for the health check interval to pass."
+ # Possible enhancement is to read the interval (and possibly the timeout) from
+ # docker_config and multiply that by a number to come up with a more suitable
+ # max_wait.
+ max_wait = kwargs.get("max_wait", 300)
+
+ # Verify that the container is healthy
+
+ if _verify_container(service_component_name, max_wait):
+ container_id = kwargs[CONTAINER_ID]
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+
+ # TODO: Use regular logging here
+ ctx.logger.info("Container is healthy: {0}, {1}".format(container_id,
+ service_component_name))
+
+ return kwargs
+
+def _done_for_start(**kwargs):
+ ctx.instance.runtime_properties.update(kwargs)
+ ctx.logger.info("Done starting: {0}".format(kwargs["name"]))
+ return kwargs
+
+@wrap_error_handling_start
+@merge_inputs_for_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_components(**start_inputs):
+ """Create Docker container and start for components
+
+ This operation method is to be used with the DockerContainerForComponents
+ node type. After launching the container, the plugin will verify with Consul
+ that the app is up and healthy before terminating.
+ """
+ _done_for_start(
+ **_verify_component(
+ **_create_and_start_component(
+ **_enhance_docker_params(**start_inputs))))
+
+
+def _update_delivery_url(**kwargs):
+ """Update the delivery url for data router subscribers"""
+ dr_subs = [kwargs[s["name"]] for s in kwargs["streams_subscribes"] \
+ if s["type"] == "data_router"]
+
+ if dr_subs:
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+        # TODO: Should NOT be setting up the delivery url with ip addresses;
+        # in the https case this will not work because data router validates
+        # the certificate against the fqdn.
+ subscriber_host = _lookup_service(service_component_name, with_port=True)
+
+ for dr_sub in dr_subs:
+ scheme = dr_sub["scheme"] if "scheme" in dr_sub else DEFAULT_SCHEME
+ path = dr_sub["route"]
+ dr_sub["delivery_url"] = "{scheme}://{host}/{path}".format(
+ scheme=scheme, host=subscriber_host, path=path)
+ kwargs[dr_sub["name"]] = dr_sub
+
+ return kwargs
+
+@wrap_error_handling_start
+@merge_inputs_for_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_components_with_streams(**start_inputs):
+ """Create Docker container and start for components that have streams
+
+ This operation method is to be used with the DockerContainerForComponents
+ node type. After launching the container, the plugin will verify with Consul
+ that the app is up and healthy before terminating.
+ """
+ _done_for_start(
+ **_update_delivery_url(
+ **_verify_component(
+ **_create_and_start_component(
+ **_enhance_docker_params(**start_inputs)))))
+
+
+@wrap_error_handling_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_platforms(**kwargs):
+ """Create Docker container and start for platform services
+
+ This operation method is to be used with the DockerContainerForPlatforms
+ node type. After launching the container, the plugin will verify with Consul
+ that the app is up and healthy before terminating.
+ """
+ image = ctx.node.properties["image"]
+ docker_config = ctx.node.properties.get("docker_config", {})
+ service_component_name = ctx.node.properties["name"]
+
+ docker_host = ctx.instance.runtime_properties[SELECTED_CONTAINER_DESTINATION]
+
+ envs = kwargs.get("envs", {})
+ # NOTE: Healthchecks are optional until prepared to handle use cases that
+ # don't necessarily use http
+ envs_healthcheck = doc.create_envs_healthcheck(docker_config) \
+ if "healthcheck" in docker_config else {}
+ envs.update(envs_healthcheck)
+ kwargs["envs"] = envs
+
+ host_port = ctx.node.properties["host_port"]
+ container_port = ctx.node.properties["container_port"]
+
+ # Cloudify properties are all required and Cloudify complains that None
+ # is not a valid type for integer. Defaulting to 0 to indicate to not
+ # use this and not to set a specific port mapping in cases like service
+ # change handler.
+ if host_port != 0 and container_port != 0:
+ # Doing this because other nodes might want to use this property
+ port_mapping = "{cp}:{hp}".format(cp=container_port, hp=host_port)
+ ports = kwargs.get("ports", []) + [ port_mapping ]
+ kwargs["ports"] = ports
+ if "ports" not in kwargs:
+ ctx.logger.warn("No port mappings defined. Will randomly assign port.")
+
+ container_id = _create_and_start_container(service_component_name, image,
+ docker_host, **kwargs)
+ ctx.instance.runtime_properties[CONTAINER_ID] = container_id
+
+ ctx.logger.info("Container started: {0}, {1}".format(container_id,
+ service_component_name))
+
+ # Verify that the container is healthy
+
+ max_wait = kwargs.get("max_wait", 300)
+
+ if _verify_container(service_component_name, max_wait):
+ ctx.logger.info("Container is healthy: {0}, {1}".format(container_id,
+ service_component_name))
+
+
+@wrap_error_handling_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container(**kwargs):
+ """Create Docker container and start"""
+ service_component_name = ctx.node.properties["name"]
+ ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME] = service_component_name
+
+ image = ctx.node.properties["image"]
+ docker_host = ctx.instance.runtime_properties[SELECTED_CONTAINER_DESTINATION]
+
+ container_id = _create_and_start_container(service_component_name, image,
+ docker_host, **kwargs)
+ ctx.instance.runtime_properties[CONTAINER_ID] = container_id
+
+ ctx.logger.info("Container started: {0}, {1}".format(container_id,
+ service_component_name))
+
+
+@monkeypatch_loggers
+@operation
+def stop_and_remove_container(**kwargs):
+ """Stop and remove Docker container"""
+ try:
+ docker_host = ctx.instance.runtime_properties[SELECTED_CONTAINER_DESTINATION]
+
+ docker_host_ip = _lookup_service(docker_host)
+
+ client = doc.create_client(docker_host_ip, DOCKER_PORT)
+
+ container_id = ctx.instance.runtime_properties[CONTAINER_ID]
+ doc.stop_then_remove_container(client, container_id)
+
+ cleanup_image = kwargs.get("cleanup_image", False)
+
+ if cleanup_image:
+ image = ctx.node.properties["image"]
+
+ if doc.remove_image(client, image):
+ ctx.logger.info("Removed Docker image: {0}".format(image))
+ else:
+ ctx.logger.warn("Couldnot remove Docker image: {0}".format(image))
+ except (doc.DockerConnectionError, dis.DiscoveryConnectionError,
+ dis.DiscoveryServiceNotFoundError) as e:
+ raise RecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while stopping container: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+@monkeypatch_loggers
+@operation
+def cleanup_discovery(**kwargs):
+ """Delete configuration from Consul"""
+ service_component_name = ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+
+ try:
+ conn = dis.create_kv_conn(CONSUL_HOST)
+ dis.remove_service_component_config(conn, service_component_name)
+ except dis.DiscoveryConnectionError as e:
+ raise RecoverableError(e)
+
+
+# Lifecycle interface calls for dcae.nodes.DockerHost
+
+@operation
+def select_docker_host(**kwargs):
+ selected_docker_host = ctx.node.properties['docker_host_override']
+
+ if selected_docker_host:
+ ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME] = selected_docker_host
+ ctx.logger.info("Selected Docker host: {0}".format(selected_docker_host))
+ else:
+ raise NonRecoverableError("Failed to find a suitable Docker host")
+
+@operation
+def unselect_docker_host(**kwargs):
+ del ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+ ctx.logger.info("Unselected Docker host")
+
diff --git a/docker/dockerplugin/utils.py b/docker/dockerplugin/utils.py
new file mode 100644
index 0000000..ed680c2
--- /dev/null
+++ b/docker/dockerplugin/utils.py
@@ -0,0 +1,28 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import string
+import random
+
+
+def random_string(n):
+ """Random generate an ascii string of "n" length"""
+ corpus = string.ascii_lowercase + string.ascii_uppercase + string.digits
+ return ''.join(random.choice(corpus) for x in range(n))
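+
+# e.g. random_string(8) might return "aZ3kQ9xb" (illustrative output only)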
diff --git a/docker/examples/blueprint-laika-dmaap-pubs.yaml b/docker/examples/blueprint-laika-dmaap-pubs.yaml
new file mode 100644
index 0000000..4616b0c
--- /dev/null
+++ b/docker/examples/blueprint-laika-dmaap-pubs.yaml
@@ -0,0 +1,148 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This Blueprint installs a chain of two laika instances on a Docker cluster
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.2.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/dmaap/1.1.0/dmaap.yaml
+
+inputs:
+
+ service_id:
+ description: Unique id used for an instance of this DCAE service. Use deployment id
+ default: 'foobar'
+
+ topic00_aaf_username:
+ topic00_aaf_password:
+ topic00_location:
+ default: mtc5
+ topic00_client_role:
+
+ topic01_aaf_username:
+ topic01_aaf_password:
+ topic01_location:
+ default: mtc5
+ topic01_client_role:
+
+ feed00_location:
+ default: mtc5
+
+ feed01_location:
+ default: mtc5
+
+ topic00fqtn:
+ type: string
+ topic01fqtn:
+ type: string
+ laika_image:
+ type: string
+
+node_templates:
+
+ topic00:
+ type: dcae.nodes.ExistingTopic
+ properties:
+ fqtn: { get_input : topic00fqtn }
+
+ topic01:
+ type: dcae.nodes.ExistingTopic
+ properties:
+ fqtn: { get_input : topic01fqtn }
+
+ feed00:
+ type: dcae.nodes.Feed
+ properties:
+ # NOTE: Had to manually make unique feed names per test because I've been told there's
+ # an issue with feeds not being deleted by uninstall.
+ feed_name: "feed00-pub-laika"
+ feed_description: "Feed00 to test pub for laika"
+ feed_version: 1.0.0
+ aspr_classification: "unclassified"
+
+ feed01:
+ type: dcae.nodes.Feed
+ properties:
+ feed_name: "feed01-pub-laika"
+ feed_description: "Feed01 to test pub for laika"
+ feed_version: 1.0.0
+ aspr_classification: "unclassified"
+
+ laika-one:
+ type: dcae.nodes.DockerContainerForComponentsUsingDmaap
+ properties:
+ service_component_type:
+ 'laika'
+ service_id:
+ { get_input: service_id }
+ location_id:
+ 'rework-central'
+ application_config:
+ some-param: "Lorem ipsum dolor sit amet"
+ streams_publishes:
+ topic-alpha:
+ aaf_username: { get_input: topic00_aaf_username }
+ aaf_password: { get_input: topic00_aaf_password }
+ type: "message_router"
+ dmaap_info: "<< topic00 >>"
+ topic-beta:
+ aaf_username: { get_input: topic01_aaf_username }
+ aaf_password: { get_input: topic01_aaf_password }
+ type: "message_router"
+ dmaap_info: "<< topic01 >>"
+ feed-gamma:
+ type: "data_router"
+ dmaap_info: "<< feed00 >>"
+ feed-kappa:
+ type: "data_router"
+ dmaap_info: "<< feed01 >>"
+ streams_subscribes: {}
+ services_calls: {}
+ image: { get_input : laika_image }
+ docker_config:
+ healthcheck:
+ type: "http"
+ endpoint: "/health"
+ streams_publishes:
+ - name: topic00
+ location: { get_input: topic00_location }
+ client_role: { get_input: topic00_client_role }
+ type: message_router
+ - name: topic01
+ location: { get_input: topic01_location }
+ client_role: { get_input: topic01_client_role }
+ type: message_router
+ - name: feed00
+ location: { get_input: feed00_location }
+ type: data_router
+ - name: feed01
+ location: { get_input: feed01_location }
+ type: data_router
+ streams_subscribes: []
+ relationships:
+ - type: dcae.relationships.component_contained_in
+ target: docker_host
+ - type: dcae.relationships.publish_events
+ target: topic00
+ - type: dcae.relationships.publish_events
+ target: topic01
+ - type: dcae.relationships.publish_files
+ target: feed00
+ - type: dcae.relationships.publish_files
+ target: feed01
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ stop:
+ inputs:
+ cleanup_image:
+ False
+
+ docker_host:
+ type: dcae.nodes.SelectedDockerHost
+ properties:
+ location_id:
+ 'rework-central'
+ docker_host_override:
+ 'component_dockerhost'
diff --git a/docker/examples/blueprint-laika-dmaap-pubsub.yaml b/docker/examples/blueprint-laika-dmaap-pubsub.yaml
new file mode 100644
index 0000000..bcbbb17
--- /dev/null
+++ b/docker/examples/blueprint-laika-dmaap-pubsub.yaml
@@ -0,0 +1,150 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This Blueprint installs a single laika instance that publishes and subscribes to DMaaP topics and feeds
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.2.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/dmaap/1.1.0/dmaap.yaml
+
+inputs:
+
+ service_id:
+ description: Unique id used for an instance of this DCAE service. Use deployment id
+ default: 'foobar'
+
+ topic00_aaf_username:
+ topic00_aaf_password:
+ topic00_location:
+ default: mtc5
+ topic00_client_role:
+
+ topic01_aaf_username:
+ topic01_aaf_password:
+ topic01_location:
+ default: mtc5
+ topic01_client_role:
+
+ feed00_location:
+ default: mtc5
+
+ feed01_location:
+ default: mtc5
+
+ topic00fqtn:
+ type: string
+ topic01fqtn:
+ type: string
+ laika_image:
+ type: string
+
+node_templates:
+
+ topic00:
+ type: dcae.nodes.ExistingTopic
+ properties:
+ fqtn: { get_input : topic00fqtn }
+
+ topic01:
+ type: dcae.nodes.ExistingTopic
+ properties:
+ fqtn: { get_input : topic01fqtn }
+
+ feed00:
+ type: dcae.nodes.Feed
+ properties:
+ # NOTE: Had to manually make unique feed names per test because I've been told there's
+ # an issue with feeds not being deleted by uninstall.
+ feed_name: "feed00-pub-laika"
+ feed_description: "Feed00 to test pub for laika"
+ feed_version: 1.0.0
+ aspr_classification: "unclassified"
+
+ feed01:
+ type: dcae.nodes.Feed
+ properties:
+ feed_name: "feed01-sub-laika"
+ feed_description: "Feed01 to test sub for laika"
+ feed_version: 1.0.0
+ aspr_classification: "unclassified"
+
+ laika-one:
+ type: dcae.nodes.DockerContainerForComponentsUsingDmaap
+ properties:
+ service_component_type:
+ 'laika'
+ service_id:
+ { get_input: service_id }
+ location_id:
+ 'rework-central'
+ application_config:
+ some-param: "Lorem ipsum dolor sit amet"
+ streams_publishes:
+ my-publishing-topic:
+ aaf_username: { get_input: topic00_aaf_username }
+ aaf_password: { get_input: topic00_aaf_password }
+ type: "message_router"
+ dmaap_info: "<< topic00 >>"
+ my-publishing-feed:
+ type: "data_router"
+ dmaap_info: "<< feed00 >>"
+ streams_subscribes:
+ my-subscribing-topic:
+ aaf_username: { get_input: topic01_aaf_username }
+ aaf_password: { get_input: topic01_aaf_password }
+ type: "message_router"
+ dmaap_info: "<< topic01 >>"
+ my-subscribing-feed:
+ type: "data_router"
+ dmaap_info: "<< feed01 >>"
+ services_calls: {}
+ image: { get_input : laika_image }
+ docker_config:
+ healthcheck:
+ type: "http"
+ endpoint: "/health"
+ streams_publishes:
+ - name: topic00
+ location: { get_input: topic00_location }
+ client_role: { get_input: topic00_client_role }
+ type: message_router
+ - name: feed00
+ location: { get_input: feed00_location }
+ type: data_router
+ streams_subscribes:
+ - name: topic01
+ location: { get_input: topic01_location }
+ client_role: { get_input: topic01_client_role }
+ type: message_router
+ - name: feed01
+ location: { get_input: feed01_location }
+ type: data_router
+ route: identity
+ scheme: https
+ relationships:
+ - type: dcae.relationships.component_contained_in
+ target: docker_host
+ - type: dcae.relationships.publish_events
+ target: topic00
+ - type: dcae.relationships.subscribe_to_events
+ target: topic01
+ - type: dcae.relationships.publish_files
+ target: feed00
+ - type: dcae.relationships.subscribe_to_files
+ target: feed01
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ stop:
+ inputs:
+ cleanup_image:
+ False
+
+ docker_host:
+ type: dcae.nodes.SelectedDockerHost
+ properties:
+ location_id:
+ 'rework-central'
+ docker_host_override:
+ 'component_dockerhost'
diff --git a/docker/examples/blueprint-laika-dmaap-subs.yaml b/docker/examples/blueprint-laika-dmaap-subs.yaml
new file mode 100644
index 0000000..f6d0b3a
--- /dev/null
+++ b/docker/examples/blueprint-laika-dmaap-subs.yaml
@@ -0,0 +1,156 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This Blueprint installs a single laika instance that subscribes to DMaaP topics and feeds
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.2.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/dmaap/1.1.0/dmaap.yaml
+
+
+inputs:
+
+ service_id:
+ description: Unique id used for an instance of this DCAE service. Use deployment id
+ default: 'foobar'
+
+ topic00_aaf_username:
+ topic00_aaf_password:
+ topic00_location:
+ default: mtc5
+ topic00_client_role:
+
+ topic01_aaf_username:
+ topic01_aaf_password:
+ topic01_location:
+ default: mtc5
+ topic01_client_role:
+
+ feed00_location:
+ default: mtc5
+
+ feed01_location:
+ default: mtc5
+
+ topic00fqtn:
+ type: string
+ topic01fqtn:
+ type: string
+ laika_image:
+ type: string
+
+node_templates:
+
+ topic00:
+ type: dcae.nodes.ExistingTopic
+ properties:
+ fqtn: { get_input : topic00fqtn }
+
+ topic01:
+ type: dcae.nodes.ExistingTopic
+ properties:
+ fqtn: { get_input : topic01fqtn }
+
+ feed00:
+ type: dcae.nodes.Feed
+ properties:
+ # NOTE: Had to manually make unique feed names per test because I've been told there's
+ # an issue with feeds not being deleted by uninstall.
+ feed_name: "feed00-sub-laika"
+ feed_description: "Feed00 to test sub for laika"
+ feed_version: 1.0.0
+ aspr_classification: "unclassified"
+
+ feed01:
+ type: dcae.nodes.Feed
+ properties:
+ feed_name: "feed01-sub-laika"
+ feed_description: "Feed01 to test sub for laika"
+ feed_version: 1.0.0
+ aspr_classification: "unclassified"
+
+ laika-one:
+ type: dcae.nodes.DockerContainerForComponentsUsingDmaap
+ properties:
+ service_component_type:
+ 'laika'
+ service_id:
+ { get_input: service_id }
+ location_id:
+ 'rework-central'
+ application_config:
+ some-param: "Lorem ipsum dolor sit amet"
+ streams_publishes: {}
+ streams_subscribes:
+ topic-alpha:
+ aaf_username: { get_input: topic00_aaf_username }
+ aaf_password: { get_input: topic00_aaf_password }
+ type: "message_router"
+ dmaap_info: "<< topic00 >>"
+ topic-beta:
+ aaf_username: { get_input: topic01_aaf_username }
+ aaf_password: { get_input: topic01_aaf_password }
+ type: "message_router"
+ dmaap_info: "<< topic01 >>"
+ feed-gamma:
+ type: "data_router"
+ dmaap_info: "<< feed00 >>"
+ feed-kappa:
+ type: "data_router"
+ dmaap_info: "<< feed01 >>"
+ services_calls: {}
+ image: { get_input : laika_image }
+ docker_config:
+ healthcheck:
+ type: "http"
+ endpoint: "/health"
+ streams_publishes: []
+ streams_subscribes:
+ - name: topic00
+ location: { get_input: topic00_location }
+ client_role: { get_input: topic00_client_role }
+ type: message_router
+ - name: topic01
+ location: { get_input: topic01_location }
+ client_role: { get_input: topic01_client_role }
+ type: message_router
+ - name: feed00
+ location: { get_input: feed00_location }
+ type: data_router
+ username: king
+ password: !!str 123456
+ route: identity
+ scheme: https
+ # This feed should have username/password generated
+ - name: feed01
+ location: { get_input: feed01_location }
+ type: data_router
+ route: identity
+ scheme: https
+ relationships:
+ - type: dcae.relationships.component_contained_in
+ target: docker_host
+ - type: dcae.relationships.subscribe_to_events
+ target: topic00
+ - type: dcae.relationships.subscribe_to_events
+ target: topic01
+ - type: dcae.relationships.subscribe_to_files
+ target: feed00
+ - type: dcae.relationships.subscribe_to_files
+ target: feed01
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ stop:
+ inputs:
+ cleanup_image:
+ False
+
+ docker_host:
+ type: dcae.nodes.SelectedDockerHost
+ properties:
+ location_id:
+ 'rework-central'
+ docker_host_override:
+ 'component_dockerhost'
diff --git a/docker/examples/blueprint-laika.yaml b/docker/examples/blueprint-laika.yaml
new file mode 100644
index 0000000..9a8dc46
--- /dev/null
+++ b/docker/examples/blueprint-laika.yaml
@@ -0,0 +1,89 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This Blueprint installs a chain of two laika instances on a Docker cluster
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.3.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+
+inputs:
+
+ service_id:
+ description: Unique id used for an instance of this DCAE service. Use deployment id
+ default: 'foobar'
+ laika_image:
+ type: string
+
+
+node_templates:
+
+ laika-zero:
+ type: dcae.nodes.DockerContainerForComponents
+ properties:
+ service_component_type:
+ 'laika'
+ service_id:
+ { get_input: service_id }
+ location_id:
+ 'rework-central'
+ application_config:
+ some-param: "Lorem ipsum dolor sit amet"
+ downstream-laika: "{{ laika }}"
+ image: { get_input : laika_image }
+ docker_config:
+ healthcheck:
+ type: "http"
+ endpoint: "/health"
+ relationships:
+ # Link to downstream laika
+ - type: dcae.relationships.component_connected_to
+ target: laika-one
+ - type: dcae.relationships.component_contained_in
+ target: docker_host
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ inputs:
+ ports:
+ - "8080:5432"
+ envs:
+ SOME-ENV: "BAM"
+ max_wait:
+ 120
+ stop:
+ inputs:
+ cleanup_image:
+ False
+
+ laika-one:
+ type: dcae.nodes.DockerContainerForComponents
+ properties:
+ service_component_type:
+ 'laika'
+ service_id:
+ { get_input: service_id }
+ location_id:
+ 'rework-central'
+ application_config:
+ some-param: "Lorem ipsum dolor sit amet"
+ image: { get_input : laika_image }
+ # Trying without health check
+ relationships:
+ - type: dcae.relationships.component_contained_in
+ target: docker_host
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ stop:
+ inputs:
+ cleanup_image:
+ False
+
+ docker_host:
+ type: dcae.nodes.SelectedDockerHost
+ properties:
+ location_id:
+ 'rework-central'
+ docker_host_override:
+ 'platform_dockerhost'
diff --git a/docker/examples/blueprint-registrator.yaml b/docker/examples/blueprint-registrator.yaml
new file mode 100644
index 0000000..d9d6449
--- /dev/null
+++ b/docker/examples/blueprint-registrator.yaml
@@ -0,0 +1,47 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This Blueprint installs registrator on a Docker host
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.3.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+
+inputs:
+ registrator-image:
+ type: string
+ external_ip:
+ type: string
+
+node_templates:
+
+ registrator:
+ type: dcae.nodes.DockerContainer
+ properties:
+ name:
+ 'test-registrator'
+ image: { get_input : registrator-image }
+ relationships:
+ - type: dcae.relationships.component_contained_in
+ target: docker_host
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ inputs:
+ envs:
+ EXTERNAL_IP: { get_input : external_ip }
+ volumes:
+ - host:
+ path: '/var/run/docker.sock'
+ container:
+ bind: '/tmp/docker.sock'
+ mode: 'ro'
+
+ docker_host:
+ type: dcae.nodes.SelectedDockerHost
+ properties:
+ location_id:
+ 'rework-central'
+ docker_host_override:
+ 'platform_dockerhost'
diff --git a/docker/requirements.txt b/docker/requirements.txt
new file mode 100644
index 0000000..c76c229
--- /dev/null
+++ b/docker/requirements.txt
@@ -0,0 +1,3 @@
+# TODO: May need to add the following line
+# --extra-index-url <onap pypi url>
+python-dockering==1.2.0
diff --git a/docker/setup.py b/docker/setup.py
new file mode 100644
index 0000000..9cdef0e
--- /dev/null
+++ b/docker/setup.py
@@ -0,0 +1,36 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import os
+from setuptools import setup
+
+setup(
+ name='dockerplugin',
+ description='Cloudify plugin for applications run in Docker containers',
+ version="2.3.0",
+ author='Michael Hwang, Tommy Carpenter',
+ packages=['dockerplugin'],
+ zip_safe=False,
+ install_requires=[
+ "python-consul>=0.6.0,<1.0.0",
+ "python-dockering>=1.0.0,<2.0.0",
+ "uuid==1.30"
+ ]
+)
diff --git a/docker/tests/test_discovery.py b/docker/tests/test_discovery.py
new file mode 100644
index 0000000..9a18519
--- /dev/null
+++ b/docker/tests/test_discovery.py
@@ -0,0 +1,40 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import pytest
+from functools import partial
+import requests
+from dockerplugin import discovery as dis
+
+
+def test_wrap_consul_call():
+ def foo(a, b, c="default"):
+ return " ".join([a, b, c])
+
+ wrapped_foo = partial(dis._wrap_consul_call, foo)
+ assert wrapped_foo("hello", "world") == "hello world default"
+ assert wrapped_foo("hello", "world", c="new masters") == "hello world new masters"
+
+ def foo_connection_error(a, b, c):
+ raise requests.exceptions.ConnectionError("simulate failed connection")
+
+ wrapped_foo = partial(dis._wrap_consul_call, foo_connection_error)
+ with pytest.raises(dis.DiscoveryConnectionError):
+ wrapped_foo("a", "b", "c")
diff --git a/docker/tests/test_tasks.py b/docker/tests/test_tasks.py
new file mode 100644
index 0000000..74482c6
--- /dev/null
+++ b/docker/tests/test_tasks.py
@@ -0,0 +1,198 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import copy
+import pytest
+from cloudify.exceptions import NonRecoverableError
+import dockerplugin
+from dockerplugin import tasks
+
+
+def test_parse_streams(monkeypatch):
+ # Good case for streams_publishes
+ test_input = { "streams_publishes": [{"name": "topic00", "type": "message_router"},
+ {"name": "feed00", "type": "data_router"}],
+ "streams_subscribes": {} }
+
+ expected = {'feed00': {'type': 'data_router', 'name': 'feed00'},
+ 'streams_publishes': [{'type': 'message_router', 'name': 'topic00'},
+ {'type': 'data_router', 'name': 'feed00'}],
+ 'streams_subscribes': {},
+ 'topic00': {'type': 'message_router', 'name': 'topic00'}
+ }
+
+ assert expected == tasks._parse_streams(**test_input)
+
+ # Good case for streams_subscribes (password provided)
+ test_input = { "streams_publishes": {},
+ "streams_subscribes": [{"name": "topic01", "type": "message_router"},
+ {"name": "feed01", "type": "data_router", "username": "hero",
+ "password": "123456"}] }
+
+ expected = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+
+ assert expected == tasks._parse_streams(**test_input)
+
+ # Good case for streams_subscribes (password generated)
+ test_input = { "streams_publishes": {},
+ "streams_subscribes": [{"name": "topic01", "type": "message_router"},
+ {"name": "feed01", "type": "data_router", "username": None,
+ "password": None}] }
+
+ def not_so_random(n):
+ return "nosurprise"
+
+ monkeypatch.setattr(dockerplugin.utils, "random_string", not_so_random)
+
+ expected = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'nosurprise', 'password': 'nosurprise'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': None,
+ 'password': None}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+
+ assert expected == tasks._parse_streams(**test_input)
+
+
+def test_setup_for_discovery_streams(monkeypatch):
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["name"] = "some-foo-service-component"
+
+ # Good case
+ def fake_add_to_entry(conn, key, add_name, add_value):
+ """
+ This fake method checks all the pieces that are used to store
+ details in Consul
+ """
+ if key != test_input["name"] + ":dmaap":
+ return None
+ if add_name != "feed01":
+ return None
+ if add_value != {"location": "Bedminster", "delivery_url": None,
+ "username": "hero", "password": "123456", "subscriber_id": None}:
+ return None
+
+ return "SUCCESS!"
+
+ monkeypatch.setattr(dockerplugin.discovery, "add_to_entry",
+ fake_add_to_entry)
+
+ assert tasks._setup_for_discovery_streams(**test_input) == test_input
+
+ # Good case - no data router subscribers
+ test_input = {"streams_publishes": [{"name": "topic00", "type": "message_router"}],
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'}]}
+ test_input["name"] = "some-foo-service-component"
+
+ assert tasks._setup_for_discovery_streams(**test_input) == test_input
+
+ # Bad case - something happened from the Consul call
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["name"] = "some-foo-service-component"
+
+ def barf(conn, key, add_name, add_value):
+ raise RuntimeError("Barf")
+
+ monkeypatch.setattr(dockerplugin.discovery, "add_to_entry",
+ barf)
+
+ with pytest.raises(NonRecoverableError):
+ tasks._setup_for_discovery_streams(**test_input)
+
+
+def test_update_delivery_url(monkeypatch):
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster',
+ 'route': 'some-path'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster',
+ 'route': 'some-path'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["service_component_name"] = "some-foo-service-component"
+
+ def fake_lookup_service(name, with_port=False):
+ if with_port:
+ return "10.100.1.100:8080"
+ else:
+ return
+
+ monkeypatch.setattr(dockerplugin.tasks, "_lookup_service",
+ fake_lookup_service)
+
+ expected = copy.deepcopy(test_input)
+ expected["feed01"]["delivery_url"] = "http://10.100.1.100:8080/some-path"
+
+ assert tasks._update_delivery_url(**test_input) == expected
+
+
+def test_enhance_docker_params():
+ # Good - Test empty docker config
+
+ test_kwargs = { "docker_config": {} }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {}, 'docker_config': {}}
+
+ # Good - Test just docker config ports and volumes
+
+ test_kwargs = { "docker_config": { "ports": ["1:1", "2:2"],
+ "volumes": [{"container": "somewhere", "host": "somewhere else"}] } }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {}, 'docker_config': {'ports': ['1:1', '2:2'],
+ 'volumes': [{'host': 'somewhere else', 'container': 'somewhere'}]},
+ 'ports': ['1:1', '2:2'], 'volumes': [{'host': 'somewhere else',
+ 'container': 'somewhere'}]}
+
+ # Good - Test just docker config ports and volumes with overrides
+
+ test_kwargs = { "docker_config": { "ports": ["1:1", "2:2"],
+ "volumes": [{"container": "somewhere", "host": "somewhere else"}] },
+ "ports": ["3:3", "4:4"], "volumes": [{"container": "nowhere", "host":
+ "nowhere else"}]}
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {}, 'docker_config': {'ports': ['1:1', '2:2'],
+ 'volumes': [{'host': 'somewhere else', 'container': 'somewhere'}]},
+ 'ports': ['1:1', '2:2', '3:3', '4:4'], 'volumes': [{'host': 'somewhere else',
+ 'container': 'somewhere'}, {'host': 'nowhere else', 'container':
+ 'nowhere'}]}
+
diff --git a/relationships/.gitignore b/relationships/.gitignore
new file mode 100644
index 0000000..5c75135
--- /dev/null
+++ b/relationships/.gitignore
@@ -0,0 +1,93 @@
+cfyhelper.sh
+.cloudify/
+*.wgn
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# dotenv
+.env
+
+# virtualenv
+.venv/
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+
+# Rope project settings
+.ropeproject
diff --git a/relationships/LICENSE.txt b/relationships/LICENSE.txt
new file mode 100644
index 0000000..cb8008a
--- /dev/null
+++ b/relationships/LICENSE.txt
@@ -0,0 +1,32 @@
+============LICENSE_START=======================================================
+org.onap.dcae
+================================================================================
+Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+
+Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+===================================================================
+Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+you may not use this documentation except in compliance with the License.
+You may obtain a copy of the License at
+ https://creativecommons.org/licenses/by/4.0/
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/relationships/README.md b/relationships/README.md
new file mode 100644
index 0000000..82d60f3
--- /dev/null
+++ b/relationships/README.md
@@ -0,0 +1,54 @@
+# relationships-cloudify
+
+This repo contains Cloudify artifacts to support custom functionality in processing relationships between nodes.
+
+* `component_connected_to` - Used to connect service component nodes together
+* `component_contained_in` - Used to place service component nodes on their proper container host or cluster
+
+## `component_connected_to`
+
+This Cloudify relationship is used to collect the relationship information of all the target nodes of a source node and to provide this information to the source node application. Currently, this information is stored in the platform Consul instance under the key `<source node's name>:rel`.
+
+Each target node is expected to provide its own name (the name used for service registration) to the source node. These target names are passed as a runtime property of the target node context under the key `service_component_name`.
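+
+For illustration, here is a minimal sketch (not part of this plugin) of how a source component could read its relationship list, assuming a local Consul agent and the `python-consul` client; the name `collector0` is hypothetical:
+
+```python
+import json
+import consul
+
+# Assumes a Consul agent on localhost:8500; "collector0" is an example name
+conn = consul.Consul(host="localhost", port=8500)
+index, entry = conn.kv.get("collector0:rel")
+targets = json.loads(entry["Value"].decode("utf-8")) if entry else []
+# e.g. targets == ["analytics0"]
+```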
+
+### Special features
+
+#### `target_name_override`
+
+The preconfigure operation uses the task function `add_relationship`, which has an *optional* input parameter `target_name_override`. The target name becomes part of the source node's relationship information, which the source node's underlying application uses to connect to the target node's underlying application. If the override is not used, the target name is expected to come from the target node as a runtime property under the key `service_component_name`.
+
+##### When should you use this?
+
+When you know the target node does not populate `service_component_name` into its runtime properties.
+
+##### Usage example
+
+Here is an example of how you would use the `target_name_override` input parameter in a blueprint:
+
+```yaml
+node_templates:
+
+ some-source:
+ type: dcae.nodes.rework.DockerContainer
+ properties:
+ service_component_type:
+ 'laika'
+ service_id:
+ { get_input: service_id }
+ location_id:
+ 'rework-central'
+ application_config:
+ { get_input: some-source-config }
+ image:
+ 'dcae-rework/laika:0.4.0'
+ relationships:
+ - type: dcae.relationships.component_connected_to
+ target: some-target
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure:
+ inputs:
+ target_name_override:
+ 'some-target'
+```
+
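+## `component_contained_in`
+
+This Cloudify relationship is used to pass the selected container host to the source node. Its preconfigure operation, `forward_destination_info`, copies the target node's `service_component_name` runtime property into the source node's runtime properties under the key `selected_container_destination`.
+
+A typical usage, as seen in the docker plugin example blueprints:
+
+```yaml
+relationships:
+  - type: dcae.relationships.component_contained_in
+    target: docker_host
+```
+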
diff --git a/relationships/example_register_to_blueprint.yaml b/relationships/example_register_to_blueprint.yaml
new file mode 100644
index 0000000..52ee40b
--- /dev/null
+++ b/relationships/example_register_to_blueprint.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+
+node_templates:
+
+ src:
+ type: cloudify.nodes.Root
+ relationships:
+ - type: dcae.relationships.component_registered_to
+ target: tgt #agree this is kind of weird to be a relationship type now with a dummy target
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure:
+ inputs:
+ address_to_register: "666.666.666.666"
+ port_to_register: "666"
+ name_to_register: "TEST_REGISTERED_TO_SERVICE"
+ location_id: "rework-central"
+
+ tgt: #do relationships always need targets?
+ type: cloudify.nodes.Root
diff --git a/relationships/relationship-types.yaml b/relationships/relationship-types.yaml
new file mode 100644
index 0000000..d0ab59f
--- /dev/null
+++ b/relationships/relationship-types.yaml
@@ -0,0 +1,65 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+
+plugins:
+ relationships:
+ executor: 'central_deployment_agent'
+ package_name: relationshipplugin
+ package_version: 1.0.0
+
+relationships:
+ # The relationship type here is to be used between service component nodes. It
+ # provides this relationship information to the service components so that they
+ # can do service discovery.
+ #
+ # This function creates or adds to the rels list for components. For example,
+ # going from a "collector node -> analytics node", this is the edge, and it will add:
+ #
+ # ```
+ # "collector_name:rel": ["analytics_name"]
+ # ```
+ #
+ # To the key value store.
+ #
+ dcae.relationships.component_connected_to:
+ derived_from: cloudify.relationships.connected_to
+ # These operations are for adding and removing relationships from Consul
+ # http://getcloudify.org/guide/3.1/reference-builtin-workflows.html
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure:
+ # Adds target to the source relationship list
+ implementation: relationships.relationshipplugin.add_relationship
+ unlink:
+ # Removes target from the source relationship list
+ implementation: relationships.relationshipplugin.remove_relationship
+
+ dcae.relationships.component_contained_in:
+ derived_from: cloudify.relationships.contained_in
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure:
+ implementation: relationships.relationshipplugin.forward_destination_info
+ # TODO: Is there a need for "unlink"?
+
+ dcae.relationships.component_registered_to:
+ # Operates on a relationship A -> B; the source node is registered with Consul
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure:
+ implementation: relationships.relationshipplugin.tasks.registered_to
+ inputs:
+ address_to_register:
+ type: string
+ port_to_register:
+ type: string
+ name_to_register:
+ type: string
+ unlink:
+ implementation: relationships.relationshipplugin.tasks.registered_to_delete
+
diff --git a/relationships/relationshipplugin/__init__.py b/relationships/relationshipplugin/__init__.py
new file mode 100644
index 0000000..259e52c
--- /dev/null
+++ b/relationships/relationshipplugin/__init__.py
@@ -0,0 +1,23 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+
+from .tasks import add_relationship, remove_relationship, \
+ forward_destination_info
diff --git a/relationships/relationshipplugin/discovery.py b/relationships/relationshipplugin/discovery.py
new file mode 100644
index 0000000..bd0e369
--- /dev/null
+++ b/relationships/relationshipplugin/discovery.py
@@ -0,0 +1,84 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from urlparse import urlparse
+import json
+import consul
+
+
+class DiscoveryError(RuntimeError):
+ pass
+
+def _create_rel_key(service_component_name):
+ return "{0}:rel".format(service_component_name)
+
+def _parse_host(host):
+ """Parse host string
+
+ Returns:
+ Tuple of the hostname and port to use to connect to Consul
+ """
+ def parse_urlparse_result(pr):
+ if not pr.hostname:
+ raise DiscoveryError("Invalid Consul host provided: {0}".format(host))
+
+ try:
+ # Port 8500 is the Consul default
+ return (pr.hostname, pr.port if pr.port else 8500)
+ except ValueError as e:
+ # Something bad happened with port
+ raise DiscoveryError("Invalid Consul host provided: {0}".format(host))
+
+ pr = urlparse(host)
+
+ # urlparse requires scheme to be set in order to be useful
+ if pr.scheme and pr.netloc:
+ return parse_urlparse_result(pr)
+ else:
+ return parse_urlparse_result(urlparse("http://{0}".format(host)))
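+
+# Illustrative behavior (mirrored in tests/test_discovery.py):
+#   _parse_host("some-consul.far.away")      -> ("some-consul.far.away", 8500)
+#   _parse_host("some-consul.far.away:8080") -> ("some-consul.far.away", 8080)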
+
+
+def create_kv_conn(host):
+ """Create connection to key-value store
+
+ Returns a Consul client to the specified Consul host
+ """
+ (hostname, port) = _parse_host(host)
+ return consul.Consul(host=hostname, port=port)
+
+def store_relationship(kv_conn, source_name, target_name):
+ # TODO: Rel entry may already exist in a one-to-many situation. Need to
+ # support that.
+ rel_key = _create_rel_key(source_name)
+ rel_value = [target_name] if target_name else []
+
+ kv_conn.kv.put(rel_key, json.dumps(rel_value))
+ print("Added relationship for {0}".format(rel_key))
+
+def delete_relationship(kv_conn, service_component_name):
+ rel_key = _create_rel_key(service_component_name)
+ index, rels = kv_conn.kv.get(rel_key)
+
+ if rels:
+ rels = json.loads(rels["Value"].decode("utf-8"))
+ kv_conn.kv.delete(rel_key)
+ return rels
+ else:
+ return []
diff --git a/relationships/relationshipplugin/tasks.py b/relationships/relationshipplugin/tasks.py
new file mode 100644
index 0000000..b17403c
--- /dev/null
+++ b/relationships/relationshipplugin/tasks.py
@@ -0,0 +1,131 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import json
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from relationshipplugin import discovery as dis
+import requests
+
+
+SERVICE_COMPONENT_NAME = "service_component_name"
+SELECTED_CONTAINER_DESTINATION = "selected_container_destination"
+CONSUL_HOST = "consul_host"
+
+CONSUL_HOSTNAME = "localhost"
+
+
+# Lifecycle interface calls for component_connected_to
+
+# NOTE: ctx.source and ctx.target are RelationshipSubjectContext
+# The order of operations for relationships is a bit confusing. These operations
+# are implemented for `target_interfaces`. By observation, the target node is
+# processed, then the source is created, then the relationship operations run,
+# and finally the source is started.
+# http://getcloudify.org/guide/3.1/dsl-spec-relationships.html#relationship-interfaces
+
+@operation
+def add_relationship(**kwargs):
+ """Adds target to the source relationship list"""
+ try:
+ conn = dis.create_kv_conn(CONSUL_HOSTNAME)
+
+ source_name = ctx.source.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+ # The use case for using the target name override is for the platform
+ # blueprint where the cdap broker needs to connect to a cdap cluster but
+ # the cdap cluster does not use the component plugins, so the name is
+ # not generated.
+ # REVIEW: Re-review this
+ target_name = kwargs["target_name_override"] \
+ if "target_name_override" in kwargs \
+ else ctx.target.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+
+ dis.store_relationship(conn, source_name, target_name)
+ ctx.logger.info("Created relationship: {0} to {1}".format(source_name,
+ target_name))
+ except Exception as e:
+ ctx.logger.error("Unexpected error while adding relationship: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+@operation
+def remove_relationship(**kwargs):
+ """Removes target from the source relationship list"""
+ try:
+ conn = dis.create_kv_conn(CONSUL_HOSTNAME)
+
+ source_name = ctx.source.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+ dis.delete_relationship(conn, source_name)
+ ctx.logger.info("Removed relationship: {0}".format(source_name))
+ except Exception as e:
+ ctx.logger.error("Unexpected error while removing relationship: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+
+# Lifecycle interface calls for component_contained_in
+
+@operation
+def forward_destination_info(**kwargs):
+ try:
+ selected_target = ctx.target.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+ ctx.source.instance.runtime_properties[SELECTED_CONTAINER_DESTINATION] = selected_target
+ ctx.logger.info("Forwarding selected target: {0}".format(ctx.source.instance.id))
+ except Exception as e:
+ ctx.logger.error("Unexpected error while forwarding selected target: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+@operation
+def registered_to(**kwargs):
+ """
+ Intended to be used in platform blueprints, but can be reused elsewhere
+ """
+ ctx.logger.info(str(kwargs))
+ address = kwargs["address_to_register"]
+ name = kwargs["name_to_register"]
+ port = kwargs["port_to_register"]
+
+ (consul_host, consul_port) = (CONSUL_HOSTNAME, 8500)
+ #Storing in source because that's who is getting registered
+ ctx.source.instance.runtime_properties[CONSUL_HOST] = "http://{0}:{1}".format(consul_host, consul_port)
+ ctx.source.instance.runtime_properties["name_to_register"] = name #careful! delete does not have access to inputs
+
+ try:
+ response = requests.put(url = "{0}/v1/agent/service/register".format(ctx.source.instance.runtime_properties[CONSUL_HOST]),
+ json = {
+ "name" : name,
+ "Address" : address,
+ "Port" : int(port)
+ },
+ headers={'Content-Type': 'application/json'})
+ response.raise_for_status() #bomb if not 2xx
+ except Exception as e:
+ ctx.logger.error("Error while registering: {0}".format(str(e)))
+ raise NonRecoverableError(e)
+
+@operation
+def registered_to_delete(**kwargs):
+ """
+ The inverse of registered_to: deregisters the service
+ """
+ requests.put(url = "{0}/v1/agent/service/deregister/{1}".format(ctx.source.instance.runtime_properties[CONSUL_HOST], ctx.source.instance.runtime_properties["name_to_register"]),
+ headers={'Content-Type': 'Content-Type: application/json'})
+ #this is on delete so do not do any checking
diff --git a/relationships/requirements.txt b/relationships/requirements.txt
new file mode 100644
index 0000000..59c8c70
--- /dev/null
+++ b/relationships/requirements.txt
@@ -0,0 +1,12 @@
+bottle==0.12.7
+cloudify-plugins-common==3.4
+cloudify-rest-client==3.4
+Jinja2==2.7.2
+MarkupSafe==0.23
+networkx==1.8.1
+pika==0.9.14
+proxy-tools==0.1.0
+python-consul==0.6.1
+requests==2.7.0
+requests-toolbelt==0.7.0
+six==1.10.0
diff --git a/relationships/setup.py b/relationships/setup.py
new file mode 100644
index 0000000..fbbf077
--- /dev/null
+++ b/relationships/setup.py
@@ -0,0 +1,35 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import os
+from setuptools import setup
+
+setup(
+ name='relationshipplugin',
+ description='Cloudify plugin for custom relationship processing between nodes',
+ version="1.0.0",
+ author='Michael Hwang, Tommy Carpenter',
+ packages=['relationshipplugin'],
+ zip_safe=False,
+ install_requires=[
+ "python-consul>=0.6.0",
+ "cloudify-plugins-common==3.4.0"
+ ]
+)
diff --git a/relationships/tests/test_discovery.py b/relationships/tests/test_discovery.py
new file mode 100644
index 0000000..ae2d34b
--- /dev/null
+++ b/relationships/tests/test_discovery.py
@@ -0,0 +1,44 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import pytest
+from relationshipplugin import discovery as dis
+
+
+def test_create_kv_conn_parse_host():
+ # Just hostname
+ hostname = "some-consul.far.away"
+ assert (hostname, 8500) == dis._parse_host(hostname)
+
+ # Hostname:port
+ port = 8080
+ host = "{0}:{1}".format(hostname, port)
+ assert (hostname, port) == dis._parse_host(host)
+
+ # Invalid port
+ port = "abc"
+ host = "{0}:{1}".format(hostname, port)
+ with pytest.raises(dis.DiscoveryError):
+ dis._parse_host(host)
+
+ # Hanging colon
+ port = ""
+ host = "{0}:{1}".format(hostname, port)
+ assert (hostname, 8500) == dis._parse_host(host)