-rw-r--r--  LICENSE.txt | 6
-rwxr-xr-x  cdap/cdap_types.yaml | 17
-rw-r--r--  cdap/cdapplugin/.coveragerc | 4
-rw-r--r--  cdap/cdapplugin/setup.py | 2
-rw-r--r--  cdap/cdapplugin/tox.ini | 6
-rw-r--r--  cdap/demo_blueprints/cdap_hello_world.yaml | 17
-rwxr-xr-x  cdap/demo_blueprints/cdap_hello_world_reconfigure.sh | 17
-rw-r--r--  cdap/demo_blueprints/cdap_hello_world_with_dmaap.yaml | 17
-rw-r--r--  cdap/demo_blueprints/cdap_hello_world_with_laika.yaml | 17
-rw-r--r--  cdap/demo_blueprints/cdap_hello_world_with_mr.yaml | 17
-rw-r--r--  cdap/pom.xml | 14
-rw-r--r--  dcae-policy/.coveragerc | 4
-rw-r--r--  dcae-policy/LICENSE.txt | 4
-rw-r--r--  dcae-policy/README.md | 23
-rw-r--r--  dcae-policy/dcaepolicy-node-type.yaml | 48
-rw-r--r--  dcae-policy/dcaepolicyplugin/__init__.py | 7
-rw-r--r--  dcae-policy/dcaepolicyplugin/discovery.py | 67
-rw-r--r--  dcae-policy/dcaepolicyplugin/tasks.py | 202
-rw-r--r--  dcae-policy/pom.xml | 18
-rw-r--r--  dcae-policy/setup.py | 8
-rw-r--r--  dcae-policy/tests/__init__.py | 17
-rw-r--r--  dcae-policy/tests/log_ctx.py | 16
-rw-r--r--  dcae-policy/tests/mock_cloudify_ctx.py | 10
-rw-r--r--  dcae-policy/tests/mock_setup.py | 155
-rw-r--r--  dcae-policy/tests/test_discovery.py | 133
-rw-r--r--  dcae-policy/tests/test_tasks.py | 277
-rw-r--r--  dcae-policy/tox-local.ini | 3
-rw-r--r--  dcae-policy/tox.ini | 6
-rw-r--r--  docker/ChangeLog.md | 13
-rw-r--r--  docker/README.md | 4
-rw-r--r--  docker/docker-node-type.yaml | 127
-rw-r--r--  docker/dockerplugin/tasks.py | 52
-rw-r--r--  docker/examples/blueprint-laika-dmaap-pubs.yaml | 17
-rw-r--r--  docker/examples/blueprint-laika-dmaap-pubsub.yaml | 17
-rw-r--r--  docker/examples/blueprint-laika-dmaap-subs.yaml | 17
-rw-r--r--  docker/examples/blueprint-laika-policy.yaml | 19
-rw-r--r--  docker/examples/blueprint-laika.yaml | 17
-rw-r--r--  docker/examples/blueprint-registrator.yaml | 17
-rw-r--r--  docker/pom.xml | 16
-rw-r--r--  docker/requirements.txt | 2
-rw-r--r--  docker/setup.py | 6
-rw-r--r--  docker/tests/test_decorators.py | 36
-rw-r--r--  docker/tests/test_discovery.py | 16
-rw-r--r--  docker/tests/test_tasks.py | 63
-rw-r--r--  docker/tests/test_utils.py | 32
-rw-r--r--  docker/tox.ini | 7
-rw-r--r--  k8s/.gitignore | 71
-rw-r--r--  k8s/ChangeLog.md | 24
-rw-r--r--  k8s/DesignNotes.md | 13
-rw-r--r--  k8s/LICENSE.txt | 32
-rw-r--r--  k8s/README.md | 293
-rw-r--r--  k8s/configure/__init__.py | 19
-rw-r--r--  k8s/configure/configure.py | 83
-rw-r--r--  k8s/k8s-node-type.yaml | 325
-rw-r--r--  k8s/k8sclient/__init__.py | 20
-rw-r--r--  k8s/k8sclient/k8sclient.py | 546
-rw-r--r--  k8s/k8splugin/__init__.py | 30
-rw-r--r--  k8s/k8splugin/decorators.py | 116
-rw-r--r--  k8s/k8splugin/discovery.py | 269
-rw-r--r--  k8s/k8splugin/exceptions.py | 29
-rw-r--r--  k8s/k8splugin/tasks.py | 687
-rw-r--r--  k8s/k8splugin/utils.py | 43
-rw-r--r--  k8s/msb/__init__.py | 19
-rw-r--r--  k8s/msb/msb.py | 64
-rw-r--r--  k8s/pom.xml | 165
-rw-r--r--  k8s/requirements.txt | 5
-rw-r--r--  k8s/setup.py | 38
-rw-r--r--  k8s/tests/conftest.py | 34
-rw-r--r--  k8s/tests/test_decorators.py | 34
-rw-r--r--  k8s/tests/test_discovery.py | 71
-rw-r--r--  k8s/tests/test_tasks.py | 293
-rw-r--r--  k8s/tests/test_utils.py | 33
-rw-r--r--  k8s/tox.ini | 15
-rwxr-xr-x  mvn-phase-lib.sh | 431
-rwxr-xr-x  mvn-phase-script.sh | 89
-rw-r--r--  pom.xml | 28
-rw-r--r--  relationships/example_register_to_blueprint.yaml | 17
-rw-r--r--  relationships/pom.xml | 14
-rw-r--r--  relationships/relationship-types.yaml | 17
-rw-r--r--  relationships/requirements.txt | 9
-rw-r--r--  relationships/tox.ini | 6
81 files changed, 4768 insertions, 824 deletions
diff --git a/LICENSE.txt b/LICENSE.txt
index 69d5fc1..9536f0b 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,11 +1,11 @@
/*
* ============LICENSE_START==========================================
* ===================================================================
-* Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+* Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
* ===================================================================
*
* Unless otherwise specified, all software contained herein is licensed
-* under the Apache License, Version 2.0 (the “License”);
+* under the Apache License, Version 2.0 (the "License");
* you may not use this software except in compliance with the License.
* You may obtain a copy of the License at
*
@@ -20,7 +20,7 @@
*
*
* Unless otherwise specified, all documentation contained herein is licensed
-* under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
+* under the Creative Commons License, Attribution 4.0 Intl. (the "License");
* you may not use this documentation except in compliance with the License.
* You may obtain a copy of the License at
*
diff --git a/cdap/cdap_types.yaml b/cdap/cdap_types.yaml
index 3c2a234..42e40b9 100755
--- a/cdap/cdap_types.yaml
+++ b/cdap/cdap_types.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/cdap/cdapplugin/.coveragerc b/cdap/cdapplugin/.coveragerc
index 9d9d9a2..7e87c60 100644
--- a/cdap/cdapplugin/.coveragerc
+++ b/cdap/cdapplugin/.coveragerc
@@ -23,7 +23,3 @@ exclude_lines =
if __name__ == .__main__.:
ignore_errors = True
-
-[xml]
-output = coverage-reports/coverage-cdapcloudify.xml
-
diff --git a/cdap/cdapplugin/setup.py b/cdap/cdapplugin/setup.py
index 8774699..6ca9461 100644
--- a/cdap/cdapplugin/setup.py
+++ b/cdap/cdapplugin/setup.py
@@ -32,7 +32,7 @@ setup(
url = "https://gerrit.onap.org/r/#/admin/projects/dcaegen2/platform/plugins",
zip_safe=False,
install_requires = [
- "onap-dcae-dcaepolicy-lib",
+ "onap-dcae-dcaepolicy-lib>=1.0.0,<2.0.0",
"uuid==1.30"
]
)
diff --git a/cdap/cdapplugin/tox.ini b/cdap/cdapplugin/tox.ini
index 246851e..2f4bb03 100644
--- a/cdap/cdapplugin/tox.ini
+++ b/cdap/cdapplugin/tox.ini
@@ -7,4 +7,8 @@ deps=
pytest
coverage
pytest-cov
-commands=pytest --junitxml xunit-reports/xunit-result-cdapcloudify.xml --cov {envsitepackagesdir} --cov-report=xml
+setenv=
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov cdapcloudify --cov-report xml
+ coverage xml
diff --git a/cdap/demo_blueprints/cdap_hello_world.yaml b/cdap/demo_blueprints/cdap_hello_world.yaml
index 0b5ff64..e154cf7 100644
--- a/cdap/demo_blueprints/cdap_hello_world.yaml
+++ b/cdap/demo_blueprints/cdap_hello_world.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/cdap/demo_blueprints/cdap_hello_world_reconfigure.sh b/cdap/demo_blueprints/cdap_hello_world_reconfigure.sh
index 95efe60..7731160 100755
--- a/cdap/demo_blueprints/cdap_hello_world_reconfigure.sh
+++ b/cdap/demo_blueprints/cdap_hello_world_reconfigure.sh
@@ -1,3 +1,20 @@
#!/bin/bash
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
cfy executions start -d cdap_hello_world -w execute_operation -p '{"operation" : "reconfiguration.app_config_reconfigure", "node_ids" : ["hw_cdap_app"], "operation_kwargs" : {"new_config_template" : {"foo":"bar-manual-update"}}, "allow_kwargs_override": true}'
cfy executions start -d cdap_hello_world -w execute_operation -p '{"operation" : "reconfiguration.app_preferences_reconfigure", "node_ids" : ["hw_cdap_app"], "operation_kwargs" : {"new_config_template" : {"foo_updated":"foo-pref-manual-update"}}, "allow_kwargs_override": true}'
diff --git a/cdap/demo_blueprints/cdap_hello_world_with_dmaap.yaml b/cdap/demo_blueprints/cdap_hello_world_with_dmaap.yaml
index 009fc37..11c6f75 100644
--- a/cdap/demo_blueprints/cdap_hello_world_with_dmaap.yaml
+++ b/cdap/demo_blueprints/cdap_hello_world_with_dmaap.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/cdap/demo_blueprints/cdap_hello_world_with_laika.yaml b/cdap/demo_blueprints/cdap_hello_world_with_laika.yaml
index 8381418..fc84f8b 100644
--- a/cdap/demo_blueprints/cdap_hello_world_with_laika.yaml
+++ b/cdap/demo_blueprints/cdap_hello_world_with_laika.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/cdap/demo_blueprints/cdap_hello_world_with_mr.yaml b/cdap/demo_blueprints/cdap_hello_world_with_mr.yaml
index fb1e863..f52edad 100644
--- a/cdap/demo_blueprints/cdap_hello_world_with_mr.yaml
+++ b/cdap/demo_blueprints/cdap_hello_world_with_mr.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/cdap/pom.xml b/cdap/pom.xml
index 3fcbb4e..477d68d 100644
--- a/cdap/pom.xml
+++ b/cdap/pom.xml
@@ -33,14 +33,14 @@ ECOMP is a trademark and service mark of AT&T Intellectual Property.
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <!-- sonar -->
- <sonar.skip>true</sonar.skip>
- <sonar.sources>cdapplugin/</sonar.sources>
- <sonar.junit.reportspath>cdapplugin/xunit-reports/xunit-result-cdapcloudify.xml</sonar.junit.reportspath>
- <sonar.python.coverage.reportpath>cdapplugin/coverage-reports/coverage-cdapcloudify.xml</sonar.python.coverage.reportpath>
+ <sonar.sources>.</sonar.sources>
+ <sonar.modules>cdapplugin</sonar.modules>
+ <cdapplugin.sonar.junit.reportsPath>xunit-results.xml</cdapplugin.sonar.junit.reportsPath>
+ <cdapplugin.sonar.python.coverage.reportPath>coverage.xml</cdapplugin.sonar.python.coverage.reportPath>
<sonar.language>py</sonar.language>
- <sonar.pluginname>python</sonar.pluginname>
- <sonar.inclusions>*/cdapcloudify/**.py</sonar.inclusions>
+ <sonar.pluginname>Python</sonar.pluginname>
+ <cdapplugin.sonar.inclusions>**/*.py</cdapplugin.sonar.inclusions>
+ <cdapplugin.sonar.exclusions>tests/*,setup.py</cdapplugin.sonar.exclusions>
</properties>
<build>
<finalName>${project.artifactId}-${project.version}</finalName>
diff --git a/dcae-policy/.coveragerc b/dcae-policy/.coveragerc
index 2e600b5..eba9bd0 100644
--- a/dcae-policy/.coveragerc
+++ b/dcae-policy/.coveragerc
@@ -23,7 +23,3 @@ exclude_lines =
if __name__ == .__main__.:
ignore_errors = True
-
-[xml]
-output = coverage-reports/coverage-dcaepolicyplugin.xml
-
diff --git a/dcae-policy/LICENSE.txt b/dcae-policy/LICENSE.txt
index cb8008a..948807c 100644
--- a/dcae-policy/LICENSE.txt
+++ b/dcae-policy/LICENSE.txt
@@ -1,7 +1,5 @@
-============LICENSE_START=======================================================
-org.onap.dcae
================================================================================
-Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/dcae-policy/README.md b/dcae-policy/README.md
index 42fa53b..a3a070b 100644
--- a/dcae-policy/README.md
+++ b/dcae-policy/README.md
@@ -1,11 +1,32 @@
# dcae-policy plugin and node-type
+
- python-package dcaepolicyplugin to be used in cloudify plugins to retrieve the policy from policy-handler
---
## dcaepolicy node type [dcaepolicy-node-type.yaml](./dcaepolicy-node-type.yaml)
+
- node type for dcae.nodes.policy
+- node type for dcae.nodes.policies
+
+---
+
+## discovery of policy-handler
+
+- dcaepolicyplugin will first try to find the record of ```policy_handler``` in consul services.
+
+- if that fails, it will try to find the config for "dcaepolicyplugin" in consul-kv
+
+  - the config structure is expected to contain the url to policy_handler
+  - example of the config value for key=```dcaepolicyplugin```:
+
+```json
+{ "dcaepolicyplugin" : { "policy_handler" : { "url" : "http://policy-handler:25577" } } }
+```
+
+- if still not found, it will default to the hardcoded url ```http://policy-handler```
+
---
## Usage
@@ -14,7 +35,7 @@ import the dcaepolicy-node-type.yaml into your blueprint to use the dcae.nodes.t
```yaml
imports:
- - https://YOUR_NEXUS_RAW_SERVER/type_files/dcaepolicy/1.0.0/node-type.yaml
+ - https://YOUR_NEXUS_RAW_SERVER/type_files/dcaepolicy/2.3.0/node-type.yaml
```
provide the value for policy_id property
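
To make that lookup order concrete, here is a minimal standalone sketch in plain Python (the function name is hypothetical; the consul endpoints, the `dcaepolicyplugin` key, and the default url come from the README text above):

```python
import base64
import json

import requests

def find_policy_handler_url():
    """lookup order from the README: consul service, then consul-kv, then default"""
    # 1. service record in the consul catalog
    resp = requests.get("http://consul:8500/v1/catalog/service/policy_handler")
    if resp.status_code == 200 and resp.json():
        service = resp.json()[0]
        return "http://{0}:{1}".format(service["ServiceAddress"], service["ServicePort"])

    # 2. config under the "dcaepolicyplugin" key in consul-kv (value is base64-encoded)
    resp = requests.get("http://consul:8500/v1/kv/dcaepolicyplugin")
    if resp.status_code == 200 and resp.json():
        config = json.loads(base64.b64decode(resp.json()[0]["Value"]).decode("utf-8"))
        url = config.get("dcaepolicyplugin", {}).get("policy_handler", {}).get("url")
        if url:
            return url

    # 3. hardcoded default
    return "http://policy-handler"
```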
diff --git a/dcae-policy/dcaepolicy-node-type.yaml b/dcae-policy/dcaepolicy-node-type.yaml
index 515d6b9..1c07b7a 100644
--- a/dcae-policy/dcaepolicy-node-type.yaml
+++ b/dcae-policy/dcaepolicy-node-type.yaml
@@ -1,7 +1,5 @@
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -27,20 +25,58 @@ plugins:
dcaepolicy:
executor: 'central_deployment_agent'
package_name: dcaepolicyplugin
- package_version: 1.0.0
+ package_version: 2.3.0
+
+data_types:
+  # the properties inside dcae.data.policy_filter are identical to the /getConfig API of policy-engine, except for the requestID field.
+  # refer to policy-engine /getConfig wiki for explanation of these properties.
+  # policy-engine /getConfig wiki: The filter works as a combined "AND" operation.
+  #                To retrieve all policies using "sample" as configName,
+  #                the request needs to have policyName = ".*" and configName = "sample"
+ # configAttributes is a key-value dictionary or a stringified json of the dictionary
+ dcae.data.policy_filter:
+ properties:
+ policyName:
+ type: string
+ default: "DCAE.Config_.*"
+ configName:
+ type: string
+ default: ""
+ onapName:
+ type: string
+ default: "DCAE"
+ configAttributes:
+ default: {}
+ unique:
+ type: boolean
+ default: false
node_types:
+ # node that points to a single latest policy identified by policy_id
+ # policy_id is the versionless left part of policyName in policy-engine
dcae.nodes.policy:
derived_from: cloudify.nodes.Root
properties:
policy_id:
- description: PK to policy in policy-engine
+ description: versionless key to policy in policy-engine
type: string
default: DCAE.Config_unknown-policy
policy_required:
description: whether to throw an exception when failed to get the policy
type: boolean
- default: true
+ default: false
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: dcaepolicy.dcaepolicyplugin.policy_get
+
+ # node that points to varying collection of policies by selection criteria = policy_filter.
+ dcae.nodes.policies:
+ derived_from: cloudify.nodes.Root
+ properties:
+ policy_filter:
+ type: dcae.data.policy_filter
+ default: {}
interfaces:
cloudify.interfaces.lifecycle:
create:
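
To make the filter comments above concrete: selecting every policy whose configName is "sample" would use a payload like the Python dict below (illustrative values; field names and defaults come from the dcae.data.policy_filter data type above):

```python
# hypothetical /policies_latest payload matching the "AND" semantics described above:
# policyName ".*" matches any policy name, configName narrows the result to "sample"
policy_filter = {
    "policyName": ".*",
    "configName": "sample",
    "onapName": "DCAE",
    "configAttributes": {},  # key-value dict, or a stringified json of the dict
    "unique": False,
}
```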
diff --git a/dcae-policy/dcaepolicyplugin/__init__.py b/dcae-policy/dcaepolicyplugin/__init__.py
index d2946a6..173c1eb 100644
--- a/dcae-policy/dcaepolicyplugin/__init__.py
+++ b/dcae-policy/dcaepolicyplugin/__init__.py
@@ -1,8 +1,5 @@
-""":policyplugin: gets the policy from policy-handler and stores it into runtime properties"""
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,4 +16,6 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+""":policyplugin: gets the policy from policy-handler and stores it into runtime properties"""
+
from .tasks import policy_get
diff --git a/dcae-policy/dcaepolicyplugin/discovery.py b/dcae-policy/dcaepolicyplugin/discovery.py
index 8cdbde1..0517377 100644
--- a/dcae-policy/dcaepolicyplugin/discovery.py
+++ b/dcae-policy/dcaepolicyplugin/discovery.py
@@ -1,9 +1,5 @@
-"""client to talk to consul on standard port 8500"""
-
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,16 +16,69 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+"""client to talk to consul on standard port 8500"""
+
+import base64
+import json
+
import requests
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+# it is safe to assume that consul agent is at consul:8500
+# define the consul alias in /etc/hosts on the cloudify manager vm
+# $ cat /etc/hosts
+# 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 consul
+
+CONSUL_SERVICE_URL = "http://consul:8500/v1/catalog/service/{0}"
+CONSUL_KV_MASK = "http://consul:8500/v1/kv/{0}"
-# it is safe to assume that consul agent is at localhost:8500 along with cloudify manager
-CONSUL_SERVICE_URL = "http://localhost:8500/v1/catalog/service/{0}"
def discover_service_url(service_name):
"""find the service record in consul"""
- response = requests.get(CONSUL_SERVICE_URL.format(service_name))
- response.raise_for_status()
+ service_url = CONSUL_SERVICE_URL.format(service_name)
+ ctx.logger.info("getting service_url at {0}".format(service_url))
+
+ try:
+ response = requests.get(service_url)
+ except requests.ConnectionError as ex:
+ raise NonRecoverableError(
+ "ConnectionError - failed to get {0}: {1}".format(service_url, str(ex)))
+
+ ctx.logger.info("got {0} for service_url at {1} response: {2}"
+ .format(response.status_code, service_url, response.text))
+
+ if response.status_code != requests.codes.ok:
+ return
+
resp_json = response.json()
if resp_json:
service = resp_json[0]
return "http://{0}:{1}".format(service["ServiceAddress"], service["ServicePort"])
+
+
+def discover_value(key):
+ """get the value for the key from consul-kv"""
+ kv_url = CONSUL_KV_MASK.format(key)
+ ctx.logger.info("getting kv at {0}".format(kv_url))
+
+ try:
+ response = requests.get(kv_url)
+ except requests.ConnectionError as ex:
+ raise NonRecoverableError(
+ "ConnectionError - failed to get {0}: {1}".format(kv_url, str(ex)))
+
+ ctx.logger.info("got {0} for kv at {1} response: {2}"
+ .format(response.status_code, kv_url, response.text))
+
+ if response.status_code != requests.codes.ok:
+ return
+
+ data = response.json()
+ if not data:
+ ctx.logger.error("failed discover_value %s", key)
+ return
+ value = base64.b64decode(data[0]["Value"]).decode("utf-8")
+ ctx.logger.info("consul-kv key=%s value(%s) data=%s",
+ key, value, json.dumps(data))
+ return json.loads(value)
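
Consul's KV API base64-encodes the stored value, which is why `discover_value` above decodes before parsing. A self-contained round-trip of that decode step (the stored value here is made up for the example):

```python
import base64
import json

# shape of the consul response for GET /v1/kv/dcaepolicyplugin (value is illustrative)
stored = json.dumps({"policy_handler": {"url": "http://policy-handler:25577"}})
kv_response = [{"Key": "dcaepolicyplugin",
                "Value": base64.b64encode(stored.encode("utf-8"))}]

# the same decode that discover_value performs
value = json.loads(base64.b64decode(kv_response[0]["Value"]).decode("utf-8"))
assert value["policy_handler"]["url"] == "http://policy-handler:25577"
```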
diff --git a/dcae-policy/dcaepolicyplugin/tasks.py b/dcae-policy/dcaepolicyplugin/tasks.py
index 2676864..bbf3ec1 100644
--- a/dcae-policy/dcaepolicyplugin/tasks.py
+++ b/dcae-policy/dcaepolicyplugin/tasks.py
@@ -1,7 +1,5 @@
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,29 +16,43 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-# Lifecycle interface calls for DockerContainer
+"""tasks are the cloudify operations invoked on interfaces defined in the blueprint"""
+import copy
import json
+import traceback
import uuid
import requests
-
from cloudify import ctx
-from cloudify.decorators import operation
from cloudify.context import NODE_INSTANCE
+from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
-from .discovery import discover_service_url
+from .discovery import discover_service_url, discover_value
+DCAE_POLICY_PLUGIN = "dcaepolicyplugin"
POLICY_ID = 'policy_id'
POLICY_REQUIRED = 'policy_required'
POLICY_BODY = 'policy_body'
+POLICIES_FILTERED = 'policies_filtered'
+POLICY_FILTER = 'policy_filter'
+LATEST_POLICIES = "latest_policies"
+
+REQUEST_ID = "requestID"
+
DCAE_POLICY_TYPE = 'dcae.nodes.policy'
+DCAE_POLICIES_TYPE = 'dcae.nodes.policies'
+DCAE_POLICY_TYPES = [DCAE_POLICY_TYPE, DCAE_POLICIES_TYPE]
+CONFIG_ATTRIBUTES = "configAttributes"
+
class PolicyHandler(object):
"""talk to policy-handler"""
SERVICE_NAME_POLICY_HANDLER = "policy_handler"
X_ECOMP_REQUESTID = 'X-ECOMP-RequestID'
+ STATUS_CODE_POLICIES_NOT_FOUND = 404
+ DEFAULT_URL = "http://policy-handler"
_url = None
@staticmethod
@@ -49,53 +61,177 @@ class PolicyHandler(object):
if PolicyHandler._url:
return
- PolicyHandler._url = "{0}/policy_latest".format(
- discover_service_url(PolicyHandler.SERVICE_NAME_POLICY_HANDLER)
- )
+ PolicyHandler._url = discover_service_url(PolicyHandler.SERVICE_NAME_POLICY_HANDLER)
+ if PolicyHandler._url:
+ return
+
+ config = discover_value(DCAE_POLICY_PLUGIN)
+ if config and isinstance(config, dict):
+ # expected structure for the config value for dcaepolicyplugin key
+ # {
+ # "dcaepolicyplugin" : {
+ # "policy_handler" : {
+ # "target_entity" : "policy_handler",
+ # "url" : "http://policy-handler:25577"
+ # }
+ # }
+ # }
+ PolicyHandler._url = config.get(DCAE_POLICY_PLUGIN, {}) \
+ .get(PolicyHandler.SERVICE_NAME_POLICY_HANDLER, {}).get("url")
+
+ if PolicyHandler._url:
+ return
+
+ PolicyHandler._url = PolicyHandler.DEFAULT_URL
@staticmethod
def get_latest_policy(policy_id):
"""retrieve the latest policy for policy_id from policy-handler"""
PolicyHandler._lazy_init()
- ph_path = "{0}/{1}".format(PolicyHandler._url, policy_id)
+ ph_path = "{0}/policy_latest/{1}".format(PolicyHandler._url, policy_id)
headers = {PolicyHandler.X_ECOMP_REQUESTID: str(uuid.uuid4())}
- ctx.logger.info("getting latest policy from {0} headers={1}".format( \
+ ctx.logger.info("getting latest policy from {0} headers={1}".format(
ph_path, json.dumps(headers)))
res = requests.get(ph_path, headers=headers)
+ ctx.logger.info("latest policy for policy_id({0}) status({1}) response: {2}"
+ .format(policy_id, res.status_code, res.text))
+
+ if res.status_code == PolicyHandler.STATUS_CODE_POLICIES_NOT_FOUND:
+ return
+
res.raise_for_status()
+ return res.json()
- if res.status_code == requests.codes.ok:
- return res.json()
- return {}
+ @staticmethod
+ def find_latest_policies(policy_filter):
+ """retrieve the latest policies by policy filter (selection criteria) from policy-handler"""
+ PolicyHandler._lazy_init()
-#########################################################
-@operation
-def policy_get(**kwargs):
- """retrieve the latest policy_body for policy_id property and save it in runtime_properties"""
- if ctx.type != NODE_INSTANCE or DCAE_POLICY_TYPE not in ctx.node.type_hierarchy:
- error = "can only invoke policy_get on node of type {0}".format(DCAE_POLICY_TYPE)
- ctx.logger.error(error)
- raise NonRecoverableError(error)
+ ph_path = "{0}/policies_latest".format(PolicyHandler._url)
+ headers = {
+ PolicyHandler.X_ECOMP_REQUESTID: policy_filter.get(REQUEST_ID, str(uuid.uuid4()))
+ }
+
+ ctx.logger.info("finding the latest polices from {0} by {1} headers={2}".format(
+ ph_path, json.dumps(policy_filter), json.dumps(headers)))
+
+ res = requests.post(ph_path, json=policy_filter, headers=headers)
+ ctx.logger.info("latest policies status({0}) response: {1}"
+ .format(res.status_code, res.text))
+
+ if res.status_code == PolicyHandler.STATUS_CODE_POLICIES_NOT_FOUND:
+ return
- if POLICY_ID not in ctx.node.properties:
+ res.raise_for_status()
+ return res.json().get(LATEST_POLICIES)
+
+
+def _policy_get():
+ """
+ dcae.nodes.policy -
+ retrieve the latest policy_body for policy_id property
+ and save policy_body in runtime_properties
+ """
+ if DCAE_POLICY_TYPE not in ctx.node.type_hierarchy:
+ return
+
+ policy_id = ctx.node.properties.get(POLICY_ID)
+ policy_required = ctx.node.properties.get(POLICY_REQUIRED)
+ if not policy_id:
error = "no {0} found in ctx.node.properties".format(POLICY_ID)
ctx.logger.error(error)
raise NonRecoverableError(error)
+ policy = None
try:
- policy_id = ctx.node.properties[POLICY_ID]
policy = PolicyHandler.get_latest_policy(policy_id)
- if not policy:
- raise NonRecoverableError("policy not found for policy_id {0}".format(policy_id))
+ except Exception as ex:
+ error = "failed to get policy({0}): {1}".format(policy_id, str(ex))
+ ctx.logger.error("{0}: {1}".format(error, traceback.format_exc()))
+ if policy_required:
+ raise NonRecoverableError(error)
+
+ if not policy:
+ error = "policy not found for policy_id {0}".format(policy_id)
+ ctx.logger.info(error)
+ if policy_required:
+ raise NonRecoverableError(error)
+ return True
+
+ ctx.logger.info("found policy {0}: {1}".format(policy_id, json.dumps(policy)))
+ if POLICY_BODY in policy:
+ ctx.instance.runtime_properties[POLICY_BODY] = policy[POLICY_BODY]
+ return True
+
+
+def _fix_policy_filter(policy_filter):
+ if CONFIG_ATTRIBUTES in policy_filter:
+ config_attributes = policy_filter.get(CONFIG_ATTRIBUTES)
+ if isinstance(config_attributes, dict):
+ return
+ try:
+ config_attributes = json.loads(config_attributes)
+ if config_attributes and isinstance(config_attributes, dict):
+ policy_filter[CONFIG_ATTRIBUTES] = config_attributes
+ return
+ except (ValueError, TypeError):
+ pass
+ if config_attributes:
+ ctx.logger.warn("unexpected %s: %s", CONFIG_ATTRIBUTES, config_attributes)
+ del policy_filter[CONFIG_ATTRIBUTES]
+
+
+def _policies_find():
+ """
+ dcae.nodes.policies -
+ retrieve the latest policies for selection criteria
+ and save found policies in runtime_properties
+ """
+ if DCAE_POLICIES_TYPE not in ctx.node.type_hierarchy:
+ return
+
+ try:
+ policy_filter = copy.deepcopy(dict(
+ (k, v) for (k, v) in dict(ctx.node.properties.get(POLICY_FILTER, {})).iteritems()
+ if v or isinstance(v, (int, float))
+ ))
+ _fix_policy_filter(policy_filter)
+
+ if REQUEST_ID not in policy_filter:
+ policy_filter[REQUEST_ID] = str(uuid.uuid4())
+
+ policies_filtered = PolicyHandler.find_latest_policies(policy_filter)
+
+ if not policies_filtered:
+ ctx.logger.info("policies not found by {0}".format(json.dumps(policy_filter)))
+ return True
- ctx.logger.info("found policy {0}".format(json.dumps(policy)))
- if POLICY_BODY in policy:
- ctx.instance.runtime_properties[POLICY_BODY] = policy[POLICY_BODY]
+ ctx.logger.info("found policies by {0}: {1}".format(
+ json.dumps(policy_filter), json.dumps(policies_filtered)
+ ))
+ ctx.instance.runtime_properties[POLICIES_FILTERED] = policies_filtered
except Exception as ex:
- error = "failed to get policy: {0}".format(str(ex))
+ error = "failed to find policies: {0}".format(str(ex))
+ ctx.logger.error("{0}: {1}".format(error, traceback.format_exc()))
+
+ return True
+
+
+#########################################################
+@operation
+def policy_get(**kwargs):
+ """retrieve the policy or policies and save it in runtime_properties"""
+ if ctx.type != NODE_INSTANCE:
+ raise NonRecoverableError("can only invoke policy_get on node of types: {0}"
+ .format(DCAE_POLICY_TYPES))
+
+ if not _policy_get() and not _policies_find():
+ error = "unexpected node type {0} for policy_get - expected types: {1}" \
+ .format(ctx.node.type_hierarchy, DCAE_POLICY_TYPES)
ctx.logger.error(error)
- if ctx.node.properties.get(POLICY_REQUIRED, True):
- raise NonRecoverableError(error)
+ raise NonRecoverableError(error)
+
+ ctx.logger.info("exit policy_get")
diff --git a/dcae-policy/pom.xml b/dcae-policy/pom.xml
index 9573762..949ecf8 100644
--- a/dcae-policy/pom.xml
+++ b/dcae-policy/pom.xml
@@ -1,7 +1,7 @@
<?xml version="1.0"?>
<!--
================================================================================
-Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -28,25 +28,17 @@ ECOMP is a trademark and service mark of AT&T Intellectual Property.
<groupId>org.onap.dcaegen2.platform.plugins</groupId>
<artifactId>dcae-policy</artifactId>
<name>dcae-policy-plugin</name>
- <version>1.1.0-SNAPSHOT</version>
+ <version>2.3.0-SNAPSHOT</version>
<url>http://maven.apache.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <sonar.skip>true</sonar.skip>
<sonar.sources>.</sonar.sources>
- <!-- customize the SONARQUBE URL -->
- <!-- sonar.host.url>http://localhost:9000</sonar.host.url -->
- <!-- below are language dependent -->
- <!-- for Python -->
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPath>coverage.xml</sonar.python.coverage.reportPath>
<sonar.language>py</sonar.language>
<sonar.pluginName>Python</sonar.pluginName>
<sonar.inclusions>**/*.py</sonar.inclusions>
- <!-- for JavaScaript -->
- <!--
- <sonar.language>js</sonar.language>
- <sonar.pluginName>JS</sonar.pluginName>
- <sonar.inclusions>**/*.js</sonar.inclusions>
- -->
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
</properties>
<build>
<finalName>${project.artifactId}-${project.version}</finalName>
diff --git a/dcae-policy/setup.py b/dcae-policy/setup.py
index 528e744..d1a014e 100644
--- a/dcae-policy/setup.py
+++ b/dcae-policy/setup.py
@@ -1,7 +1,5 @@
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,12 +16,14 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+"""package for dcaepolicyplugin - getting policies from policy-engine through policy-handler"""
+
from setuptools import setup
setup(
name='dcaepolicyplugin',
description='Cloudify plugin for dcae.nodes.policy node to retrieve the policy config',
- version="1.0.0",
+ version="2.3.0",
author='Alex Shatov',
packages=['dcaepolicyplugin'],
install_requires=[
diff --git a/dcae-policy/tests/__init__.py b/dcae-policy/tests/__init__.py
new file mode 100644
index 0000000..5d59d8b
--- /dev/null
+++ b/dcae-policy/tests/__init__.py
@@ -0,0 +1,17 @@
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
diff --git a/dcae-policy/tests/log_ctx.py b/dcae-policy/tests/log_ctx.py
index 9f5464d..7685893 100644
--- a/dcae-policy/tests/log_ctx.py
+++ b/dcae-policy/tests/log_ctx.py
@@ -1,8 +1,5 @@
-""":@CtxLogger.log_ctx: decorator for logging the cloudify ctx before and after operation"""
-
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,12 +16,16 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+""":@CtxLogger.log_ctx: decorator for logging the cloudify ctx before and after operation"""
+
import json
+import traceback
from functools import wraps
from cloudify import ctx
from cloudify.context import NODE_INSTANCE, RELATIONSHIP_INSTANCE
+
class CtxLogger(object):
"""static class for logging cloudify context ctx"""
@staticmethod
@@ -102,7 +103,8 @@ class CtxLogger(object):
ctx.logger.info("{0} context: {1}".format(\
func_name, json.dumps(CtxLogger.get_ctx_info())))
except Exception as ex:
- ctx.logger.error("Failed to log the node context: {0}".format(str(ex)))
+ ctx.logger.error("Failed to log the node context: {0}: {1}" \
+ .format(str(ex), traceback.format_exc()))
@staticmethod
def log_ctx(pre_log=True, after_log=False, exe_task=None):
@@ -119,8 +121,8 @@ class CtxLogger(object):
if ctx.type == NODE_INSTANCE and exe_task:
ctx.instance.runtime_properties[exe_task] = func.__name__
except Exception as ex:
- ctx.logger.error("Failed to set exe_task {0}: {1}".format(\
- exe_task, str(ex)))
+ ctx.logger.error("Failed to set exe_task {0}: {1}: {2}" \
+ .format(exe_task, str(ex), traceback.format_exc()))
if pre_log:
CtxLogger.log_ctx_info('before ' + func.__name__)
diff --git a/dcae-policy/tests/mock_cloudify_ctx.py b/dcae-policy/tests/mock_cloudify_ctx.py
index 0c130c0..fb52b43 100644
--- a/dcae-policy/tests/mock_cloudify_ctx.py
+++ b/dcae-policy/tests/mock_cloudify_ctx.py
@@ -1,8 +1,5 @@
-
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +16,10 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-from cloudify.mocks import MockCloudifyContext, MockNodeInstanceContext, MockNodeContext
+"""mock cloudify context with relationships and type_hierarchy"""
+
+from cloudify.mocks import (MockCloudifyContext, MockNodeContext,
+ MockNodeInstanceContext)
TARGET_NODE_ID = "target_node_id"
TARGET_NODE_NAME = "target_node_name"
diff --git a/dcae-policy/tests/mock_setup.py b/dcae-policy/tests/mock_setup.py
new file mode 100644
index 0000000..cbc4a35
--- /dev/null
+++ b/dcae-policy/tests/mock_setup.py
@@ -0,0 +1,155 @@
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""unit tests for tasks in dcaepolicyplugin"""
+
+import json
+import logging
+from datetime import datetime, timedelta
+
+from tests.mock_cloudify_ctx import MockCloudifyContextFull
+
+LOG_FILE = 'logs/test_dcaepolicyplugin.log'
+POLICY_ID = 'policy_id'
+POLICY_VERSION = "policyVersion"
+POLICY_NAME = "policyName"
+POLICY_BODY = 'policy_body'
+POLICY_CONFIG = 'config'
+CONFIG_NAME = "ConfigName"
+MONKEYED_POLICY_ID = 'monkeyed.Config_peach'
+
+RUN_TS = datetime.utcnow()
+
+
+class MonkeyedLogHandler(object):
+ """keep the shared logger handler here"""
+ _log_handler = None
+
+ @staticmethod
+ def add_handler_to(logger):
+ """adds the local handler to the logger"""
+ if not MonkeyedLogHandler._log_handler:
+ MonkeyedLogHandler._log_handler = logging.FileHandler(LOG_FILE)
+ MonkeyedLogHandler._log_handler.setLevel(logging.DEBUG)
+ formatter = logging.Formatter(
+ fmt='%(asctime)s.%(msecs)03d %(levelname)+8s ' +
+ '%(threadName)s %(name)s.%(funcName)s: %(message)s',
+ datefmt='%Y%m%d_%H%M%S')
+ MonkeyedLogHandler._log_handler.setFormatter(formatter)
+ logger.addHandler(MonkeyedLogHandler._log_handler)
+
+
+class MonkeyedPolicyBody(object):
+ """policy body that policy-engine returns"""
+ @staticmethod
+ def create_policy_body(policy_id, policy_version=1):
+ """returns a fake policy-body"""
+ prev_ver = policy_version - 1
+ timestamp = RUN_TS + timedelta(hours=prev_ver)
+
+ prev_ver = str(prev_ver)
+ this_ver = str(policy_version)
+ config = {
+ "policy_updated_from_ver": prev_ver,
+ "policy_updated_to_ver": this_ver,
+ "policy_hello": "world!",
+ "policy_updated_ts": timestamp.isoformat()[:-3] + 'Z',
+ "updated_policy_id": policy_id
+ }
+ return {
+ "policyConfigMessage": "Config Retrieved! ",
+ "policyConfigStatus": "CONFIG_RETRIEVED",
+ "type": "JSON",
+ POLICY_NAME: "{0}.{1}.xml".format(policy_id, this_ver),
+ POLICY_VERSION: this_ver,
+ POLICY_CONFIG: config,
+ "matchingConditions": {
+ "ONAPName": "DCAE",
+ CONFIG_NAME: "alex_config_name"
+ },
+ "responseAttributes": {},
+ "property": None
+ }
+
+ @staticmethod
+ def create_policy(policy_id, policy_version=1):
+ """returns the whole policy object for policy_id and policy_version"""
+ return {
+ POLICY_ID: policy_id,
+ POLICY_BODY: MonkeyedPolicyBody.create_policy_body(policy_id, policy_version)
+ }
+
+ @staticmethod
+ def is_the_same_dict(policy_body_1, policy_body_2):
+ """check whether both policy_body objects are the same"""
+ if not isinstance(policy_body_1, dict) or not isinstance(policy_body_2, dict):
+ return False
+ for key in policy_body_1.keys():
+ if key not in policy_body_2:
+ return False
+
+ val_1 = policy_body_1[key]
+ val_2 = policy_body_2[key]
+ if isinstance(val_1, dict) \
+ and not MonkeyedPolicyBody.is_the_same_dict(val_1, val_2):
+ return False
+ if (val_1 is None and val_2 is not None) \
+ or (val_1 is not None and val_2 is None) \
+ or (val_1 != val_2):
+ return False
+ return True
+
+
+class MonkeyedResponse(object):
+ """Monkey response"""
+ def __init__(self, full_path, headers=None, resp_json=None):
+ self.full_path = full_path
+ self.status_code = 200
+ self.headers = headers or {}
+ self.resp_json = resp_json
+ self.text = json.dumps(resp_json or {})
+
+ def json(self):
+ """returns json of response"""
+ return self.resp_json
+
+ def raise_for_status(self):
+ """always happy"""
+ pass
+
+
+class MonkeyedNode(object):
+ """node in cloudify"""
+ BLUEPRINT_ID = 'test_dcae_policy_bp_id'
+ DEPLOYMENT_ID = 'test_dcae_policy_dpl_id'
+ EXECUTION_ID = 'test_dcae_policy_exe_id'
+
+ def __init__(self, node_id, node_name, node_type, properties, relationships=None):
+ self.node_id = node_id
+ self.node_name = node_name
+ self.ctx = MockCloudifyContextFull(
+ node_id=self.node_id,
+ node_name=self.node_name,
+ node_type=node_type,
+ blueprint_id=MonkeyedNode.BLUEPRINT_ID,
+ deployment_id=MonkeyedNode.DEPLOYMENT_ID,
+ execution_id=MonkeyedNode.EXECUTION_ID,
+ properties=properties,
+ relationships=relationships
+ )
+ MonkeyedLogHandler.add_handler_to(self.ctx.logger)
diff --git a/dcae-policy/tests/test_discovery.py b/dcae-policy/tests/test_discovery.py
new file mode 100644
index 0000000..129ad99
--- /dev/null
+++ b/dcae-policy/tests/test_discovery.py
@@ -0,0 +1,133 @@
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""unit tests for discovery in dcaepolicyplugin"""
+
+import base64
+import json
+
+import pytest
+import requests
+from cloudify.exceptions import NonRecoverableError
+from cloudify.state import current_ctx
+
+from dcaepolicyplugin import discovery, tasks
+from tests.log_ctx import CtxLogger
+from tests.mock_cloudify_ctx import MockCloudifyContextFull
+from tests.mock_setup import (MONKEYED_POLICY_ID, POLICY_ID, MonkeyedNode,
+ MonkeyedResponse)
+
+POLICY_HANDLER_FROM_KV = "http://policy_handler_from_kv:25577"
+
+
+def monkeyed_discovery_get_failure(full_path):
+ """monkeypatch for the GET to consul"""
+ raise requests.ConnectionError("monkey-boom")
+
+
+def test_discovery_failure(monkeypatch):
+ """test finding policy-handler in consul"""
+ monkeypatch.setattr('requests.get', monkeyed_discovery_get_failure)
+
+ node_policy = MonkeyedNode(
+ 'test_dcae_policy_node_id',
+ 'test_dcae_policy_node_name',
+ tasks.DCAE_POLICY_TYPE,
+ {POLICY_ID: MONKEYED_POLICY_ID}
+ )
+ try:
+ current_ctx.set(node_policy.ctx)
+ with pytest.raises(NonRecoverableError) as excinfo:
+ tasks.PolicyHandler._lazy_init()
+
+ CtxLogger.log_ctx_info("test_discovery_failure: {0}".format(str(excinfo.value)))
+ assert str(excinfo.value).startswith("ConnectionError")
+
+
+ finally:
+ tasks.PolicyHandler._url = None
+ MockCloudifyContextFull.clear()
+ current_ctx.clear()
+
+
+def monkeyed_discovery_get_kv(full_path):
+ """monkeypatch for the GET to consul"""
+ if full_path.startswith(discovery.CONSUL_SERVICE_URL.format("")):
+ return MonkeyedResponse(full_path)
+
+ if full_path.startswith(discovery.CONSUL_KV_MASK.format("")):
+ value = base64.b64encode(json.dumps(
+ {tasks.DCAE_POLICY_PLUGIN: {
+ tasks.PolicyHandler.SERVICE_NAME_POLICY_HANDLER: {
+ "url": POLICY_HANDLER_FROM_KV}}}
+ ))
+ return MonkeyedResponse(full_path, {}, [{"Value": value}])
+
+ return MonkeyedResponse(full_path)
+
+
+def test_discovery_kv(monkeypatch):
+ """test finding policy-handler in consul"""
+ monkeypatch.setattr('requests.get', monkeyed_discovery_get_kv)
+
+ node_policy = MonkeyedNode(
+ 'test_dcae_policy_node_id',
+ 'test_dcae_policy_node_name',
+ tasks.DCAE_POLICY_TYPE,
+ {POLICY_ID: MONKEYED_POLICY_ID}
+ )
+ try:
+ current_ctx.set(node_policy.ctx)
+ tasks.PolicyHandler._lazy_init()
+ assert POLICY_HANDLER_FROM_KV == tasks.PolicyHandler._url
+
+ finally:
+ tasks.PolicyHandler._url = None
+ MockCloudifyContextFull.clear()
+ current_ctx.clear()
+
+
+def monkeyed_discovery_get(full_path):
+ """monkeypatch for the GET to consul"""
+ return MonkeyedResponse(full_path, {},
+ [{"ServiceAddress": "monkey-policy-handler-address", "ServicePort": "9999"}])
+
+
+def test_discovery(monkeypatch):
+ """test finding policy-handler in consul"""
+ monkeypatch.setattr('requests.get', monkeyed_discovery_get)
+
+ node_policy = MonkeyedNode(
+ 'test_dcae_policy_node_id',
+ 'test_dcae_policy_node_name',
+ tasks.DCAE_POLICY_TYPE,
+ {POLICY_ID: MONKEYED_POLICY_ID}
+ )
+
+ try:
+ current_ctx.set(node_policy.ctx)
+ expected = "http://monkey-policy-handler-address:9999"
+ CtxLogger.log_ctx_info("before PolicyHandler._lazy_init")
+ tasks.PolicyHandler._lazy_init()
+ CtxLogger.log_ctx_info("after PolicyHandler._lazy_init")
+ assert expected == tasks.PolicyHandler._url
+
+ finally:
+ tasks.PolicyHandler._url = None
+ MockCloudifyContextFull.clear()
+ current_ctx.clear()
diff --git a/dcae-policy/tests/test_tasks.py b/dcae-policy/tests/test_tasks.py
index e2cc2e6..f2ca205 100644
--- a/dcae-policy/tests/test_tasks.py
+++ b/dcae-policy/tests/test_tasks.py
@@ -1,7 +1,5 @@
-# ============LICENSE_START=======================================================
-# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,167 +16,41 @@
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+"""unit tests for tasks in dcaepolicyplugin"""
+
import json
-import logging
-from datetime import datetime, timedelta
import pytest
-
-from cloudify.state import current_ctx
from cloudify.exceptions import NonRecoverableError
-
-from mock_cloudify_ctx import MockCloudifyContextFull, TARGET_NODE_ID, TARGET_NODE_NAME
-from log_ctx import CtxLogger
+from cloudify.state import current_ctx
from dcaepolicyplugin import tasks
+from tests.log_ctx import CtxLogger
+from tests.mock_cloudify_ctx import (TARGET_NODE_ID, TARGET_NODE_NAME,
+ MockCloudifyContextFull)
+from tests.mock_setup import (CONFIG_NAME, MONKEYED_POLICY_ID, POLICY_BODY,
+ POLICY_ID, POLICY_NAME, MonkeyedNode,
+ MonkeyedPolicyBody, MonkeyedResponse)
-DCAE_POLICY_TYPE = 'dcae.nodes.policy'
-POLICY_ID = 'policy_id'
-POLICY_VERSION = "policyVersion"
-POLICY_NAME = "policyName"
-POLICY_BODY = 'policy_body'
-POLICY_CONFIG = 'config'
-MONKEYED_POLICY_ID = 'monkeyed.Config_peach'
-LOG_FILE = 'test_dcaepolicyplugin.log'
-
-RUN_TS = datetime.utcnow()
-
-class MonkeyedLogHandler(object):
- """keep the shared logger handler here"""
- _log_handler = None
-
- @staticmethod
- def add_handler_to(logger):
- """adds the local handler to the logger"""
- if not MonkeyedLogHandler._log_handler:
- MonkeyedLogHandler._log_handler = logging.FileHandler(LOG_FILE)
- MonkeyedLogHandler._log_handler.setLevel(logging.DEBUG)
- formatter = logging.Formatter(
- fmt='%(asctime)s.%(msecs)03d %(levelname)+8s ' + \
- '%(threadName)s %(name)s.%(funcName)s: %(message)s', \
- datefmt='%Y%m%d_%H%M%S')
- MonkeyedLogHandler._log_handler.setFormatter(formatter)
- logger.addHandler(MonkeyedLogHandler._log_handler)
-
-class MonkeyedPolicyBody(object):
- """policy body that policy-engine returns"""
- @staticmethod
- def create_policy_body(policy_id, policy_version=1):
- """returns a fake policy-body"""
- prev_ver = policy_version - 1
- timestamp = RUN_TS + timedelta(hours=prev_ver)
-
- prev_ver = str(prev_ver)
- this_ver = str(policy_version)
- config = {
- "policy_updated_from_ver": prev_ver,
- "policy_updated_to_ver": this_ver,
- "policy_hello": "world!",
- "policy_updated_ts": timestamp.isoformat()[:-3] + 'Z',
- "updated_policy_id": policy_id
- }
- return {
- "policyConfigMessage": "Config Retrieved! ",
- "policyConfigStatus": "CONFIG_RETRIEVED",
- "type": "JSON",
- POLICY_NAME: "{0}.{1}.xml".format(policy_id, this_ver),
- POLICY_VERSION: this_ver,
- POLICY_CONFIG: config,
- "matchingConditions": {
- "ECOMPName": "DCAE",
- "ConfigName": "alex_config_name"
- },
- "responseAttributes": {},
- "property": None
- }
- @staticmethod
- def create_policy(policy_id, policy_version=1):
- """returns the whole policy object for policy_id and policy_version"""
- return {
- POLICY_ID : policy_id,
- POLICY_BODY : MonkeyedPolicyBody.create_policy_body(policy_id, policy_version)
- }
+LATEST_POLICIES = "latest_policies"
- @staticmethod
- def is_the_same_dict(policy_body_1, policy_body_2):
- """check whether both policy_body objects are the same"""
- if not isinstance(policy_body_1, dict) or not isinstance(policy_body_2, dict):
- return False
- for key in policy_body_1.keys():
- if key not in policy_body_2:
- return False
- if isinstance(policy_body_1[key], dict):
- return MonkeyedPolicyBody.is_the_same_dict(
- policy_body_1[key], policy_body_2[key])
- if (policy_body_1[key] is None and policy_body_2[key] is not None) \
- or (policy_body_1[key] is not None and policy_body_2[key] is None) \
- or (policy_body_1[key] != policy_body_2[key]):
- return False
- return True
-
-class MonkeyedResponse(object):
- """Monkey response"""
- def __init__(self, full_path, headers=None, resp_json=None):
- self.full_path = full_path
- self.status_code = 200
- self.headers = headers
- self.resp_json = resp_json
-
- def json(self):
- """returns json of response"""
- return self.resp_json
-
- def raise_for_status(self):
- """always happy"""
- pass
-
-class MonkeyedNode(object):
- """node in cloudify"""
- BLUEPRINT_ID = 'test_dcae_policy_bp_id'
- DEPLOYMENT_ID = 'test_dcae_policy_dpl_id'
- EXECUTION_ID = 'test_dcae_policy_exe_id'
-
- def __init__(self, node_id, node_name, node_type, properties, relationships=None):
- self.node_id = node_id
- self.node_name = node_name
- self.ctx = MockCloudifyContextFull(
- node_id=self.node_id,
- node_name=self.node_name,
- node_type=node_type,
- blueprint_id=MonkeyedNode.BLUEPRINT_ID,
- deployment_id=MonkeyedNode.DEPLOYMENT_ID,
- execution_id=MonkeyedNode.EXECUTION_ID,
- properties=properties,
- relationships=relationships
- )
- MonkeyedLogHandler.add_handler_to(self.ctx.logger)
-
-def monkeyed_discovery_get(full_path):
- """monkeypatch for the GET to consul"""
- return MonkeyedResponse(full_path, {}, \
- [{"ServiceAddress":"monkey-policy-handler-address", "ServicePort": "9999"}])
-
-def monkeyed_policy_handler_get(full_path, headers):
+
+def monkeyed_policy_handler_get(full_path, headers=None):
"""monkeypatch for the GET to policy-engine"""
- return MonkeyedResponse(full_path, headers, \
+ return MonkeyedResponse(full_path, headers,
MonkeyedPolicyBody.create_policy(MONKEYED_POLICY_ID))
-def test_discovery(monkeypatch):
- """test finding policy-handler in consul"""
- monkeypatch.setattr('requests.get', monkeyed_discovery_get)
- expected = "http://monkey-policy-handler-address:9999/policy_latest"
- tasks.PolicyHandler._lazy_init()
- assert expected == tasks.PolicyHandler._url
def test_policy_get(monkeypatch):
"""test policy_get operation on dcae.nodes.policy node"""
+ tasks.PolicyHandler._url = tasks.PolicyHandler.DEFAULT_URL
monkeypatch.setattr('requests.get', monkeyed_policy_handler_get)
node_policy = MonkeyedNode(
'test_dcae_policy_node_id',
'test_dcae_policy_node_name',
- DCAE_POLICY_TYPE,
+ tasks.DCAE_POLICY_TYPE,
{POLICY_ID: MONKEYED_POLICY_ID}
)
@@ -189,7 +61,7 @@ def test_policy_get(monkeypatch):
CtxLogger.log_ctx_info("after policy_get")
expected = {
- POLICY_BODY : MonkeyedPolicyBody.create_policy_body(MONKEYED_POLICY_ID)
+ POLICY_BODY: MonkeyedPolicyBody.create_policy_body(MONKEYED_POLICY_ID)
}
result = node_policy.ctx.instance.runtime_properties
node_policy.ctx.logger.info("expected runtime_properties: {0}".format(
@@ -198,15 +70,122 @@ def test_policy_get(monkeypatch):
assert MonkeyedPolicyBody.is_the_same_dict(result, expected)
assert MonkeyedPolicyBody.is_the_same_dict(expected, result)
- node_ms = MonkeyedNode('test_ms_id', 'test_ms_name', "ms.nodes.type", None, \
- [{TARGET_NODE_ID: node_policy.node_id,
- TARGET_NODE_NAME: node_policy.node_name}])
+ finally:
+ MockCloudifyContextFull.clear()
+ current_ctx.clear()
+
+
+def test_policy_get_fail(monkeypatch):
+ """test policy_get operation on non dcae.nodes.policy node"""
+ tasks.PolicyHandler._url = tasks.PolicyHandler.DEFAULT_URL
+ monkeypatch.setattr('requests.get', monkeyed_policy_handler_get)
+
+ node_policy = MonkeyedNode(
+ 'test_dcae_policy_node_id',
+ 'test_dcae_policy_node_name',
+ tasks.DCAE_POLICY_TYPE,
+ {POLICY_ID: MONKEYED_POLICY_ID}
+ )
+
+ node_ms = MonkeyedNode(
+ 'test_ms_id', 'test_ms_name', "ms.nodes.type", None,
+ [{TARGET_NODE_ID: node_policy.node_id, TARGET_NODE_NAME: node_policy.node_name}]
+ )
+
+ try:
current_ctx.set(node_ms.ctx)
CtxLogger.log_ctx_info("ctx of node_ms not policy type")
with pytest.raises(NonRecoverableError) as excinfo:
tasks.policy_get()
CtxLogger.log_ctx_info("node_ms not policy type boom: {0}".format(str(excinfo.value)))
- assert "can only invoke policy_get on node of type dcae.nodes.policy" in str(excinfo.value)
+ assert "unexpected node type " in str(excinfo.value)
+
+ finally:
+ MockCloudifyContextFull.clear()
+ current_ctx.clear()
+
+
+def monkeyed_policy_handler_find(full_path, json, headers):
+    """monkeypatch for the POST to policy-engine"""
+ return MonkeyedResponse(
+ full_path, headers,
+ {LATEST_POLICIES: {
+ MONKEYED_POLICY_ID: MonkeyedPolicyBody.create_policy(MONKEYED_POLICY_ID)}}
+ )
+
+
+def test_policies_find(monkeypatch):
+ """test policy_get operation on dcae.nodes.policies node"""
+ tasks.PolicyHandler._url = tasks.PolicyHandler.DEFAULT_URL
+ monkeypatch.setattr('requests.post', monkeyed_policy_handler_find)
+
+ node_policies = MonkeyedNode(
+ 'test_dcae_policies_node_id',
+ 'test_dcae_policies_node_name',
+ tasks.DCAE_POLICIES_TYPE,
+ {
+ tasks.POLICY_FILTER: {
+ POLICY_NAME: MONKEYED_POLICY_ID,
+ tasks.CONFIG_ATTRIBUTES: json.dumps({
+ CONFIG_NAME: "alex_config_name"
+ })
+ }
+ }
+ )
+
+ try:
+ current_ctx.set(node_policies.ctx)
+ CtxLogger.log_ctx_info("before policy_get")
+ tasks.policy_get()
+ CtxLogger.log_ctx_info("after policy_get")
+
+ expected = {
+ tasks.POLICIES_FILTERED: {
+ MONKEYED_POLICY_ID: MonkeyedPolicyBody.create_policy(MONKEYED_POLICY_ID)}}
+
+ result = node_policies.ctx.instance.runtime_properties
+ node_policies.ctx.logger.info("expected runtime_properties: {0}".format(
+ json.dumps(expected)))
+ node_policies.ctx.logger.info("runtime_properties: {0}".format(json.dumps(result)))
+ assert MonkeyedPolicyBody.is_the_same_dict(result, expected)
+ assert MonkeyedPolicyBody.is_the_same_dict(expected, result)
+
+ finally:
+ MockCloudifyContextFull.clear()
+ current_ctx.clear()
+
+
+def test_policies_find_fail(monkeypatch):
+ """test policy_get operation on non dcae.nodes.policies node"""
+ tasks.PolicyHandler._url = tasks.PolicyHandler.DEFAULT_URL
+ monkeypatch.setattr('requests.post', monkeyed_policy_handler_find)
+
+ node_policies = MonkeyedNode(
+ 'test_dcae_policies_node_id',
+ 'test_dcae_policies_node_name',
+ tasks.DCAE_POLICIES_TYPE,
+ {
+ tasks.POLICY_FILTER: {
+ POLICY_NAME: MONKEYED_POLICY_ID,
+ tasks.CONFIG_ATTRIBUTES: json.dumps({
+ CONFIG_NAME: "alex_config_name"
+ })
+ }
+ }
+ )
+ node_ms_multi = MonkeyedNode(
+ 'test_ms_multi_id', 'test_ms_multi_name', "ms.nodes.type",
+ None,
+ [{TARGET_NODE_ID: node_policies.node_id, TARGET_NODE_NAME: node_policies.node_name}]
+ )
+
+ try:
+ current_ctx.set(node_ms_multi.ctx)
+ CtxLogger.log_ctx_info("ctx of node_ms_multi not policy type")
+ with pytest.raises(NonRecoverableError) as excinfo:
+ tasks.policy_get()
+ CtxLogger.log_ctx_info("node_ms_multi not policy type boom: {0}".format(str(excinfo.value)))
+ assert "unexpected node type " in str(excinfo.value)
finally:
MockCloudifyContextFull.clear()
diff --git a/dcae-policy/tox-local.ini b/dcae-policy/tox-local.ini
index 70c1319..6bd1c58 100644
--- a/dcae-policy/tox-local.ini
+++ b/dcae-policy/tox-local.ini
@@ -1,3 +1,4 @@
+# tox -c tox-local.ini | tee -a logs/test_dcaepolicyplugin.log 2>&1
[tox]
envlist = py27
@@ -12,5 +13,3 @@ setenv =
PYTHONPATH={toxinidir}
# recreate = True
commands=pytest -v --cov dcaepolicyplugin --cov-report html
-
-
diff --git a/dcae-policy/tox.ini b/dcae-policy/tox.ini
index b78ba54..40caed5 100644
--- a/dcae-policy/tox.ini
+++ b/dcae-policy/tox.ini
@@ -11,5 +11,7 @@ deps=
pytest-cov
setenv =
PYTHONPATH={toxinidir}
-# recreate = True
-commands=pytest --junitxml xunit-reports/xunit-result-dcaepolicyplugin.xml --cov dcaepolicyplugin --cov-report=xml
+commands=
+ -mkdir logs
+ pytest --junitxml xunit-results.xml --cov dcaepolicyplugin --cov-report xml
+ coverage xml
diff --git a/docker/ChangeLog.md b/docker/ChangeLog.md
index 0d0eafc..e6eb7d1 100644
--- a/docker/ChangeLog.md
+++ b/docker/ChangeLog.md
@@ -5,6 +5,19 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
+## [3.2.0]
+
+* Change requirements.txt to use a version range for onap-dcae-dcaepolicy-lib
+* DCAEGEN2-442
+
+## [3.1.0]
+
+* DCAEGEN2-415 - Change requirements.txt to use dcaepolicy 2.3.0, which constitutes a version bump.
+
+## [3.0.0]
+
+* Update docker plugin to use dcaepolicy 2.1.0. This involved extensive changes to how policy is expected to work for the component, and the changes are not backward compatible.
+
## [2.4.0]
* Change *components* to be policy reconfigurable:
diff --git a/docker/README.md b/docker/README.md
index cb3e66c..6a9ce70 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -19,6 +19,10 @@ The Docker plugin requires a key-value entry in Consul that holds all the Docker
If there are no required Docker logins then set the value to empty list `[]`.
+### "consul" DNS query
+
+The Docker plugin assumes that the DNS query for "consul" will resolve. Make sure the Cloudify installation includes any steps (e.g. adding a line to `/etc/hosts`) to ensure this.
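+
+For example, an `/etc/hosts` entry like the following (the address is illustrative) would make the name resolve:
+
+```
+10.12.5.115   consul
+```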
+
## Input parameters
### start
diff --git a/docker/docker-node-type.yaml b/docker/docker-node-type.yaml
index 7efc84d..0eddb0f 100644
--- a/docker/docker-node-type.yaml
+++ b/docker/docker-node-type.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
@@ -7,7 +24,63 @@ plugins:
docker:
executor: 'central_deployment_agent'
package_name: dockerplugin
- package_version: 2.4.0
+ package_version: 3.2.0
+
+
+data_types:
+ # NOTE: These data types were copied from the k8s node type in order to have
+ # consistent node properties between docker and k8s. Perhaps we should make
+ # these data_types common somehow?
+ dcae.types.MSBRegistration:
+ description: >
+ Information for registering an HTTP service into MSB. It's optional to do so,
+      but if MSB registration is desired, at least the 'port' property must be provided.
+ If 'port' property is not provided, the plugin will not do the registration.
+ (The properties all have to be declared as not required, otherwise the
+ 'msb_registration' property on the node would also be required.)
+ properties:
+ port:
+ description: The container port at which the service is exposed
+ type: string
+ required: false
+ version:
+ description: The version identifier for the service
+ type: string
+ required: false
+ url_path:
+ description: The URL path (e.g., "/api", not the full URL) to the service endpoint
+ type: string
+ required: false
+ uses_ssl:
+ description: Set to true if service endpoint uses SSL (TLS)
+ type: boolean
+ required: false
+
+ dcae.types.LoggingInfo:
+ description: >
+ Information for setting up centralized logging via ELK using a "sidecar" container.
+ If 'log_directory' is not provided, the plugin will not set up ELK logging.
+ (The properties all have to be declared as not required, otherwise the
+ 'log_info' property on the node would also be required.)
+ properties:
+ log_directory:
+ description: >
+ The path in the container where the component writes its logs.
+ If the component is following the EELF requirements, this would be
+ the directory where the four EELF files are being written.
+          (Other logs can be placed in the directory--if their names end in '.log',
+ they'll also be sent into ELK.)
+ type: string
+ required: false
+ alternate_fb_path:
+ description: >
+          Not normally needed. By default, the plugin will mount the log volume
+          at /var/log/onap/<component_type> in the sidecar container's file system.
+          'alternate_fb_path' allows overriding that default; note that this affects
+          where the log data can be found in the ELK system.
+ type: string
+ required: false
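+
+  # Illustrative sketch of how a blueprint might populate these data types
+  # (all values below are hypothetical):
+  #
+  #   msb_registration:
+  #     port: "8080"
+  #     version: "v1"
+  #     url_path: "/api"
+  #     uses_ssl: false
+  #   log_info:
+  #     log_directory: "/opt/app/logs"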
+
node_types:
# The DockerContainerForComponents node type is to be used for DCAE service components that
@@ -69,6 +142,29 @@ node_types:
type: string
description: Full uri of the Docker image
+ # The following properties are copied from k8s node type to be consistent.
+ # However, these properties are not currently being used within the docker
+ # plugin.
+ log_info:
+ type: dcae.types.LoggingInfo
+ description: >
+ Information for setting up centralized logging via ELK.
+ required: false
+
+ replicas:
+ type: integer
+ description: >
+ The number of instances of the component that should be launched initially
+ default: 1
+
+ always_pull_image:
+ type: boolean
+ description: >
+ Set to true if the orchestrator should always pull a new copy of the image
+ before deploying. By default the orchestrator pulls only if the image is
+ not already present on the Docker host where the container is being launched.
+ default: false
+
interfaces:
cloudify.interfaces.lifecycle:
create:
@@ -187,6 +283,35 @@ node_types:
Network port that the platform service exposes in the container
default: 0
+ # The following properties are copied from k8s node type to be consistent.
+ # However, these properties are not currently being used within the docker
+ # plugin.
+ msb_registration:
+ type: dcae.types.MSBRegistration
+ description: >
+ Information for registering with MSB
+ required: false
+
+ log_info:
+ type: dcae.types.LoggingInfo
+ description: >
+ Information for setting up centralized logging via ELK.
+ required: false
+
+ replicas:
+ type: integer
+ description: >
+ The number of instances of the component that should be launched initially
+ default: 1
+
+ always_pull_image:
+ type: boolean
+ description: >
+ Set to true if the orchestrator should always pull a new copy of the image
+ before deploying. By default the orchestrator pulls only if the image is
+ not already present on the Docker host where the container is being launched.
+ default: false
+
interfaces:
cloudify.interfaces.lifecycle:
create:
diff --git a/docker/dockerplugin/tasks.py b/docker/dockerplugin/tasks.py
index ff2f2af..03eba62 100644
--- a/docker/dockerplugin/tasks.py
+++ b/docker/dockerplugin/tasks.py
@@ -25,7 +25,7 @@ from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError, RecoverableError
import dockering as doc
-from onap_dcae_dcaepolicy_lib import Policies, POLICIES, POLICY_MESSAGE_TYPE
+from onap_dcae_dcaepolicy_lib import Policies
from dockerplugin import discovery as dis
from dockerplugin.decorators import monkeypatch_loggers, wrap_error_handling_start, \
merge_inputs_for_start, merge_inputs_for_create
@@ -35,9 +35,9 @@ from dockerplugin import utils
# TODO: Remove this Docker port hardcoding and query for this port instead
DOCKER_PORT = 2376
-# Always use the local Consul agent for interfacing with Consul from the plugin.
-# Safe to assume that its always there.
-CONSUL_HOST = "localhost"
+# Rely on the setup of the cloudify manager host to resolve "consul" for the
+# plugin. NOTE: This variable is not passed to components.
+CONSUL_HOST = "consul"
# Used to construct delivery urls for data router subscribers. Data router in FTL
# requires https but this author believes that ONAP is to be defaulted to http.
@@ -113,15 +113,10 @@ def _done_for_create(**kwargs):
ctx.logger.info("Done setting up: {0}".format(name))
return kwargs
-def _merge_policy_updates(**kwargs):
- app_config = kwargs[APPLICATION_CONFIG]
- kwargs[APPLICATION_CONFIG] = Policies.shallow_merge_policies_into(app_config)
- return kwargs
-
@merge_inputs_for_create
@monkeypatch_loggers
-@Policies.gather_policies_to_node
+@Policies.gather_policies_to_node()
@operation
def create_for_components(**create_inputs):
"""Create step for Docker containers that are components
@@ -133,9 +128,8 @@ def create_for_components(**create_inputs):
"""
_done_for_create(
**_setup_for_discovery(
- **_merge_policy_updates(
**_generate_component_name(
- **create_inputs))))
+ **create_inputs)))
def _parse_streams(**kwargs):
@@ -204,7 +198,7 @@ def _setup_for_discovery_streams(**kwargs):
@merge_inputs_for_create
@monkeypatch_loggers
-@Policies.gather_policies_to_node
+@Policies.gather_policies_to_node()
@operation
def create_for_components_with_streams(**create_inputs):
"""Create step for Docker containers that are components that use DMaaP
@@ -219,10 +213,9 @@ def create_for_components_with_streams(**create_inputs):
_done_for_create(
**_setup_for_discovery(
**_setup_for_discovery_streams(
- **_merge_policy_updates(
**_parse_streams(
**_generate_component_name(
- **create_inputs))))))
+ **create_inputs)))))
@merge_inputs_for_create
@@ -451,6 +444,8 @@ def _update_delivery_url(**kwargs):
for dr_sub in dr_subs:
scheme = dr_sub["scheme"] if "scheme" in dr_sub else DEFAULT_SCHEME
+ if "route" not in dr_sub:
+ raise NonRecoverableError("'route' key missing from data router subscriber")
path = dr_sub["route"]
dr_sub["delivery_url"] = "{scheme}://{host}/{path}".format(
scheme=scheme, host=subscriber_host, path=path)
@@ -584,6 +579,7 @@ def stop_and_remove_container(**kwargs):
raise NonRecoverableError(e)
@monkeypatch_loggers
+@Policies.cleanup_policies_on_node
@operation
def cleanup_discovery(**kwargs):
"""Delete configuration from Consul"""
@@ -604,11 +600,15 @@ def _notify_container(**kwargs):
if dc["policy"]["trigger_type"] == "docker":
# REVIEW: Need to finalize on the docker config policy data structure
script_path = dc["policy"]["script_path"]
- app_config = kwargs["application_config"]
updated_policies = kwargs["updated_policies"]
+ removed_policies = kwargs["removed_policies"]
+ policies = kwargs["policies"]
cmd = doc.build_policy_update_cmd(script_path, use_sh=False,
+ msg_type="policies",
updated_policies=updated_policies,
- application_config=app_config)
+ removed_policies=removed_policies,
+ policies=policies
+ )
docker_host = kwargs[SELECTED_CONTAINER_DESTINATION]
docker_host_ip = _lookup_service(docker_host)
@@ -622,16 +622,10 @@ def _notify_container(**kwargs):
return kwargs
-def _done_for_policy_update(**kwargs):
- name = kwargs['name']
- ctx.instance.runtime_properties.update(kwargs)
- ctx.logger.info("Done updating for policy: {0}".format(name))
- return kwargs
-
@monkeypatch_loggers
-@Policies.update_policies_on_node(configs_only=True)
+@Policies.update_policies_on_node()
@operation
-def policy_update(updated_policies, **kwargs):
+def policy_update(updated_policies, removed_policies=None, policies=None, **kwargs):
"""Policy update task
This method is responsible for updating the application configuration and
@@ -643,12 +637,10 @@ def policy_update(updated_policies, **kwargs):
"""
update_inputs = copy.deepcopy(ctx.instance.runtime_properties)
update_inputs["updated_policies"] = updated_policies
+ update_inputs["removed_policies"] = removed_policies
+ update_inputs["policies"] = policies
- # Merge in policy updates into application config and make available
- _done_for_policy_update(
- **_notify_container(
- **_setup_for_discovery(
- **_merge_policy_updates(**update_inputs))))
+ _notify_container(**update_inputs)
# Lifecycle interface calls for dcae.nodes.DockerHost
diff --git a/docker/examples/blueprint-laika-dmaap-pubs.yaml b/docker/examples/blueprint-laika-dmaap-pubs.yaml
index 4616b0c..6462227 100644
--- a/docker/examples/blueprint-laika-dmaap-pubs.yaml
+++ b/docker/examples/blueprint-laika-dmaap-pubs.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
description: >
diff --git a/docker/examples/blueprint-laika-dmaap-pubsub.yaml b/docker/examples/blueprint-laika-dmaap-pubsub.yaml
index bcbbb17..c6099a2 100644
--- a/docker/examples/blueprint-laika-dmaap-pubsub.yaml
+++ b/docker/examples/blueprint-laika-dmaap-pubsub.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
description: >
diff --git a/docker/examples/blueprint-laika-dmaap-subs.yaml b/docker/examples/blueprint-laika-dmaap-subs.yaml
index f6d0b3a..ec28668 100644
--- a/docker/examples/blueprint-laika-dmaap-subs.yaml
+++ b/docker/examples/blueprint-laika-dmaap-subs.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
description: >
diff --git a/docker/examples/blueprint-laika-policy.yaml b/docker/examples/blueprint-laika-policy.yaml
index d0a9c68..f6b6925 100644
--- a/docker/examples/blueprint-laika-policy.yaml
+++ b/docker/examples/blueprint-laika-policy.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
description: >
@@ -5,7 +22,7 @@ description: >
imports:
- http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
- - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.3.0/node-type.yaml
+ - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/3.0.0/node-type.yaml
- {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
- {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/dcaepolicy/1.0.0/node-type.yaml
diff --git a/docker/examples/blueprint-laika.yaml b/docker/examples/blueprint-laika.yaml
index 0db03b8..8ef6f0c 100644
--- a/docker/examples/blueprint-laika.yaml
+++ b/docker/examples/blueprint-laika.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
description: >
diff --git a/docker/examples/blueprint-registrator.yaml b/docker/examples/blueprint-registrator.yaml
index d9d6449..fbfd7d9 100644
--- a/docker/examples/blueprint-registrator.yaml
+++ b/docker/examples/blueprint-registrator.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
description: >
diff --git a/docker/pom.xml b/docker/pom.xml
index be11078..236a4fb 100644
--- a/docker/pom.xml
+++ b/docker/pom.xml
@@ -28,25 +28,17 @@ ECOMP is a trademark and service mark of AT&T Intellectual Property.
<groupId>org.onap.dcaegen2.platform.plugins</groupId>
<artifactId>docker</artifactId>
<name>docker-plugin</name>
- <version>1.1.0-SNAPSHOT</version>
+ <version>3.2.0-SNAPSHOT</version>
<url>http://maven.apache.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <sonar.skip>true</sonar.skip>
<sonar.sources>.</sonar.sources>
- <!-- customize the SONARQUBE URL -->
- <!-- sonar.host.url>http://localhost:9000</sonar.host.url -->
- <!-- below are language dependent -->
- <!-- for Python -->
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPath>coverage.xml</sonar.python.coverage.reportPath>
<sonar.language>py</sonar.language>
<sonar.pluginName>Python</sonar.pluginName>
<sonar.inclusions>**/*.py</sonar.inclusions>
- <!-- for JavaScaript -->
- <!--
- <sonar.language>js</sonar.language>
- <sonar.pluginName>JS</sonar.pluginName>
- <sonar.inclusions>**/*.js</sonar.inclusions>
- -->
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
</properties>
<build>
<finalName>${project.artifactId}-${project.version}</finalName>
diff --git a/docker/requirements.txt b/docker/requirements.txt
index 7e04a5f..257a641 100644
--- a/docker/requirements.txt
+++ b/docker/requirements.txt
@@ -1,2 +1,2 @@
onap-dcae-dockering==1.4.0
-onap-dcae-dcaepolicy-lib==1.0.0
+onap-dcae-dcaepolicy-lib>=2.3.0,<3.0.0
diff --git a/docker/setup.py b/docker/setup.py
index ececb43..4f521c1 100644
--- a/docker/setup.py
+++ b/docker/setup.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ from setuptools import setup
setup(
name='dockerplugin',
description='Cloudify plugin for applications run in Docker containers',
- version="2.4.0",
+ version="3.2.0",
author='Michael Hwang, Tommy Carpenter',
packages=['dockerplugin'],
zip_safe=False,
@@ -32,6 +32,6 @@ setup(
"python-consul>=0.6.0,<1.0.0",
"onap-dcae-dockering>=1.0.0,<2.0.0",
"uuid==1.30",
- "onap-dcae-dcaepolicy-lib>=1.0.0"
+ "onap-dcae-dcaepolicy-lib>=2.3.0,<3.0.0"
]
)
diff --git a/docker/tests/test_decorators.py b/docker/tests/test_decorators.py
new file mode 100644
index 0000000..403e39f
--- /dev/null
+++ b/docker/tests/test_decorators.py
@@ -0,0 +1,36 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from dockerplugin import decorators as dec
+
+
+def test_wrapper_merge_inputs():
+ properties = { "app_config": {"nested": { "a": 123, "b": 456 }, "foo": "duh"},
+ "image": "some-docker-image" }
+ kwargs = { "app_config": {"nested": {"a": 789, "c": "zyx"}} }
+
+ def task_func(**inputs):
+ return inputs
+
+ expected = { "app_config": {"nested": { "a": 789, "b": 456, "c": "zyx" },
+ "foo": "duh"}, "image": "some-docker-image" }
+
+ assert expected == dec._wrapper_merge_inputs(task_func, properties, **kwargs)
+
diff --git a/docker/tests/test_discovery.py b/docker/tests/test_discovery.py
index cee75b1..f3aed66 100644
--- a/docker/tests/test_discovery.py
+++ b/docker/tests/test_discovery.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,6 +40,12 @@ def test_wrap_consul_call():
wrapped_foo("a", "b", "c")
+def test_generate_service_component_name():
+ component_type = "some-component-type"
+ name = dis.generate_service_component_name(component_type)
+ assert name.split("_")[1] == component_type
+
+
def test_find_matching_services():
services = { "component_dockerhost_1": ["foo", "bar"],
"platform_dockerhost": [], "component_dockerhost_2": ["baz"] }
@@ -53,3 +59,11 @@ def test_find_matching_services():
"component_dockerhost", ["foo"])
assert [] == dis._find_matching_services(services, "unknown", ["foo"])
+
+
+def test_is_healthy_pure():
+ def fake_is_healthy(name):
+ return 0, [{ "Checks": [{"Status": "passing"}] }]
+
+ assert True == dis._is_healthy_pure(fake_is_healthy, "some-component")
+
diff --git a/docker/tests/test_tasks.py b/docker/tests/test_tasks.py
index 6661532..c58d02c 100644
--- a/docker/tests/test_tasks.py
+++ b/docker/tests/test_tasks.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,9 +20,10 @@
import copy
import pytest
-from cloudify.exceptions import NonRecoverableError
+from cloudify.exceptions import NonRecoverableError, RecoverableError
import dockerplugin
from dockerplugin import tasks
+from dockerplugin.exceptions import DockerPluginDeploymentError
def test_generate_component_name():
@@ -89,6 +90,28 @@ def test_parse_streams(monkeypatch):
assert expected == tasks._parse_streams(**test_input)
+def test_setup_for_discovery(monkeypatch):
+ test_input = { "name": "some-name",
+ "application_config": { "one": "a", "two": "b" } }
+
+ def fake_push_config(conn, name, application_config):
+ return
+
+ monkeypatch.setattr(dockerplugin.discovery, "push_service_component_config",
+ fake_push_config)
+
+ assert test_input == tasks._setup_for_discovery(**test_input)
+
+ def fake_push_config_connection_error(conn, name, application_config):
+ raise dockerplugin.discovery.DiscoveryConnectionError("Boom")
+
+ monkeypatch.setattr(dockerplugin.discovery, "push_service_component_config",
+ fake_push_config_connection_error)
+
+ with pytest.raises(RecoverableError):
+ tasks._setup_for_discovery(**test_input)
+
+
def test_setup_for_discovery_streams(monkeypatch):
test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
@@ -147,6 +170,37 @@ def test_setup_for_discovery_streams(monkeypatch):
tasks._setup_for_discovery_streams(**test_input)
+def test_lookup_service(monkeypatch):
+ def fake_lookup(conn, scn):
+ return [{"ServiceAddress": "192.168.1.1", "ServicePort": "80"}]
+
+ monkeypatch.setattr(dockerplugin.discovery, "lookup_service",
+ fake_lookup)
+
+ assert "192.168.1.1" == tasks._lookup_service("some-component")
+ assert "192.168.1.1:80" == tasks._lookup_service("some-component",
+ with_port=True)
+
+
+def test_verify_container(monkeypatch):
+ def fake_is_healthy_success(ch, scn):
+ return True
+
+ monkeypatch.setattr(dockerplugin.discovery, "is_healthy",
+ fake_is_healthy_success)
+
+ assert tasks._verify_container("some-name", 3)
+
+ def fake_is_healthy_never_good(ch, scn):
+ return False
+
+ monkeypatch.setattr(dockerplugin.discovery, "is_healthy",
+ fake_is_healthy_never_good)
+
+ with pytest.raises(DockerPluginDeploymentError):
+ tasks._verify_container("some-name", 2)
+
+
def test_update_delivery_url(monkeypatch):
test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
'username': 'hero', 'password': '123456', 'location': 'Bedminster',
@@ -216,3 +270,8 @@ def test_enhance_docker_params():
actual = tasks._enhance_docker_params(**test_kwargs)
assert actual["envs"] == {"SERVICE_TAGS": "abc,zed"}
+
+
+def test_notify_container():
+ test_input = { "docker_config": { "trigger_type": "unknown" } }
+ assert test_input == tasks._notify_container(**test_input)
diff --git a/docker/tests/test_utils.py b/docker/tests/test_utils.py
new file mode 100644
index 0000000..4578dae
--- /dev/null
+++ b/docker/tests/test_utils.py
@@ -0,0 +1,32 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from dockerplugin import utils
+
+
+def test_random_string():
+ target_length = 10
+ assert len(utils.random_string(target_length)) == target_length
+
+
+def test_update_dict():
+ d = { "a": 1, "b": 2 }
+ u = { "a": 2, "b": 3 }
+ assert utils.update_dict(d, u) == u
diff --git a/docker/tox.ini b/docker/tox.ini
index 9a4b7f8..9e3cfb2 100644
--- a/docker/tox.ini
+++ b/docker/tox.ini
@@ -4,8 +4,13 @@ envlist = py27
[testenv]
deps=
+ -rrequirements.txt
cloudify-plugins-common==3.4
pytest
coverage
pytest-cov
-commands=pytest --junitxml xunit-results.xml --cov {envsitepackagesdir}/dockerplugin --cov-report=xml
+setenv=
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov dockerplugin --cov-report xml
+ coverage xml
diff --git a/k8s/.gitignore b/k8s/.gitignore
new file mode 100644
index 0000000..be63b67
--- /dev/null
+++ b/k8s/.gitignore
@@ -0,0 +1,71 @@
+cfyhelper
+.cloudify
+*.swp
+*.swn
+*.swo
+.DS_Store
+.project
+.pydevproject
+venv
+
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coveragerc
+.coverage.*
+.cache
+.pytest_cache/
+xunit-results.xml
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/k8s/ChangeLog.md b/k8s/ChangeLog.md
new file mode 100644
index 0000000..bf3353e
--- /dev/null
+++ b/k8s/ChangeLog.md
@@ -0,0 +1,24 @@
+# Change Log
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](http://semver.org/).
+
+
+## [1.3.0]
+* Enhancement: Add support for changing the image running in the application container. ("Rolling upgrade")
+
+## [1.2.0]
+* Enhancement: Use the "healthcheck" parameters from node_properties to set up a
+Kubernetes readiness probe for the container.
+
+## [1.1.0]
+* Enhancement: When Cloudify Manager is running in a Docker container in a Kubernetes environment, the plugin can use the Kubernetes API credentials set up by Kubernetes.
+
+## [1.0.1]
+* Fixes a bug in passing environment variables.
+
+## [1.0.0]
+
+* Initial release of the Kubernetes plugin. It is built on the [Docker plugin](../docker) and preserves the Docker plugin's integration with the policy plugin and the DMaaP plugin.
diff --git a/k8s/DesignNotes.md b/k8s/DesignNotes.md
new file mode 100644
index 0000000..556e0f0
--- /dev/null
+++ b/k8s/DesignNotes.md
@@ -0,0 +1,13 @@
+## Design Notes: DCAE on Kubernetes
+### Background: Previous Support for DCAE on Docker
+### Key Design Elements for DCAE on Kubernetes
+#### Use of Kubernetes Abstractions
+#### Networking
+#### Service Discovery
+#### Component Configuration
+#### Health Checking and Resiliency
+#### Scaling
+#### Software Updates
+### Changes to Cloudify Type Definitions
+### Impact to Other DCAE Components
+### Status of Work
diff --git a/k8s/LICENSE.txt b/k8s/LICENSE.txt
new file mode 100644
index 0000000..e266d0a
--- /dev/null
+++ b/k8s/LICENSE.txt
@@ -0,0 +1,32 @@
+============LICENSE_START=======================================================
+org.onap.dcae
+================================================================================
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+===================================================================
+Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+you may not use this documentation except in compliance with the License.
+You may obtain a copy of the License at
+ https://creativecommons.org/licenses/by/4.0/
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/k8s/README.md b/k8s/README.md
new file mode 100644
index 0000000..5b2d0da
--- /dev/null
+++ b/k8s/README.md
@@ -0,0 +1,293 @@
+# ONAP DCAE Kubernetes Plugin for Cloudify
+
+This directory contains a Cloudify plugin used to orchestrate the deployment of containerized DCAE platform and service components into a Kubernetes ("k8s")
+environment. This work is based on the [ONAP DCAE Docker plugin](../docker).
+
+This plugin is *not* a generic Kubernetes plugin that exposes the full set of Kubernetes features.
+In fact, the plugin largely hides the use of Kubernetes from both component developers and blueprint authors.
+The Cloudify node type definitions are very similar to the Cloudify type definitions used in the ONAP DCAE Docker plugin.
+
+For the node types `ContainerizedPlatformComponent`, `ContainerizedServiceComponent`, and `ContainerizedServiceComponentUsingDmaap`, this plugin
+creates the following Kubernetes entities:
+
+- A Kubernetes `Deployment` containing information about what containers to run and what volumes to mount.
+ - The `Deployment` always includes a container that runs the component's Docker image
+ - The `Deployment` includes any volumes specified in the blueprint
+ - If the blueprint specifies a logging directory via the `log_info` property, the `Deployment` includes a second container,
+ running the `filebeat` logging sidecar that ships logging information to the ONAP ELK stack. The `Deployment` will include
+ some additional volumes needed by filebeat.
+- If the blueprint indicates that the component exposes any ports, the plugin will create a Kubernetes `Service` that allocates an address
+ in the Kubernetes network address space that will route traffic to a container that's running the component. This `Service` provides a
+ fixed "virtual IP" for the component.
+- If the blueprint indicates that the component exposes a port externally, the plugin will create an additional Kubernetes `Service` that opens up a
+ port on the external interface of each node in the Kubernetes cluster.
+
+Through the `replicas` property, a blueprint can request deployment of multiple instances of the component. The plugin will still create a single `Deployment` (and,
+if needed, one or two `Services`), but the `Deployment` will cause multiple instances of the container to run. (Specifically, the `Deployment` will create
+a Kubernetes `Pod` for each requested instance.) Other entities connect to a component via the IP address of a `Service`, and Kubernetes takes care of routing
+traffic to an appropriate container instance.
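+
+As an illustrative sketch, a blueprint fragment requesting three instances of a component might look like this (the node name, image, and the omitted required properties are hypothetical):
+
+```yaml
+node_templates:
+  web_server:
+    type: dcae.nodes.ContainerizedServiceComponent
+    properties:
+      image: myrepository.example.com/server/web:1.15
+      replicas: 3
+```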
+
+## Pre-requisites
+### Configuration
+#### Configuration file
+The plugin expects a configuration file in the Python "ini" format to be stored at `/opt/onap/config.txt`. This file contains the address of the Consul cluster.
+Here is an example:
+```
+[consul]
+address=10.12.5.115:30270
+```
+#### Configuration entry in Consul
+Additional configuration information is stored in the Consul KV store under the key `k8s-plugin`.
+The configuration is provided as a JSON object with the following properties (a sketch of a complete entry follows the list):
+
+ - namespace: k8s namespace to use for DCAE
+ - consul_dns_name: k8s internal DNS name for Consul (passed to containers)
+ - image_pull_secrets: list of names of k8s secrets for accessing Docker registries
+ - filebeat: object containing configuration for setting up the filebeat container, with the following properties:
+ - log_path: mount point for log volume in filebeat container
+ - data_path: mount point for data volume in filebeat container
+ - config_path: mount point for config volume in filebeat container
+ - config_subpath: subpath for config data in filebeat container
+ - config_map: name of a ConfigMap holding the filebeat configuration file
+ - image: Docker image to use for filebeat
+
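+A sketch of a complete `k8s-plugin` entry, using the plugin's default values (as also encoded in `configure/configure.py` below); the `image_pull_secrets` name is illustrative:
+
+```json
+{
+  "namespace": "dcae",
+  "consul_dns_name": "consul",
+  "image_pull_secrets": ["my-registry-secret"],
+  "filebeat": {
+    "log_path": "/var/log/onap",
+    "data_path": "/usr/share/filebeat/data",
+    "config_path": "/usr/share/filebeat/filebeat.yml",
+    "config_subpath": "filebeat.yml",
+    "config_map": "filebeat-conf",
+    "image": "docker.elastic.co/beats/filebeat:5.5.0"
+  }
+}
+```
+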
+#### Kubernetes access information
+The plugin accesses a Kubernetes cluster. The information and credentials for accessing a cluster are stored in a "kubeconfig"
+file. The plugin expects to find such a file at `/etc/cloudify/.kube/config`.
+
+#### Additional Kubernetes configuration elements
+The plugin expects certain elements to be provided in the DCAE/ONAP environment, namely:
+ - Kubernetes secret(s) containing the credentials needed to pull images from Docker registries, if needed
+ - A Kubernetes ConfigMap containing the filebeat.yml file used by the filebeat logging container
+ - An ExternalName service
+
+## Input parameters
+
+### `start` operation parameters
+
+These input parameters are for the `start` operation of `cloudify.interfaces.lifecycle` and are inputs into the variant task operations `create_and_start_container*`.
+
+#### `envs`
+
+A map of environment variables that is intended to be forwarded to the container as environment variables. Example:
+
+```yaml
+envs:
+ EXTERNAL_IP: '10.100.1.99'
+```
+
+These environment variables will be forwarded in addition to the *platform-related* environment variables like `CONSUL_HOST`.
+
+#### `volumes`
+
+List of maps used for setting up Kubernetes volume mounts. Example:
+
+```yaml
+volumes:
+ - host:
+ path: '/var/run/docker.sock'
+ container:
+ bind: '/tmp/docker.sock'
+ mode: 'ro'
+```
+
+The table below describes the fields.
+
+key | description
+--- | -----------
+path | Full path to the file or directory on the host machine to be mounted
+bind | Full path to the file or directory in the container where the volume should be mounted to
+mode | Access mode: `ro` (read-only) or `rw` (read-write)
+
+#### `ports`
+
+List of strings - Used to bind container ports to host ports. Each item is of the format: `<container port>:<host port>`.
+
+Note that `ContainerizedPlatformComponent` has the property pair `host_port` and `container_port`. This pair will be merged with the `ports` input parameter.
+
+```yaml
+ports:
+ - '8000:31000'
+```
+
+Default is `None`.
+
+In the Kubernetes environment, most components will communicate over the Kubernetes network using private addresses. For those cases,
+setting the `<host port>` to 0 will expose the `<container port>` to other components on the Kubernetes network, but will not expose any
+ports on the Kubernetes host's external interface. Setting `<host port>` to a non-zero value will expose that port on the external interfaces
+of every Kubernetes host in the cluster. (This uses the Kubernetes `NodePort` service type.)
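+
+For example (port numbers are illustrative):
+
+```yaml
+ports:
+  - '8080:0'      # reachable only by other components on the Kubernetes network
+  - '8080:30800'  # also exposed as NodePort 30800 on every Kubernetes host
+```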
+
+#### `max_wait`
+
+Integer - seconds to wait for component to become ready before throwing a `NonRecoverableError`. For example:
+
+```yaml
+max_wait: 60
+```
+
+Default is 300 seconds.
+
+## Using DMaaP
+
+The node type `dcae.nodes.ContainerizedServiceComponentUsingDmaap` is intended to be used by components that use DMaaP and expects to be connected with the DMaaP node types found in the DMaaP plugin.
+
+### Node properties
+
+The properties `streams_publishes` and `streams_subscribes` are both lists of objects that are intended to be passed into the DMaaP plugin and used to create additional parameters for it.
+
+#### Message router
+
+For message router publishers and subscribers, the objects look like:
+
+```yaml
+name: topic00
+location: mtc5
+client_role: XXXX
+type: message_router
+```
+
+Where `name` is the node name of `dcae.nodes.Topic` or `dcae.nodes.ExistingTopic` that the Docker node is connecting with via the relationships `dcae.relationships.publish_events` for publishing and `dcae.relationships.subscribe_to_events` for subscribing.
+
+#### Data router
+
+For data router publishers, the object looks like:
+
+```yaml
+name: feed00
+location: mtc5
+type: data_router
+```
+
+Where `name` is the node name of `dcae.nodes.Feed` or `dcae.nodes.ExistingFeed` that the Docker node is connecting with via the relationships `dcae.relationships.publish_files`.
+
+For data router subscribers, the object looks like:
+
+```yaml
+name: feed00
+location: mtc5
+type: data_router
+username: king
+password: "123456"
+route: some-path
+scheme: https
+```
+
+Where the relationship to use is `dcae.relationships.subscribe_to_files`.
+
+If `username` and `password` are not provided, then the plugin will generate a username and password pair.
+
+`route` and `scheme` are parameters used in the dynamic construction of the delivery URL, which will be passed to the DMaaP plugin and used in setting up the subscriber to the feed.
+
+`route` is the http path endpoint of the subscriber that will handle files from the associated feed.
+
+`scheme` is either `http` or `https`. If not specified, then the plugin will default to `http`.
+
+### Component configuration
+
+The DMaaP plugin is responsible for provisioning the feed/topic and storing the resulting DMaaP connection details in Consul. Here is an example:
+
+```json
+{
+ "topic00": {
+ "client_role": "XXXX",
+ "client_id": "XXXX",
+ "location": "XXXX",
+ "topic_url": "https://some-topic-url.com/events/abc"
+ }
+}
+```
+
+This is to be merged with the templatized application configuration:
+
+```json
+{
+ "some-param": "Lorem ipsum dolor sit amet",
+ "streams_subscribes": {
+ "topic-alpha": {
+ "type": "message_router",
+ "aaf_username": "user-foo",
+ "aaf_password": "password-bar",
+ "dmaap_info": "<< topic00 >>"
+    }
+ },
+ "streams_publishes": {},
+ "services_calls": {}
+}
+```
+
+To form the application configuration:
+
+```json
+{
+ "some-param": "Lorem ipsum dolor sit amet",
+ "streams_subscribes": {
+ "topic-alpha": {
+ "type": "message_router",
+ "aaf_username": "user-foo",
+ "aaf_password": "password-bar",
+ "dmaap_info": {
+ "client_role": "XXXX",
+ "client_id": "XXXX",
+ "location": "XXXX",
+ "topic_url": "https://some-topic-url.com/events/abc"
+ }
+    }
+ },
+ "streams_publishes": {},
+ "services_calls": {}
+}
+```
+
+This also applies to data router feeds.
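+
+A minimal sketch of how such a merge could be done (this is not the plugin's actual implementation; it assumes placeholders of the form `<< name >>` as in the example above, with `dmaap_entries` being the connection details retrieved from Consul):
+
+```python
+def merge_dmaap_info(app_config, dmaap_entries):
+    """Recursively replace '<< name >>' placeholders with connection details."""
+    if isinstance(app_config, dict):
+        return dict((k, merge_dmaap_info(v, dmaap_entries))
+                    for k, v in app_config.items())
+    if isinstance(app_config, list):
+        return [merge_dmaap_info(v, dmaap_entries) for v in app_config]
+    try:
+        if app_config.startswith("<<") and app_config.endswith(">>"):
+            return dmaap_entries[app_config.strip("<> ")]
+    except AttributeError:
+        pass  # not a string; leave the value untouched
+    return app_config
+```
+
+With the examples above, `merge_dmaap_info` would resolve the `dmaap_info` placeholder under `topic-alpha` to the `topic00` connection details.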
+
+## Additional Operations Supported by the Plugin
+In addition to supporting the Cloudify `install` and `uninstall` workflows, the plugin provides two standalone operations that can be invoked using the Cloudify [`execute_operation` workflow](https://docs.cloudify.co/4.3.0/working_with/workflows/built-in-workflows/). The `dcae.nodes.ContainerizedApplication`, `dcae.nodes.ContainerizedPlatformComponent`, `dcae.nodes.ContainerizedServiceComponent`, and `dcae.nodes.ContainerizedServiceComponentUsingDmaap` node types support these operations.
+
+Currently, there's no convenient high-level interface to trigger these operations, but they could potentially be exposed through some kind of dashboard.
+
+### Scaling Operation (`scale`)
+The `scale` operation provides a way to change the number of replicas running for a node instance. The operation is implemented by modifying the number of replicas in the Kubernetes Deployment specification associated with a node instance and submitting the updated specification to the Kubernetes API. The scale operation works for both increasing and decreasing the number of replicas. The minimum number of replicas is 1.
+
+The `scale` operation takes two parameters:
+- `replicas`: Number of desired replicas. Integer, required.
+- `max_wait`: Number of seconds to wait for successful completion of the operation. Integer, optional, defaults to 300 seconds.
+
+One way to trigger a `scale` operation is by using the Cloudify command line. For example:
+```
+cfy executions start -d my_deployment -p scale_params.yaml execute_operation
+```
+where `my_deployment` is the name of an existing Cloudify deployment and
+`scale_params.yaml` is a file containing the operation parameters:
+```
+operation: scale
+operation_kwargs:
+ replicas: 3
+node_ids:
+ - "web_server"
+```
+Note that the `node_ids` list is required by the `execute_operation` workflow. The list contains all of the nodes that are being targeted by the workflow. If a blueprint contains more than one node, it's possible to scale all of them--or some subset--with a single workflow execution.
+
+### Image Update Operation (`update_image`)
+The `update_image` operation provides a way to change the Docker image running for a node instance, using the Kubernetes _rolling update_ strategy. (See this [tutorial](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/) and this [discussion of the concept](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment) in the Kubernetes documentation.) The operation is implemented by modifying the image property in the Kubernetes Deployment specification associated with a node instance and submitting the updated specification to the Kubernetes API.
+
+The `update_image` operation takes two parameters:
+- `image`: Full name (including registry, if not the default Docker registry, and tag) of the new image to use for the component. String, required.
+- `max_wait`: Number of seconds to wait for successful completion of the operation. Integer, optional, defaults to 300 seconds.
+
+The `update_image` operation can be triggered using the Cloudify command line. For example:
+```
+cfy executions start -d my_deployment -p update_params.yaml execute_operation
+```
+where `my_deployment` is the name of an existing Cloudify deployment and
+`update_params.yaml` is a file containing the operation parameters:
+```
+operation: update_image
+operation_kwargs:
+ image: myrepository.example.com/server/web:1.15
+node_ids:
+ - "web_server"
+```
+Note that the `node_ids` list is required by the `execute_operation` workflow. The list contains all of the nodes that are being targeted by the workflow. For an `update_image` operation, the list typically has only one element.
+
+Note also that the `update_image` operation targets the container running the application code (i.e., the container running the image specified in the `image` node property). This plugin may deploy "sidecar" containers running supporting code--for example, the "filebeat" container that relays logs to the central log server. The `update_image` operation does not touch any "sidecar" containers. \ No newline at end of file
diff --git a/k8s/configure/__init__.py b/k8s/configure/__init__.py
new file mode 100644
index 0000000..b986659
--- /dev/null
+++ b/k8s/configure/__init__.py
@@ -0,0 +1,19 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property. \ No newline at end of file
diff --git a/k8s/configure/configure.py b/k8s/configure/configure.py
new file mode 100644
index 0000000..fcf4044
--- /dev/null
+++ b/k8s/configure/configure.py
@@ -0,0 +1,83 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+_CONFIG_PATH = "/opt/onap/config.txt" # Path to config file on the Cloudify Manager host
+_CONSUL_KEY = "k8s-plugin" # Key under which CM configuration is stored in Consul
+
+# Default configuration values
+DCAE_NAMESPACE = "dcae"
+CONSUL_DNS_NAME = "consul"
+
+FB_LOG_PATH = "/var/log/onap"
+FB_DATA_PATH = "/usr/share/filebeat/data"
+FB_CONFIG_PATH = "/usr/share/filebeat/filebeat.yml"
+FB_CONFIG_SUBPATH = "filebeat.yml"
+FB_CONFIG_MAP = "filebeat-conf"
+FB_IMAGE = "docker.elastic.co/beats/filebeat:5.5.0"
+
+def _set_defaults():
+ """ Set default configuration parameters """
+ return {
+ "namespace" : DCAE_NAMESPACE, # k8s namespace to use for DCAE
+ "consul_dns_name" : CONSUL_DNS_NAME, # k8s internal DNS name for Consul
+ "image_pull_secrets" : [], # list of k8s secrets for accessing Docker registries
+ "filebeat": { # Configuration for setting up filebeat container
+ "log_path" : FB_LOG_PATH, # mount point for log volume in filebeat container
+ "data_path" : FB_DATA_PATH, # mount point for data volume in filebeat container
+ "config_path" : FB_CONFIG_PATH, # mount point for config volume in filebeat container
+ "config_subpath" : FB_CONFIG_SUBPATH, # subpath for config data in filebeat container
+ "config_map" : FB_CONFIG_MAP, # ConfigMap holding the filebeat configuration
+ "image": FB_IMAGE # Docker image to use for filebeat
+ }
+ }
+
+def configure(config_path=_CONFIG_PATH, key=_CONSUL_KEY):
+ """
+ Get configuration information from local file and Consul.
+ Note that the Cloudify context ("ctx") isn't available at
+ module load time.
+ """
+
+ from cloudify.exceptions import NonRecoverableError
+ import ConfigParser
+ from k8splugin import discovery
+ config = _set_defaults()
+
+ try:
+ # Get Consul address from a config file
+ c = ConfigParser.ConfigParser()
+ c.read(config_path)
+ config["consul_host"] = c.get('consul','address')
+
+ # Get the rest of the config from Consul
+ conn = discovery.create_kv_conn(config["consul_host"])
+ val = discovery.get_kv_value(conn, key)
+
+ # Merge Consul results into the config
+ config.update(val)
+
+    except discovery.DiscoveryKVEntryNotFoundError:
+        # Don't reraise the error; assume the defaults are wanted.
+ pass
+
+ except Exception as e:
+ raise NonRecoverableError(e)
+
+ return config
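+
+# Illustrative usage (not executed here): plugin modules call configure() once at
+# load time and read settings from the resulting dict, e.g.:
+#     plugin_conf = configure()
+#     namespace = plugin_conf.get("namespace")   # "dcae" unless overridden in Consul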
diff --git a/k8s/k8s-node-type.yaml b/k8s/k8s-node-type.yaml
new file mode 100644
index 0000000..7d64500
--- /dev/null
+++ b/k8s/k8s-node-type.yaml
@@ -0,0 +1,325 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+
+plugins:
+ k8s:
+ executor: 'central_deployment_agent'
+ package_name: k8splugin
+ package_version: 1.4.0
+
+data_types:
+
+ dcae.types.MSBRegistration:
+ description: >
+      Information for registering an HTTP service with MSB. Registration is optional;
+      if it is desired, at least the 'port' property must be provided. If the 'port'
+      property is not provided, the plugin will not do the registration.
+ (The properties all have to be declared as not required, otherwise the
+ 'msb_registration' property on the node would also be required.)
+ properties:
+ port:
+ description: The container port at which the service is exposed
+ type: string
+ required: false
+ version:
+ description: The version identifier for the service
+ type: string
+ required: false
+ url_path:
+ description: The URL path (e.g., "/api", not the full URL) to the service endpoint
+ type: string
+ required: false
+ uses_ssl:
+ description: Set to true if service endpoint uses SSL (TLS)
+ type: boolean
+ required: false
+
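+  # An illustrative value for a node's 'msb_registration' property using this type:
+  #   msb_registration:
+  #     port: "8080"
+  #     version: "v1"
+  #     url_path: /api
+  #     uses_ssl: false
+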
+ dcae.types.LoggingInfo:
+ description: >
+ Information for setting up centralized logging via ELK using a "sidecar" container.
+ If 'log_directory' is not provided, the plugin will not set up ELK logging.
+ (The properties all have to be declared as not required, otherwise the
+ 'log_info' property on the node would also be required.)
+ properties:
+ log_directory:
+ description: >
+ The path in the container where the component writes its logs.
+ If the component is following the EELF requirements, this would be
+ the directory where the four EELF files are being written.
+        (Other logs can be placed in the directory--if their names end in '.log',
+ they'll also be sent into ELK.)
+ type: string
+ required: false
+ alternate_fb_path:
+ description: >
+ Hope not to use this. By default, the plugin will mount the log volume
+ at /var/log/onap/<component_type> in the sidecar container's file system.
+        'alternate_fb_path' allows overriding the default. This will affect how the
+        log data can be found in the ELK system.
+ type: string
+ required: false
+
+node_types:
+ dcae.nodes.ContainerizedComponent:
+    # Base type for all containerized components
+ # Captures common properties and interfaces
+ derived_from: cloudify.nodes.Root
+ properties:
+ image:
+ type: string
+ description: Full uri of the Docker image
+
+ application_config:
+ default: {}
+ description: >
+ Application configuration for this Docker component. The data structure is
+ expected to be a complex map (native YAML) and to be constructed and filled
+ by the creator of the blueprint.
+
+ docker_config:
+ default: {}
+ description: >
+        This is the auxiliary portion of the component spec, the part that contains things
+        like health check definitions for the Docker component. Health checks are
+ optional.
+
+ log_info:
+ type: dcae.types.LoggingInfo
+ description: >
+ Information for setting up centralized logging via ELK.
+ required: false
+
+ replicas:
+ type: integer
+ description: >
+ The number of instances of the component that should be launched initially
+ default: 1
+
+ always_pull_image:
+ type: boolean
+ description: >
+ Set to true if the orchestrator should always pull a new copy of the image
+ before deploying. By default the orchestrator pulls only if the image is
+ not already present on the Docker host where the container is being launched.
+ default: false
+
+ interfaces:
+ dcae.interfaces.update:
+ scale:
+ implementation: k8s.k8splugin.scale
+ update_image:
+ implementation: k8s.k8splugin.update_image
+
+ # The ContainerizedServiceComponent node type is to be used for DCAE service components that
+  # are to be run in a Docker container. This node type goes beyond an ordinary Docker
+  # plugin by adding DCAE platform-specific functionality:
+ #
+ # * Generation of the service component name
+ # * Managing of service component configuration information
+ #
+ # The plugin deploys the container into a Kubernetes cluster with a very specific choice
+ # of Kubernetes elements that are deliberately not under the control of the blueprint author.
+ # The idea is to deploy all service components in a consistent way, with the details abstracted
+ # away from the blueprint author.
+ dcae.nodes.ContainerizedServiceComponent:
+ derived_from: dcae.nodes.ContainerizedComponent
+ properties:
+ service_component_type:
+ type: string
+ description: Service component type of the application being run in the container
+
+ service_id:
+ type: string
+ description: >
+ Unique id for this DCAE service instance this component belongs to. This value
+ will be applied as a tag in the registration of this component with Consul.
+ default: Null
+
+ location_id:
+ type: string
+ description: >
+ Location id of where to run the container. Not used by the plugin. Here for backward compatibility.
+ default: Null
+ required: False
+
+ service_component_name_override:
+ type: string
+ description: >
+ Manually override and set the name for this Docker container node. If this
+ is set, then the name will not be auto-generated. Platform services are the
+        main use case for this parameter because they have static
+        names, for example, the CDAP broker.
+ default: Null
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Generate service component name and populate config into Consul
+ implementation: k8s.k8splugin.create_for_components
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container_for_components
+ stop:
+ # Stop and remove Docker container
+ implementation: k8s.k8splugin.stop_and_remove_container
+ delete:
+ # Delete configuration from Consul
+ implementation: k8s.k8splugin.cleanup_discovery
+ dcae.interfaces.policy:
+ # This is to be invoked by the policy handler upon policy updates
+ policy_update:
+ implementation: k8s.k8splugin.policy_update
+
+ # This node type is intended for DCAE service components that use DMaaP and must use the
+ # DMaaP plugin.
+ dcae.nodes.ContainerizedServiceComponentUsingDmaap:
+ derived_from: dcae.nodes.ContainerizedServiceComponent
+ properties:
+ streams_publishes:
+ description: >
+ List of DMaaP streams used for publishing.
+
+ Message router items look like:
+
+ name: topic00
+ location: mtc5
+ client_role: XXXX
+ type: message_router
+
+ Data router items look like:
+
+ name: feed00
+ location: mtc5
+ type: data_router
+
+ This information is forwarded to the dmaap plugin to provision
+ default: []
+ streams_subscribes:
+ description: >
+ List of DMaaP streams used for subscribing.
+
+ Message router items look like:
+
+ name: topic00
+ location: mtc5
+ client_role: XXXX
+ type: message_router
+
+ Data router items look like:
+
+ name: feed00
+ location: mtc5
+ type: data_router
+ username: king
+ password: 123456
+ route: some-path
+ scheme: https
+
+        Note that username and password are optional. If not provided, or null, the
+        plugin will generate them.
+
+ default: []
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Generate service component name and populate config into Consul
+ implementation: k8s.k8splugin.create_for_components_with_streams
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container_for_components_with_streams
+
+ # ContainerizedPlatformComponent is intended for DCAE platform services. Unlike the components,
+ # platform services have well-known names and well-known ports.
+ dcae.nodes.ContainerizedPlatformComponent:
+ derived_from: dcae.nodes.ContainerizedComponent
+ properties:
+ name:
+ description: >
+ Container name used to register with Consul
+ dns_name:
+ required: false
+ description: >
+ Name to be registered in the DNS for the service provided by the container.
+ If not provided, the 'name' field is used.
+ This is a work-around for the Kubernetes restriction on having '_' in a DNS name.
+ Having this field allows a component to look up its configuration using a name that
+ includes a '_' while providing a legal Kubernetes DNS name.
+
+ host_port:
+ type: integer
+ description: >
+ Network port that the platform service is expecting to expose on the host
+ default: 0
+
+ container_port:
+ type: integer
+ description: >
+ Network port that the platform service exposes in the container
+ default: 0
+
+ msb_registration:
+ type: dcae.types.MSBRegistration
+ description: >
+ Information for registering with MSB
+ required: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ # Populate config into Consul
+ implementation: k8s.k8splugin.create_for_platforms
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container_for_platforms
+ stop:
+ # Stop and remove Docker container
+ implementation: k8s.k8splugin.stop_and_remove_container
+ delete:
+ # Delete configuration from Consul
+ implementation: k8s.k8splugin.cleanup_discovery
+
+ # ContainerizedApplication is intended to be more of an all-purpose Docker container node
+ # for non-componentized applications.
+ dcae.nodes.ContainerizedApplication:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ type: string
+ description: Name of the Docker container to be given
+ image:
+ type: string
+ description: Full uri of the Docker image
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ # Create Docker container and start
+ implementation: k8s.k8splugin.create_and_start_container
+ stop:
+ # Stop and remove Docker container
+ implementation: k8s.k8splugin.stop_and_remove_container
+ dcae.interfaces.scale:
+ scale:
+ implementation: k8s.k8splugin.scale
+ dcae.interfaces.update:
+ update_image:
+ implementation: k8s.k8splugin.update_image
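+
+# Illustrative blueprint fragment (not part of the type definitions) showing how a
+# blueprint might declare a service component node; names and values are examples only:
+#
+#   web_server:
+#     type: dcae.nodes.ContainerizedServiceComponent
+#     properties:
+#       service_component_type: web-server
+#       image: myrepository.example.com/server/web:1.14
+#       replicas: 2
+#       application_config:
+#         some_param: some_value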
diff --git a/k8s/k8sclient/__init__.py b/k8s/k8sclient/__init__.py
new file mode 100644
index 0000000..3cc19f2
--- /dev/null
+++ b/k8s/k8sclient/__init__.py
@@ -0,0 +1,20 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+from .k8sclient import deploy, undeploy, is_available, scale, upgrade, rollback, execute_command_in_deployment
\ No newline at end of file
diff --git a/k8s/k8sclient/k8sclient.py b/k8s/k8sclient/k8sclient.py
new file mode 100644
index 0000000..e388fb5
--- /dev/null
+++ b/k8s/k8sclient/k8sclient.py
@@ -0,0 +1,546 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+import os
+import uuid
+from msb import msb
+from kubernetes import config, client, stream
+
+# Default values for readiness probe
+PROBE_DEFAULT_PERIOD = 15
+PROBE_DEFAULT_TIMEOUT = 1
+
+def _create_deployment_name(component_name):
+ return "dep-{0}".format(component_name)
+
+def _create_service_name(component_name):
+ return "{0}".format(component_name)
+
+def _create_exposed_service_name(component_name):
+ return ("x{0}".format(component_name))[:63]
+
+def _configure_api():
+ # Look for a kubernetes config file in ~/.kube/config
+ kubepath = os.path.join(os.environ["HOME"], '.kube/config')
+ if os.path.exists(kubepath):
+ config.load_kube_config(kubepath)
+ else:
+ # Maybe we're running in a k8s container and we can use info provided by k8s
+ # We would like to use:
+ # config.load_incluster_config()
+ # but this looks into os.environ for kubernetes host and port, and from
+ # the plugin those aren't visible. So we use the InClusterConfigLoader class,
+ # where we can set the environment to what we like.
+ # This is probably brittle! Maybe there's a better alternative.
+ localenv = {
+ config.incluster_config.SERVICE_HOST_ENV_NAME : "kubernetes.default.svc.cluster.local",
+ config.incluster_config.SERVICE_PORT_ENV_NAME : "443"
+ }
+ config.incluster_config.InClusterConfigLoader(
+ token_filename=config.incluster_config.SERVICE_TOKEN_FILENAME,
+ cert_filename=config.incluster_config.SERVICE_CERT_FILENAME,
+ environ=localenv
+ ).load_and_set()
+
+def _create_probe(hc, port):
+ ''' Create a Kubernetes probe based on info in the health check dictionary hc '''
+ probe_type = hc['type']
+ probe = None
+ period = hc.get('interval', PROBE_DEFAULT_PERIOD)
+ timeout = hc.get('timeout', PROBE_DEFAULT_TIMEOUT)
+ if probe_type in ['http', 'https']:
+ probe = client.V1Probe(
+ failure_threshold = 1,
+ initial_delay_seconds = 5,
+ period_seconds = period,
+ timeout_seconds = timeout,
+ http_get = client.V1HTTPGetAction(
+ path = hc['endpoint'],
+ port = port,
+ scheme = probe_type.upper()
+ )
+ )
+ elif probe_type in ['script', 'docker']:
+ probe = client.V1Probe(
+ failure_threshold = 1,
+ initial_delay_seconds = 5,
+ period_seconds = period,
+ timeout_seconds = timeout,
+ _exec = client.V1ExecAction(
+ command = [hc['script']]
+ )
+ )
+ return probe
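+
+# Illustrative health-check dictionaries accepted by _create_probe:
+#   {"type": "http", "endpoint": "/healthcheck", "interval": 15, "timeout": 1}
+#   {"type": "script", "script": "/opt/app/bin/check.sh"}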
+
+def _create_container_object(name, image, always_pull, env={}, container_ports=[], volume_mounts=[], readiness=None):
+ # Set up environment variables
+ # Copy any passed in environment variables
+ env_vars = [client.V1EnvVar(name=k, value=env[k]) for k in env.keys()]
+ # Add POD_IP with the IP address of the pod running the container
+ pod_ip = client.V1EnvVarSource(field_ref = client.V1ObjectFieldSelector(field_path="status.podIP"))
+ env_vars.append(client.V1EnvVar(name="POD_IP",value_from=pod_ip))
+
+ # If a health check is specified, create a readiness probe
+ # (For an HTTP-based check, we assume it's at the first container port)
+ probe = None
+
+ if readiness:
+ hc_port = None
+ if len(container_ports) > 0:
+ hc_port = container_ports[0]
+ probe = _create_probe(readiness, hc_port)
+
+ # Define container for pod
+ return client.V1Container(
+ name=name,
+ image=image,
+ image_pull_policy='Always' if always_pull else 'IfNotPresent',
+ env=env_vars,
+ ports=[client.V1ContainerPort(container_port=p) for p in container_ports],
+ volume_mounts = volume_mounts,
+ readiness_probe = probe
+ )
+
+def _create_deployment_object(component_name,
+ containers,
+ replicas,
+ volumes,
+                              labels=None,
+ pull_secrets=[]):
+
+ deployment_name = _create_deployment_name(component_name)
+
+    # Label the pod with the deployment name, so we can find it easily.
+    # (Default labels to None rather than {}: a mutable default argument would be
+    # shared across calls and would accumulate labels from earlier deployments.)
+    labels = labels if labels is not None else {}
+    labels.update({"k8sdeployment" : deployment_name})
+
+ # pull_secrets is a list of the names of the k8s secrets containing docker registry credentials
+ # See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+ ips = []
+ for secret in pull_secrets:
+ ips.append(client.V1LocalObjectReference(name=secret))
+
+ # Define pod template
+ template = client.V1PodTemplateSpec(
+ metadata=client.V1ObjectMeta(labels=labels),
+ spec=client.V1PodSpec(hostname=component_name,
+ containers=containers,
+ volumes=volumes,
+ image_pull_secrets=ips)
+ )
+
+ # Define deployment spec
+ spec = client.ExtensionsV1beta1DeploymentSpec(
+ replicas=replicas,
+ template=template
+ )
+
+ # Create deployment object
+ deployment = client.ExtensionsV1beta1Deployment(
+ kind="Deployment",
+ metadata=client.V1ObjectMeta(name=deployment_name),
+ spec=spec
+ )
+
+ return deployment
+
+def _create_service_object(service_name, component_name, service_ports, annotations, labels, service_type):
+ service_spec = client.V1ServiceSpec(
+ ports=service_ports,
+ selector={"app" : component_name},
+ type=service_type
+ )
+ if annotations:
+ metadata = client.V1ObjectMeta(name=_create_service_name(service_name), labels=labels, annotations=annotations)
+ else:
+ metadata = client.V1ObjectMeta(name=_create_service_name(service_name), labels=labels)
+
+ service = client.V1Service(
+ kind="Service",
+ api_version="v1",
+ metadata=metadata,
+ spec=service_spec
+ )
+ return service
+
+def _parse_ports(port_list):
+ container_ports = []
+ port_map = {}
+ for p in port_list:
+ try:
+            [container, host] = (p.strip()).split(":", 1)
+ cport = int(container)
+ container_ports.append(cport)
+ hport = int(host)
+ port_map[container] = hport
+ except:
+ pass # if something doesn't parse, we just ignore it
+
+ return container_ports, port_map
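+
+# Illustrative example:
+#   _parse_ports(["8080:30201", "9090:0"]) returns
+#   ([8080, 9090], {"8080": 30201, "9090": 0})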
+
+def _parse_volumes(volume_list):
+ volumes = []
+ volume_mounts = []
+ for v in volume_list:
+ vname = str(uuid.uuid4())
+ vhost = v['host']['path']
+ vcontainer = v['container']['bind']
+ vro = (v['container']['mode'] == 'ro')
+ volumes.append(client.V1Volume(name=vname, host_path=client.V1HostPathVolumeSource(path=vhost)))
+ volume_mounts.append(client.V1VolumeMount(name=vname, mount_path=vcontainer, read_only=vro))
+
+ return volumes, volume_mounts
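+
+# Illustrative volume item consumed by _parse_volumes:
+#   {"host": {"path": "/var/log"}, "container": {"bind": "/opt/logs", "mode": "rw"}}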
+
+def _service_exists(namespace, component_name):
+ exists = False
+ try:
+ _configure_api()
+ client.CoreV1Api().read_namespaced_service(_create_service_name(component_name), namespace)
+ exists = True
+ except client.rest.ApiException:
+ pass
+
+ return exists
+
+def _patch_deployment(namespace, deployment, modify):
+ '''
+ Gets the current spec for 'deployment' in 'namespace',
+ uses the 'modify' function to change the spec,
+ then sends the updated spec to k8s.
+ '''
+ _configure_api()
+
+ # Get deployment spec
+ spec = client.ExtensionsV1beta1Api().read_namespaced_deployment(deployment, namespace)
+
+ # Apply changes to spec
+ spec = modify(spec)
+
+ # Patch the deploy with updated spec
+ client.ExtensionsV1beta1Api().patch_namespaced_deployment(deployment, namespace, spec)
+
+def _execute_command_in_pod(namespace, pod_name, command):
+ '''
+ Execute the command (specified by an argv-style list in the "command" parameter) in
+ the specified pod in the specified namespace. For now at least, we use this only to
+ run a notification script in a pod after a configuration change.
+
+ The straightforward way to do this is with the V1 Core API function "connect_get_namespaced_pod_exec".
+ Unfortunately due to a bug/limitation in the Python client library, we can't call it directly.
+ We have to make the API call through a Websocket connection using the kubernetes.stream wrapper API.
+ I'm following the example code at https://github.com/kubernetes-client/python/blob/master/examples/exec.py.
+ There are several issues tracking this, in various states. It isn't clear that there will ever
+ be a fix.
+ - https://github.com/kubernetes-client/python/issues/58
+ - https://github.com/kubernetes-client/python/issues/409
+ - https://github.com/kubernetes-client/python/issues/526
+
+ The main consequence of the workaround using "stream" is that the caller does not get an indication
+ of the exit code returned by the command when it completes execution. It turns out that the
+ original implementation of notification in the Docker plugin did not use this result, so we can
+ still match the original notification functionality.
+
+ The "stream" approach returns a string containing any output sent by the command to stdout or stderr.
+ We'll return that so it can logged.
+ '''
+ _configure_api()
+ try:
+ output = stream.stream(client.CoreV1Api().connect_get_namespaced_pod_exec,
+ name=pod_name,
+ namespace=namespace,
+ command=command,
+ stdout=True,
+ stderr=True,
+ stdin=False,
+ tty=False)
+ except client.rest.ApiException as e:
+ # If the exception indicates the pod wasn't found, it's not a fatal error.
+ # It existed when we enumerated the pods for the deployment but no longer exists.
+ # Unfortunately, the only way to distinguish a pod not found from any other error
+ # is by looking at the reason text.
+ # (The ApiException's "status" field should contain the HTTP status code, which would
+ # be 404 if the pod isn't found, but empirical testing reveals that "status" is set
+ # to zero.)
+ if "404 not found" in e.reason.lower():
+ output = "Pod not found"
+ else:
+ raise e
+
+ return {"pod" : pod_name, "output" : output}
+
+def deploy(namespace, component_name, image, replicas, always_pull, k8sconfig, **kwargs):
+ '''
+ This will create a k8s Deployment and, if needed, one or two k8s Services.
+ (We are being opinionated in our use of k8s... this code decides what k8s abstractions and features to use.
+ We're not exposing k8s to the component developer and the blueprint author.
+ This is a conscious choice. We want to use k8s in a controlled, consistent way, and we want to hide
+ the details from the component developer and the blueprint author.)
+
+ namespace: the Kubernetes namespace into which the component is deployed
+ component_name: the component name, used to derive names of Kubernetes entities
+ image: the docker image for the component being deployed
+    replicas: the number of instances of the component to be deployed
+ always_pull: boolean flag, indicating that Kubernetes should always pull a new copy of
+ the Docker image for the component, even if it is already present on the Kubernetes node.
+ k8sconfig contains:
+ - image_pull_secrets: a list of names of image pull secrets that can be used for retrieving images.
+ (DON'T PANIC: these are just the names of secrets held in the Kubernetes secret store.)
+ - filebeat: a dictionary of filebeat sidecar parameters:
+ "log_path" : mount point for log volume in filebeat container
+ "data_path" : mount point for data volume in filebeat container
+ "config_path" : mount point for config volume in filebeat container
+ "config_subpath" : subpath for config data in filebeat container
+ "config_map" : ConfigMap holding the filebeat configuration
+ "image": Docker image to use for filebeat
+ kwargs may have:
+ - volumes: array of volume objects, where a volume object is:
+ {"host":{"path": "/path/on/host"}, "container":{"bind":"/path/on/container","mode":"rw_or_ro"}
+ - ports: array of strings in the form "container_port:host_port"
+ - env: map of name-value pairs ( {name0: value0, name1: value1...}
+ - msb_list: array of msb objects, where an msb object is as described in msb/msb.py.
+ - log_info: an object with info for setting up ELK logging, with the form:
+ {"log_directory": "/path/to/container/log/directory", "alternate_fb_path" : "/alternate/sidecar/log/path"}
+ - labels: dict with label-name/label-value pairs, e.g. {"cfydeployment" : "lsdfkladflksdfsjkl", "cfynode":"mycomponent"}
+      These labels will be set on all the pods deployed as a result of this deploy() invocation.
+ - readiness: dict with health check info; if present, used to create a readiness probe for the main container. Includes:
+ - type: check is done by making http(s) request to an endpoint ("http", "https") or by exec'ing a script in the container ("script", "docker")
+ - interval: period (in seconds) between probes
+ - timeout: time (in seconds) to allow a probe to complete
+ - endpoint: the path portion of the URL that points to the readiness endpoint for "http" and "https" types
+      - script: the full path to the script to be executed in the container for "script" and "docker" types
+
+ '''
+
+ deployment_ok = False
+ cip_service_created = False
+ deployment_description = {
+ "namespace": namespace,
+ "deployment": '',
+ "services": []
+ }
+
+ try:
+ _configure_api()
+
+ # Get API handles
+ core = client.CoreV1Api()
+ ext = client.ExtensionsV1beta1Api()
+
+ # Parse the port mapping into [container_port,...] and [{"host_port" : "container_port"},...]
+ container_ports, port_map = _parse_ports(kwargs.get("ports", []))
+
+ # Parse the volumes list into volumes and volume_mounts for the deployment
+ volumes, volume_mounts = _parse_volumes(kwargs.get("volumes",[]))
+
+ # Initialize the list of containers that will be part of the pod
+ containers = []
+
+ # Set up the ELK logging sidecar container, if needed
+ log_info = kwargs.get("log_info")
+ if log_info and "log_directory" in log_info:
+ log_dir = log_info["log_directory"]
+ fb = k8sconfig["filebeat"]
+ sidecar_volume_mounts = []
+
+ # Create the volume for component log files and volume mounts for the component and sidecar containers
+ volumes.append(client.V1Volume(name="component-log", empty_dir=client.V1EmptyDirVolumeSource()))
+ volume_mounts.append(client.V1VolumeMount(name="component-log", mount_path=log_dir))
+ sc_path = log_info["alternate_fb_path"] if "alternate_fb_path" in log_info \
+ else "{0}/{1}".format(fb["log_path"], component_name)
+ sidecar_volume_mounts.append(client.V1VolumeMount(name="component-log", mount_path=sc_path))
+
+ # Create the volume for sidecar data and the volume mount for it
+ volumes.append(client.V1Volume(name="filebeat-data", empty_dir=client.V1EmptyDirVolumeSource()))
+ sidecar_volume_mounts.append(client.V1VolumeMount(name="filebeat-data", mount_path=fb["data_path"]))
+
+ # Create the container for the sidecar
+ containers.append(_create_container_object("filebeat", fb["image"], False, {}, [], sidecar_volume_mounts))
+
+ # Create the volume for the sidecar configuration data and the volume mount for it
+ # The configuration data is in a k8s ConfigMap that should be created when DCAE is installed.
+ volumes.append(
+ client.V1Volume(name="filebeat-conf", config_map=client.V1ConfigMapVolumeSource(name=fb["config_map"])))
+ sidecar_volume_mounts.append(
+ client.V1VolumeMount(name="filebeat-conf", mount_path=fb["config_path"], sub_path=fb["config_subpath"]))
+
+ # Create the container for the component
+ # Make it the first container in the pod
+        containers.insert(0, _create_container_object(component_name, image, always_pull, kwargs.get("env", {}), container_ports, volume_mounts, kwargs.get("readiness")))
+
+ # Build the k8s Deployment object
+ labels = kwargs.get("labels", {})
+ labels.update({"app": component_name})
+ dep = _create_deployment_object(component_name, containers, replicas, volumes, labels, pull_secrets=k8sconfig["image_pull_secrets"])
+
+ # Have k8s deploy it
+ ext.create_namespaced_deployment(namespace, dep)
+ deployment_ok = True
+ deployment_description["deployment"] = _create_deployment_name(component_name)
+
+ # Create service(s), if a port mapping is specified
+ if port_map:
+ service_ports = [] # Ports exposed internally on the k8s network
+ exposed_ports = [] # Ports to be mapped to ports on the k8s nodes via NodePort
+ for cport, hport in port_map.iteritems():
+ service_ports.append(client.V1ServicePort(port=int(cport),name="port-{}".format(cport)))
+ if int(hport) != 0:
+ exposed_ports.append(client.V1ServicePort(port=int(cport), node_port=int(hport),name="xport-{}".format(cport)))
+
+ # If there are ports to be exposed via MSB, set up the annotation for the service
+ msb_list = kwargs.get("msb_list")
+ annotations = msb.create_msb_annotation(msb_list) if msb_list else ''
+
+ # Create a ClusterIP service for access via the k8s network
+ service = _create_service_object(_create_service_name(component_name), component_name, service_ports, annotations, labels, "ClusterIP")
+ core.create_namespaced_service(namespace, service)
+ cip_service_created = True
+ deployment_description["services"].append(_create_service_name(component_name))
+
+ # If there are ports to be exposed on the k8s nodes, create a "NodePort" service
+ if len(exposed_ports) > 0:
+ exposed_service = \
+ _create_service_object(_create_exposed_service_name(component_name), component_name, exposed_ports, '', labels, "NodePort")
+ core.create_namespaced_service(namespace, exposed_service)
+ deployment_description["services"].append(_create_exposed_service_name(component_name))
+
+ except Exception as e:
+ # If the ClusterIP service was created, delete the service:
+ if cip_service_created:
+ core.delete_namespaced_service(_create_service_name(component_name), namespace)
+ # If the deployment was created but not the service, delete the deployment
+ if deployment_ok:
+ client.ExtensionsV1beta1Api().delete_namespaced_deployment(_create_deployment_name(component_name), namespace, client.V1DeleteOptions())
+ raise e
+
+ return dep, deployment_description
+
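+# For reference, the deployment_description returned by deploy() (and consumed by
+# undeploy, scale, and upgrade below) looks like this, with illustrative values:
+#   {"namespace": "dcae",
+#    "deployment": "dep-s123-web-server",
+#    "services": ["s123-web-server", "xs123-web-server"]}
+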
+def undeploy(deployment_description):
+ _configure_api()
+
+ namespace = deployment_description["namespace"]
+
+ # remove any services associated with the component
+ for service in deployment_description["services"]:
+ client.CoreV1Api().delete_namespaced_service(service, namespace)
+
+ # Have k8s delete the underlying pods and replicaset when deleting the deployment.
+ options = client.V1DeleteOptions(propagation_policy="Foreground")
+ client.ExtensionsV1beta1Api().delete_namespaced_deployment(deployment_description["deployment"], namespace, options)
+
+def is_available(namespace, component_name):
+ _configure_api()
+ dep_status = client.AppsV1beta1Api().read_namespaced_deployment_status(_create_deployment_name(component_name), namespace)
+ # Check if the number of available replicas is equal to the number requested and that the replicas match the current spec
+ # This check can be used to verify completion of an initial deployment, a scale operation, or an update operation
+ return dep_status.status.available_replicas == dep_status.spec.replicas and dep_status.status.updated_replicas == dep_status.spec.replicas
+
+def scale(deployment_description, replicas):
+ ''' Trigger a scaling operation by updating the replica count for the Deployment '''
+
+ def update_replica_count(spec):
+ spec.spec.replicas = replicas
+ return spec
+
+ _patch_deployment(deployment_description["namespace"], deployment_description["deployment"], update_replica_count)
+
+def upgrade(deployment_description, image, container_index = 0):
+ ''' Trigger a rolling upgrade by sending a new image name/tag to k8s '''
+
+ def update_image(spec):
+ spec.spec.template.spec.containers[container_index].image = image
+ return spec
+
+ _patch_deployment(deployment_description["namespace"], deployment_description["deployment"], update_image)
+
+def rollback(deployment_description, rollback_to=0):
+ '''
+ Undo upgrade by rolling back to a previous revision of the deployment.
+ By default, go back one revision.
+ rollback_to can be used to supply a specific revision number.
+ Returns the image for the app container and the replica count from the rolled-back deployment
+ '''
+ '''
+ 2018-07-13
+ Currently this does not work due to a bug in the create_namespaced_deployment_rollback() method.
+ The k8s python client code throws an exception while processing the response from the API.
+ See:
+ - https://github.com/kubernetes-client/python/issues/491
+ - https://github.com/kubernetes/kubernetes/pull/63837
+ The fix has been merged into the master branch but is not in the latest release.
+ '''
+ _configure_api()
+ deployment = deployment_description["deployment"]
+ namespace = deployment_description["namespace"]
+
+ # Initiate the rollback
+ client.ExtensionsV1beta1Api().create_namespaced_deployment_rollback(
+ deployment,
+ namespace,
+ client.AppsV1beta1DeploymentRollback(name=deployment, rollback_to=client.AppsV1beta1RollbackConfig(revision=rollback_to)))
+
+ # Read back the spec for the rolled-back deployment
+ spec = client.ExtensionsV1beta1Api().read_namespaced_deployment(deployment, namespace)
+ return spec.spec.template.spec.containers[0].image, spec.spec.replicas
+
+def execute_command_in_deployment(deployment_description, command):
+ '''
+ Enumerates the pods in the k8s deployment identified by "deployment_description",
+ then executes the command (represented as an argv-style list) in "command" in
+ container 0 (the main application container) each of those pods.
+
+ Note that the sets of pods associated with a deployment can change over time. The
+ enumeration is a snapshot at one point in time. The command will not be executed in
+ pods that are created after the initial enumeration. If a pod disappears after the
+ initial enumeration and before the command is executed, the attempt to execute the
+ command will fail. This is not treated as a fatal error.
+
+ This approach is reasonable for the one current use case for "execute_command": running a
+ script to notify a container that its configuration has changed as a result of a
+ policy change. In this use case, the new configuration information is stored into
+ the configuration store (Consul), the pods are enumerated, and the command is executed.
+ If a pod disappears after the enumeration, the fact that the command cannot be run
+ doesn't matter--a nonexistent pod doesn't need to be reconfigured. Similarly, a pod that
+ comes up after the enumeration will get its initial configuration from the updated version
+ in Consul.
+
+ The optimal solution here would be for k8s to provide an API call to execute a command in
+ all of the pods for a deployment. Unfortunately, k8s does not provide such a call--the
+ only call provided by k8s operates at the pod level, not the deployment level.
+
+    Another interesting k8s factoid: there's no direct way to list the pods belonging to a
+ particular k8s deployment. The deployment code above sets a label ("k8sdeployment") on
+ the pod that has the k8s deployment name. To list the pods, the code below queries for
+ pods with the label carrying the deployment name.
+ '''
+
+ _configure_api()
+ deployment = deployment_description["deployment"]
+ namespace = deployment_description["namespace"]
+
+ # Get names of all the running pods belonging to the deployment
+ pod_names = [pod.metadata.name for pod in client.CoreV1Api().list_namespaced_pod(
+ namespace = namespace,
+ label_selector = "k8sdeployment={0}".format(deployment),
+ field_selector = "status.phase=Running"
+ ).items]
+
+ def do_execute(pod_name):
+ return _execute_command_in_pod(namespace, pod_name, command)
+
+ # Execute command in the running pods
+    return map(do_execute, pod_names)
\ No newline at end of file
diff --git a/k8s/k8splugin/__init__.py b/k8s/k8splugin/__init__.py
new file mode 100644
index 0000000..7f721b2
--- /dev/null
+++ b/k8s/k8splugin/__init__.py
@@ -0,0 +1,30 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# REVIEW: Tried to source the version from here but you run into import issues
+# because "tasks" module is loaded. This method seems to be the PEP 396
+# recommended way and is listed #3 here https://packaging.python.org/single_source_version/
+# __version__ = '0.1.0'
+
+from .tasks import create_for_components, create_for_components_with_streams, \
+ create_and_start_container_for_components_with_streams, \
+ create_for_platforms, create_and_start_container, \
+ create_and_start_container_for_components, create_and_start_container_for_platforms, \
+    stop_and_remove_container, cleanup_discovery, policy_update, scale, update_image
\ No newline at end of file
diff --git a/k8s/k8splugin/decorators.py b/k8s/k8splugin/decorators.py
new file mode 100644
index 0000000..b9b32bf
--- /dev/null
+++ b/k8s/k8splugin/decorators.py
@@ -0,0 +1,116 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import copy
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+
+from k8splugin import discovery as dis
+from k8splugin import utils
+from k8splugin.exceptions import (DockerPluginDependencyNotReadyError,
+ DockerPluginDeploymentError)
+
+
+def monkeypatch_loggers(task_func):
+ """Sets up the dependent loggers"""
+
+ def wrapper(**kwargs):
+ # Ouch! Monkeypatch loggers
+ dis.logger = ctx.logger
+
+ return task_func(**kwargs)
+
+ return wrapper
+
+
+def wrap_error_handling_start(task_start_func):
+ """Wrap error handling for the start operations"""
+
+ def wrapper(**kwargs):
+ try:
+ return task_start_func(**kwargs)
+ except DockerPluginDependencyNotReadyError as e:
+ # You are here because things we need like a working docker host is not
+ # available yet so let Cloudify try again later.
+ raise RecoverableError(e)
+ except DockerPluginDeploymentError as e:
+ # Container failed to come up in the allotted time. This is deemed
+ # non-recoverable.
+ raise NonRecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while starting container: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+ return wrapper
+
+
+def _wrapper_merge_inputs(task_func, properties, **kwargs):
+ """Merge Cloudify properties with input kwargs before calling task func"""
+ inputs = copy.deepcopy(properties)
+ # Recursively update
+ utils.update_dict(inputs, kwargs)
+
+ # Apparently kwargs contains "ctx" which is cloudify.context.CloudifyContext
+ # This has to be removed and not copied into runtime_properties else you get
+ # JSON serialization errors.
+ if "ctx" in inputs:
+ del inputs["ctx"]
+
+ return task_func(**inputs)
+
+def merge_inputs_for_create(task_create_func):
+ """Merge all inputs for start operation into one dict"""
+
+ # Needed to wrap the wrapper because I was seeing issues with
+ # "RuntimeError: No context set in current execution thread"
+ def wrapper(**kwargs):
+ # NOTE: ctx.node.properties is an ImmutableProperties instance which is
+ # why it is passed into a mutable dict so that it can be deep copied
+ return _wrapper_merge_inputs(task_create_func,
+ dict(ctx.node.properties), **kwargs)
+
+ return wrapper
+
+def merge_inputs_for_start(task_start_func):
+ """Merge all inputs for start operation into one dict"""
+
+ # Needed to wrap the wrapper because I was seeing issues with
+ # "RuntimeError: No context set in current execution thread"
+ def wrapper(**kwargs):
+ return _wrapper_merge_inputs(task_start_func,
+ ctx.instance.runtime_properties, **kwargs)
+
+ return wrapper
+
+def wrap_error_handling_update(update_func):
+ """ Wrap error handling for update operations (scale and upgrade) """
+
+ def wrapper(**kwargs):
+ try:
+ return update_func(**kwargs)
+ except DockerPluginDeploymentError:
+            raise NonRecoverableError("Update operation did not complete successfully in the allotted time")
+        except Exception as e:
+            ctx.logger.error("Unexpected error during update operation: {0}".format(str(e)))
+ raise NonRecoverableError(e)
+
+ return wrapper
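+
+# These decorators are typically stacked on the plugin's task functions, along the
+# lines of this illustrative sketch:
+#
+#   @monkeypatch_loggers
+#   @wrap_error_handling_update
+#   def scale(**kwargs):
+#       ...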
diff --git a/k8s/k8splugin/discovery.py b/k8s/k8splugin/discovery.py
new file mode 100644
index 0000000..56f8260
--- /dev/null
+++ b/k8s/k8splugin/discovery.py
@@ -0,0 +1,269 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import json
+import logging
+import re
+import uuid
+from functools import partial
+
+import consul
+import requests
+
+logger = logging.getLogger("discovery")
+
+
+class DiscoveryError(RuntimeError):
+ pass
+
+class DiscoveryConnectionError(RuntimeError):
+ pass
+
+class DiscoveryServiceNotFoundError(RuntimeError):
+ pass
+
+class DiscoveryKVEntryNotFoundError(RuntimeError):
+ pass
+
+
+def _wrap_consul_call(consul_func, *args, **kwargs):
+ """Wrap Consul call to map errors"""
+ try:
+ return consul_func(*args, **kwargs)
+ except requests.exceptions.ConnectionError as e:
+ raise DiscoveryConnectionError(e)
+
+
+def generate_service_component_name(service_component_type):
+ """Generate service component id used to pass into the service component
+ instance and used as the key to the service component configuration.
+
+ Updated for use with Kubernetes. Sometimes the service component name gets
+ used in Kubernetes in contexts (such as naming a Kubernetes Service) that
+    require the name to conform to the RFC1035 DNS "label" syntax:
+    -- starts with an alpha
+    -- contains only alphanumerics and "-"
+ -- <= 63 characters long
+
+ Format:
+ s<service component id>-<service component type>,
+ truncated to 63 characters, "_" replaced with "-" in service_component_type,
+ other non-conforming characters removed from service_component_type
+ """
+ # Random generated
+ # Copied from cdap plugin
+ sct = re.sub('[^A-Za-z0-9-]','',(service_component_type.replace('_','-')))
+ return ("s{0}-{1}".format(str(uuid.uuid4()).replace("-",""),sct))[:63]
+
+
+def create_kv_conn(host):
+ """Create connection to key-value store
+
+ Returns a Consul client to the specified Consul host"""
+ try:
+ [hostname, port] = host.split(":")
+ return consul.Consul(host=hostname, port=int(port))
+    except ValueError:
+        return consul.Consul(host=host)
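+
+# Illustrative examples: create_kv_conn("consul:8500") and create_kv_conn("consul")
+# both work; the latter falls back to the Consul client's default port.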
+
+def push_service_component_config(kv_conn, service_component_name, config):
+ config_string = config if isinstance(config, str) else json.dumps(config)
+ kv_put_func = partial(_wrap_consul_call, kv_conn.kv.put)
+
+ if kv_put_func(service_component_name, config_string):
+ logger.info("Added config for {0}".format(service_component_name))
+ else:
+ raise DiscoveryError("Failed to push configuration")
+
+def remove_service_component_config(kv_conn, service_component_name):
+ kv_delete_func = partial(_wrap_consul_call, kv_conn.kv.delete)
+ kv_delete_func(service_component_name)
+
+
+def get_kv_value(kv_conn, key):
+ """Get a key-value entry's value from Consul
+
+ Raises DiscoveryKVEntryNotFoundError if entry not found
+ """
+ kv_get_func = partial(_wrap_consul_call, kv_conn.kv.get)
+ (index, val) = kv_get_func(key)
+
+ if val:
+ return json.loads(val['Value']) # will raise ValueError if not JSON, let it propagate
+ else:
+ raise DiscoveryKVEntryNotFoundError("{0} kv entry not found".format(key))
+
+
+def _create_rel_key(service_component_name):
+ return "{0}:rel".format(service_component_name)
+
+def store_relationship(kv_conn, source_name, target_name):
+ # TODO: Rel entry may already exist in a one-to-many situation. Need to
+ # support that.
+ rel_key = _create_rel_key(source_name)
+ rel_value = [target_name] if target_name else []
+
+ kv_put_func = partial(_wrap_consul_call, kv_conn.kv.put)
+ kv_put_func(rel_key, json.dumps(rel_value))
+ logger.info("Added relationship for {0}".format(rel_key))
+
+def delete_relationship(kv_conn, service_component_name):
+ rel_key = _create_rel_key(service_component_name)
+ kv_get_func = partial(_wrap_consul_call, kv_conn.kv.get)
+ index, rels = kv_get_func(rel_key)
+
+ if rels:
+ rels = json.loads(rels["Value"].decode("utf-8"))
+ kv_delete_func = partial(_wrap_consul_call, kv_conn.kv.delete)
+ kv_delete_func(rel_key)
+ return rels
+ else:
+ return []
+
+def lookup_service(kv_conn, service_component_name):
+ catalog_get_func = partial(_wrap_consul_call, kv_conn.catalog.service)
+ index, results = catalog_get_func(service_component_name)
+
+ if results:
+ return results
+ else:
+ raise DiscoveryServiceNotFoundError("Failed to find: {0}".format(service_component_name))
+
+
+# TODO: Note these functions have been (for the most part) shamelessly lifted from
+# dcae-cli and should really be shared.
+
+def _is_healthy_pure(get_health_func, instance):
+ """Checks to see if a component instance is running healthy
+
+ Pure function edition
+
+ Args
+ ----
+ get_health_func: func(string) -> complex object
+ Look at unittests in test_discovery to see examples
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ index, resp = get_health_func(instance)
+
+ if resp:
+ def is_passing(instance):
+ return all([check["Status"] == "passing" for check in instance["Checks"]])
+
+ return any([is_passing(instance) for instance in resp])
+ else:
+ return False
+
+def is_healthy(consul_host, instance):
+ """Checks to see if a component instance is running healthy
+
+ Impure function edition
+
+ Args
+ ----
+ consul_host: (string) host string of Consul
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ cons = create_kv_conn(consul_host)
+
+ get_health_func = partial(_wrap_consul_call, cons.health.service)
+ return _is_healthy_pure(get_health_func, instance)
+
+
+def add_to_entry(conn, key, add_name, add_value):
+ """
+ Find 'key' in consul.
+ Treat its value as a JSON string representing a dict.
+ Extend the dict by adding an entry with key 'add_name' and value 'add_value'.
+ Turn the resulting extended dict into a JSON string.
+ Store the string back into Consul under 'key'.
+ Watch out for conflicting concurrent updates.
+
+ Example:
+ Key 'xyz:dmaap' has the value '{"feed00": {"feed_url" : "http://example.com/feeds/999"}}'
+      add_to_entry(conn, 'xyz:dmaap', 'topic00', {'topic_url' : 'http://example.com/topics/1229'})
+ should result in the value for key 'xyz:dmaap' in consul being updated to
+ '{"feed00": {"feed_url" : "http://example.com/feeds/999"}, "topic00" : {"topic_url" : "http://example.com/topics/1229"}}'
+ """
+ while True: # do until update succeeds
+ (index, val) = conn.kv.get(key) # index gives version of key retrieved
+
+ if val is None: # no key yet
+ vstring = '{}'
+ mod_index = 0 # Use 0 as the cas index for initial insertion of the key
+ else:
+ vstring = val['Value']
+ mod_index = val['ModifyIndex']
+
+ # Build the updated dict
+ # Exceptions just propagate
+ v = json.loads(vstring)
+ v[add_name] = add_value
+ new_vstring = json.dumps(v)
+
+ updated = conn.kv.put(key, new_vstring, cas=mod_index) # if the key has changed since retrieval, this will return false
+ if updated:
+ return v
+
+
+def _find_matching_services(services, name_search, tags):
+ """Find matching services given search criteria"""
+ def is_match(service):
+ srv_name, srv_tags = service
+ return name_search in srv_name and \
+ all(map(lambda tag: tag in srv_tags, tags))
+
+ return [ srv[0] for srv in services.items() if is_match(srv) ]
+
+def search_services(conn, name_search, tags):
+ """Search for services that match criteria
+
+ Args:
+ -----
+ name_search: (string) Name to search for as a substring
+ tags: (list) List of strings that are tags. A service must match **all** the
+ tags in the list.
+
+    Returns:
+ --------
+ List of names of services that matched
+ """
+ # srvs is dict where key is service name and value is list of tags
+ catalog_get_services_func = partial(_wrap_consul_call, conn.catalog.services)
+ index, srvs = catalog_get_services_func()
+
+ if srvs:
+ matches = _find_matching_services(srvs, name_search, tags)
+
+ if matches:
+ return matches
+
+ raise DiscoveryServiceNotFoundError(
+ "No matches found: {0}, {1}".format(name_search, tags))
+ else:
+ raise DiscoveryServiceNotFoundError("No services found")
diff --git a/k8s/k8splugin/exceptions.py b/k8s/k8splugin/exceptions.py
new file mode 100644
index 0000000..0d8a341
--- /dev/null
+++ b/k8s/k8splugin/exceptions.py
@@ -0,0 +1,29 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+class DockerPluginDeploymentError(RuntimeError):
+ pass
+
+
+class DockerPluginDependencyNotReadyError(RuntimeError):
+ """Error to use when something that this plugin depends upon e.g. docker api,
+ consul is not ready"""
+ pass
+
diff --git a/k8s/k8splugin/tasks.py b/k8s/k8splugin/tasks.py
new file mode 100644
index 0000000..c9df9f4
--- /dev/null
+++ b/k8s/k8splugin/tasks.py
@@ -0,0 +1,687 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# Lifecycle interface calls for containerized components
+
+# Needed by Cloudify Manager to load google.auth for the Kubernetes python client
+import cloudify_importer
+
+import time, copy
+import json
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+from onap_dcae_dcaepolicy_lib import Policies
+from k8splugin import discovery as dis
+from k8splugin.decorators import monkeypatch_loggers, wrap_error_handling_start, \
+ merge_inputs_for_start, merge_inputs_for_create, wrap_error_handling_update
+from k8splugin.exceptions import DockerPluginDeploymentError
+from k8splugin import utils
+from configure import configure
+import k8sclient
+
+# Get configuration
+plugin_conf = configure.configure()
+CONSUL_HOST = plugin_conf.get("consul_host")
+CONSUL_INTERNAL_NAME = plugin_conf.get("consul_dns_name")
+DCAE_NAMESPACE = plugin_conf.get("namespace")
+
+# Used to construct delivery urls for data router subscribers. Data router in FTL
+# requires https, but this author believes that ONAP should default to http.
+DEFAULT_SCHEME = "http"
+
+# Property keys
+SERVICE_COMPONENT_NAME = "service_component_name"
+CONTAINER_ID = "container_id"
+APPLICATION_CONFIG = "application_config"
+
+
+
+# Utility methods
+
+# Lifecycle interface calls for dcae.nodes.DockerContainer
+
+def _setup_for_discovery(**kwargs):
+ """Setup for config discovery"""
+ try:
+ name = kwargs['name']
+ application_config = kwargs[APPLICATION_CONFIG]
+
+ # NOTE: application_config is no longer a JSON string; it is input as a
+ # YAML map, which translates to a dict, so no preprocessing is needed
+ # anymore.
+ conn = dis.create_kv_conn(CONSUL_HOST)
+ dis.push_service_component_config(conn, name, application_config)
+ return kwargs
+ except dis.DiscoveryConnectionError as e:
+ raise RecoverableError(e)
+ except Exception as e:
+ ctx.logger.error("Unexpected error while pushing configuration: {0}"
+ .format(str(e)))
+ raise NonRecoverableError(e)
+
+def _generate_component_name(**kwargs):
+ """Generate component name"""
+ service_component_type = kwargs['service_component_type']
+ name_override = kwargs['service_component_name_override']
+
+ kwargs['name'] = name_override if name_override \
+ else dis.generate_service_component_name(service_component_type)
+ return kwargs
+
+def _done_for_create(**kwargs):
+ """Wrap up create operation"""
+ name = kwargs['name']
+ kwargs[SERVICE_COMPONENT_NAME] = name
+ # All updates to the runtime_properties happen here. Doing so is safe
+ # because the context is not being mutated by anything else, and it keeps
+ # the other functions pure (pure in the sense of not dealing with
+ # CloudifyContext).
+ ctx.instance.runtime_properties.update(kwargs)
+ ctx.logger.info("Done setting up: {0}".format(name))
+ return kwargs
+
+
+@merge_inputs_for_create
+@monkeypatch_loggers
+@Policies.gather_policies_to_node()
+@operation
+def create_for_components(**create_inputs):
+ """Create step for service components
+
+ This interface is responsible for:
+
+ 1. Generating service component name
+ 2. Populating config information into Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **_generate_component_name(
+ **create_inputs)))
+
+
+def _parse_streams(**kwargs):
+ """Parse streams and setup for DMaaP plugin"""
+ # The DMaaP plugin requires this plugin to set the runtime properties
+ # keyed by the node name.
+ def setup_publishes(s):
+ kwargs[s["name"]] = s
+
+ map(setup_publishes, kwargs["streams_publishes"])
+
+ def setup_subscribes(s):
+ if s["type"] == "data_router":
+ # If a username and password have not been provided, generate them. The
+ # DMaaP plugin doesn't generate them for subscribers. The generation
+ # code and the username/password lengths have been lifted from the
+ # DMaaP plugin.
+
+ # Don't want to mutate the source
+ s = copy.deepcopy(s)
+ if not s.get("username", None):
+ s["username"] = utils.random_string(8)
+ if not s.get("password", None):
+ s["password"] = utils.random_string(10)
+
+ kwargs[s["name"]] = s
+
+ # NOTE: the delivery url is constructed and set up in the start operation
+ map(setup_subscribes, kwargs["streams_subscribes"])
+
+ return kwargs
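+
+# A hedged sketch (hypothetical stream values) of what _parse_streams does
+# for a data router subscriber: an input item
+#   {"name": "feed01", "type": "data_router", "username": None, "password": None}
+# is deep-copied into kwargs under the key "feed01" with random credentials
+# filled in, so the DMaaP plugin can find it keyed by node name.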
+
+def _setup_for_discovery_streams(**kwargs):
+ """Setup for discovery of streams
+
+ Specifically, this call addresses a race condition in the data router
+ subscriber case: the component needs its feed subscriber information, but
+ the DMaaP plugin doesn't provide it until after this plugin's start
+ operation.
+ """
+ dr_subs = [kwargs[s["name"]] for s in kwargs["streams_subscribes"] \
+ if s["type"] == "data_router"]
+
+ if dr_subs:
+ dmaap_kv_key = "{0}:dmaap".format(kwargs["name"])
+ conn = dis.create_kv_conn(CONSUL_HOST)
+
+ def add_feed(dr_sub):
+ # delivery url and subscriber id will be filled in by the dmaap plugin later
+ v = { "location": dr_sub["location"], "delivery_url": None,
+ "username": dr_sub["username"], "password": dr_sub["password"],
+ "subscriber_id": None }
+ return dis.add_to_entry(conn, dmaap_kv_key, dr_sub["name"], v) is not None
+
+ try:
+ if all(map(add_feed, dr_subs)):
+ return kwargs
+ except Exception as e:
+ raise NonRecoverableError(e)
+
+ # You should never get here
+ raise NonRecoverableError("Failure updating feed streams in Consul")
+ else:
+ return kwargs
+
+
+@merge_inputs_for_create
+@monkeypatch_loggers
+@Policies.gather_policies_to_node()
+@operation
+def create_for_components_with_streams(**create_inputs):
+ """Create step for service components that use DMaaP
+
+ This interface is responsible for:
+
+ 1. Generating service component name
+ 2. Setup runtime properties for DMaaP plugin
+ 3. Populating application config into Consul
+ 4. Populating DMaaP config for data router subscribers in Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **_setup_for_discovery_streams(
+ **_parse_streams(
+ **_generate_component_name(
+ **create_inputs)))))
+
+
+@merge_inputs_for_create
+@monkeypatch_loggers
+@operation
+def create_for_platforms(**create_inputs):
+ """Create step for platform components
+
+ This interface is responsible for:
+
+ 1. Populating config information into Consul
+ """
+ _done_for_create(
+ **_setup_for_discovery(
+ **create_inputs))
+
+
+def _lookup_service(service_component_name, consul_host=CONSUL_HOST,
+ with_port=False):
+ conn = dis.create_kv_conn(consul_host)
+ results = dis.lookup_service(conn, service_component_name)
+
+ if with_port:
+ # Just grab first
+ result = results[0]
+ return "{address}:{port}".format(address=result["ServiceAddress"],
+ port=result["ServicePort"])
+ else:
+ return results[0]["ServiceAddress"]
+
+
+def _verify_k8s_deployment(service_component_name, max_wait):
+ """Verify that the k8s Deployment is ready
+
+ Args:
+ -----
+ max_wait (integer): limit on how many attempts to make, which translates to
+ seconds because each sleep is one second. 0 means wait indefinitely.
+
+ Return:
+ -------
+ True if the deployment is ready; otherwise a DockerPluginDeploymentError
+ exception is raised.
+ """
+ num_attempts = 1
+
+ while True:
+ if k8sclient.is_available(DCAE_NAMESPACE, service_component_name):
+ return True
+ else:
+ num_attempts += 1
+
+ if max_wait > 0 and max_wait < num_attempts:
+ raise DockerPluginDeploymentError("k8s deployment never became ready for {0}".format(service_component_name))
+
+ time.sleep(1)
+
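+# A hedged usage note (hypothetical component name):
+# _verify_k8s_deployment("comp-abc", 300) polls k8sclient.is_available once
+# per second for up to roughly 300 seconds; max_wait=0 polls until the
+# deployment becomes available.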
+
+def _create_and_start_container(container_name, image, **kwargs):
+ '''
+ This will create a k8s Deployment and, if needed, a k8s Service or two.
+ (We are being opinionated in our use of k8s... this code decides what k8s abstractions and features to use.
+ We're not exposing k8s to the component developer and the blueprint author.
+ This is a conscious choice. We want to use k8s in a controlled, consistent way, and we want to hide
+ the details from the component developer and the blueprint author.)
+
+ kwargs may have:
+ - volumes: array of volume objects, where a volume object is:
+ {"host":{"path": "/path/on/host"}, "container":{"bind":"/path/on/container","mode":"rw_or_ro"}
+ - ports: array of strings in the form "container_port:host_port"
+ - envs: map of name-value pairs ( {name0: value0, name1: value1...} )
+ - always_pull_image: boolean. If true, sets the image pull policy to "Always",
+ so that a fresh copy of the image is always pulled. Otherwise, sets the
+ image pull policy to "IfNotPresent"
+ - msb_list: array of msb objects, where an msb object is as described in msb/msb.py.
+ - log_info: an object with info for setting up ELK logging, with the form:
+ {"log_directory": "/path/to/container/log/directory", "alternate_fb_path" : "/alternate/sidecar/log/path"}"
+ - replicas: number of replicas to be launched initially
+ - readiness: object with information needed to create a readiness check
+ '''
+ env = { "CONSUL_HOST": CONSUL_INTERNAL_NAME,
+ "CONFIG_BINDING_SERVICE": "config-binding-service" }
+ env.update(kwargs.get("envs", {}))
+ ctx.logger.info("Starting k8s deployment for {}, image: {}, env: {}, kwargs: {}".format(container_name, image, env, kwargs))
+ ctx.logger.info("Passing k8sconfig: {}".format(plugin_conf))
+ replicas = kwargs.get("replicas", 1)
+ _, dep = k8sclient.deploy(DCAE_NAMESPACE,
+ container_name,
+ image,
+ replicas = replicas,
+ always_pull=kwargs.get("always_pull_image", False),
+ k8sconfig=plugin_conf,
+ volumes=kwargs.get("volumes",[]),
+ ports=kwargs.get("ports",[]),
+ msb_list=kwargs.get("msb_list"),
+ env = env,
+ labels = kwargs.get("labels", {}),
+ log_info=kwargs.get("log_info"),
+ readiness=kwargs.get("readiness"))
+
+ # Capture the result of deployment for future use
+ ctx.instance.runtime_properties["k8s_deployment"] = dep
+ ctx.instance.runtime_properties["replicas"] = replicas
+ ctx.logger.info ("k8s deployment initiated successfully for {0}: {1}".format(container_name, dep))
+
+def _parse_cloudify_context(**kwargs):
+ """Parse Cloudify context
+
+ Extract what is needed. This is an impure function because it requires ctx.
+ """
+ kwargs["deployment_id"] = ctx.deployment.id
+
+ # Set some labels for the Kubernetes pods
+ kwargs["labels"] = {
+ "cfydeployment" : ctx.deployment.id,
+ "cfynode": ctx.node.name,
+ "cfynodeinstance": ctx.instance.id
+ }
+
+ # Pick up the centralized logging info
+ if "log_info" in ctx.node.properties and "log_directory" in ctx.node.properties["log_info"]:
+ kwargs["log_info"] = ctx.node.properties["log_info"]
+
+ # Pick up replica count and always_pull_image flag
+ if "replicas" in ctx.node.properties:
+ kwargs["replicas"] = ctx.node.properties["replicas"]
+ if "always_pull_image" in ctx.node.properties:
+ kwargs["always_pull_image"] = ctx.node.properties["always_pull_image"]
+
+ return kwargs
+
+def _enhance_docker_params(**kwargs):
+ '''
+ Set up Docker environment variables and readiness check info
+ and inject into kwargs.
+ '''
+
+ # Get info for setting up readiness probe, if present
+ docker_config = kwargs.get("docker_config", {})
+ if "healthcheck" in docker_config:
+ kwargs["readiness"] = docker_config["healthcheck"]
+
+ envs = kwargs.get("envs", {})
+
+ # Set tags on this component for its Consul registration as a service
+ tags = [kwargs.get("deployment_id", None), kwargs["service_id"]]
+ tags = [ str(tag) for tag in tags if tag is not None ]
+ # Registrator will use this to register this component with tags. Must be
+ # comma delimited.
+ envs["SERVICE_TAGS"] = ",".join(tags)
+
+ kwargs["envs"] = envs
+
+ def combine_params(key, docker_config, kwargs):
+ v = docker_config.get(key, []) + kwargs.get(key, [])
+ if v:
+ kwargs[key] = v
+ return kwargs
+
+ # Add the lists of ports and volumes unintelligently - meaning just add the
+ # lists together with no deduping.
+ kwargs = combine_params("ports", docker_config, kwargs)
+ kwargs = combine_params("volumes", docker_config, kwargs)
+
+
+ return kwargs
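+
+# Example (hypothetical ids): with deployment_id "abc" and service_id "zed",
+# _enhance_docker_params sets envs["SERVICE_TAGS"] = "abc,zed", which
+# Registrator uses to tag this component's Consul service registration.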
+
+def _create_and_start_component(**kwargs):
+ """Create and start component (container)"""
+ image = kwargs["image"]
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ # Need to be picky and manually select pieces, because passing the full
+ # kwargs (which contains everything) confused the execution of
+ # _create_and_start_container, since duplicate variables exist
+ sub_kwargs = {
+ "volumes": kwargs.get("volumes", []),
+ "ports": kwargs.get("ports", None),
+ "envs": kwargs.get("envs", {}),
+ "log_info": kwargs.get("log_info", {}),
+ "labels": kwargs.get("labels", {}),
+ "readiness": kwargs.get("readiness",{})}
+ _create_and_start_container(service_component_name, image, **sub_kwargs)
+
+ return kwargs
+
+def _verify_component(**kwargs):
+ """Verify deployment is ready"""
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+
+ max_wait = kwargs.get("max_wait", 300)
+ ctx.logger.info("Waiting up to {0} secs for {1} to become ready".format(max_wait, service_component_name))
+
+ if _verify_k8s_deployment(service_component_name, max_wait):
+ ctx.logger.info("k8s deployment is ready for: {0}".format(service_component_name))
+
+ return kwargs
+
+def _done_for_start(**kwargs):
+ ctx.instance.runtime_properties.update(kwargs)
+ ctx.logger.info("Done starting: {0}".format(kwargs["name"]))
+ return kwargs
+
+def _setup_msb_registration(service_name, msb_reg):
+ return {
+ "serviceName" : service_name,
+ "port" : msb_reg.get("port", "80"),
+ "version" : msb_reg.get("version", "v1"),
+ "url" : msb_reg.get("url_path", "/v1"),
+ "protocol" : "REST",
+ "enable_ssl" : msb_reg.get("uses_ssl", False),
+ "visualRange" : "1"
+}
+
+@wrap_error_handling_start
+@merge_inputs_for_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_components(**start_inputs):
+ """Initiate Kubernetes deployment for service components
+
+ This operation method is to be used with the ContainerizedServiceComponent
+ node type. After initiating a Kubernetes deployment, the plugin will verify with Kubernetes
+ that the app is up and responding successfully to readiness probes.
+ """
+ _done_for_start(
+ **_verify_component(
+ **_create_and_start_component(
+ **_enhance_docker_params(
+ **_parse_cloudify_context(**start_inputs)))))
+
+
+def _update_delivery_url(**kwargs):
+ """Update the delivery url for data router subscribers"""
+ dr_subs = [kwargs[s["name"]] for s in kwargs["streams_subscribes"] \
+ if s["type"] == "data_router"]
+
+ if dr_subs:
+ service_component_name = kwargs[SERVICE_COMPONENT_NAME]
+ # TODO: Should NOT be setting up the delivery url with IP addresses;
+ # in the https case this will not work, because data router validates
+ # the certificate against the FQDN.
+ subscriber_host = _lookup_service(service_component_name, with_port=True)
+
+ for dr_sub in dr_subs:
+ scheme = dr_sub["scheme"] if "scheme" in dr_sub else DEFAULT_SCHEME
+ if "route" not in dr_sub:
+ raise NonRecoverableError("'route' key missing from data router subscriber")
+ path = dr_sub["route"]
+ dr_sub["delivery_url"] = "{scheme}://{host}/{path}".format(
+ scheme=scheme, host=subscriber_host, path=path)
+ kwargs[dr_sub["name"]] = dr_sub
+
+ return kwargs
+
+@wrap_error_handling_start
+@merge_inputs_for_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_components_with_streams(**start_inputs):
+ """Initiate Kubernetes deployment for service components that have streams
+
+ This operation method is to be used with the ContainerizedServiceComponentUsingDmaap
+ node type. After initiating the Kubernetes deployment, the plugin will verify with
+ Kubernetes that the app is up and responding successfully to readiness probes.
+ """
+ _done_for_start(
+ **_update_delivery_url(
+ **_verify_component(
+ **_create_and_start_component(
+ **_enhance_docker_params(
+ **_parse_cloudify_context(**start_inputs))))))
+
+
+@wrap_error_handling_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container_for_platforms(**kwargs):
+ """Initiate Kubernetes deployment for platform components
+
+ This operation method is to be used with the ContainerizedPlatformComponent
+ node type.
+ """
+ # Capture node properties
+ image = ctx.node.properties["image"]
+ docker_config = ctx.node.properties.get("docker_config", {})
+ if "healthcheck" in docker_config:
+ kwargs["readiness"] = docker_config["healthcheck"]
+ if "dns_name" in ctx.node.properties:
+ service_component_name = ctx.node.properties["dns_name"]
+ else:
+ service_component_name = ctx.node.properties["name"]
+
+ # Set some labels for the Kubernetes pods
+ kwargs["labels"] = {
+ "cfydeployment" : ctx.deployment.id,
+ "cfynode": ctx.node.name,
+ "cfynodeinstance": ctx.instance.id
+ }
+
+ host_port = ctx.node.properties["host_port"]
+ container_port = ctx.node.properties["container_port"]
+
+ # Cloudify properties are all required, and Cloudify complains that None
+ # is not a valid type for integer. Default to 0 to indicate that no
+ # specific port mapping should be set, e.g. in cases like the service
+ # change handler.
+ if container_port != 0:
+ # Doing this because other nodes might want to use this property
+ port_mapping = "{cp}:{hp}".format(cp=container_port, hp=host_port)
+ ports = kwargs.get("ports", []) + [ port_mapping ]
+ kwargs["ports"] = ports
+ if "ports" not in kwargs:
+ ctx.logger.warn("No port mappings defined. Will randomly assign port.")
+
+ # All of the new node properties could be handled more DRYly!
+ # If a registration to MSB is required, then set up the registration info
+ if "msb_registration" in ctx.node.properties and "port" in ctx.node.properties["msb_registration"]:
+ kwargs["msb_list"] = [_setup_msb_registration(service_component_name, ctx.node.properties["msb_registration"])]
+
+ # If centralized logging via ELK is desired, then set up the logging info
+ if "log_info" in ctx.node.properties and "log_directory" in ctx.node.properties["log_info"]:
+ kwargs["log_info"] = ctx.node.properties["log_info"]
+
+ # Pick up replica count and always_pull_image flag
+ if "replicas" in ctx.node.properties:
+ kwargs["replicas"] = ctx.node.properties["replicas"]
+ if "always_pull_image" in ctx.node.properties:
+ kwargs["always_pull_image"] = ctx.node.properties["always_pull_image"]
+ _create_and_start_container(service_component_name, image, **kwargs)
+
+ # Verify that the k8s deployment is ready
+
+ max_wait = kwargs.get("max_wait", 300)
+ ctx.logger.info("Waiting up to {0} secs for {1} to become ready".format(max_wait, service_component_name))
+
+ if _verify_k8s_deployment(service_component_name, max_wait):
+ ctx.logger.info("k8s deployment ready for: {0}".format(service_component_name))
+
+
+@wrap_error_handling_start
+@monkeypatch_loggers
+@operation
+def create_and_start_container(**kwargs):
+ """Initiate a Kubernetes deployment for the generic ContainerizedApplication node type"""
+ service_component_name = ctx.node.properties["name"]
+ ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME] = service_component_name
+
+ image = ctx.node.properties["image"]
+
+ _create_and_start_container(service_component_name, image, **kwargs)
+
+@monkeypatch_loggers
+@operation
+def stop_and_remove_container(**kwargs):
+ """Delete Kubernetes deployment"""
+ try:
+ deployment_description = ctx.instance.runtime_properties["k8s_deployment"]
+ k8sclient.undeploy(deployment_description)
+
+ except Exception as e:
+ ctx.logger.error("Unexpected error while deleting k8s deployment: {0}"
+ .format(str(e)))
+
+@wrap_error_handling_update
+@monkeypatch_loggers
+@operation
+def scale(replicas, **kwargs):
+ """Change number of replicas in the deployment"""
+ service_component_name = ctx.instance.runtime_properties["service_component_name"]
+
+ if replicas > 0:
+ current_replicas = ctx.instance.runtime_properties["replicas"]
+ ctx.logger.info("Scaling {0} from {1} to {2} replica(s)".format(service_component_name, current_replicas, replicas))
+ deployment_description = ctx.instance.runtime_properties["k8s_deployment"]
+ k8sclient.scale(deployment_description, replicas)
+ ctx.instance.runtime_properties["replicas"] = replicas
+
+ # Verify that the scaling took place as expected
+ max_wait = kwargs.get("max_wait", 300)
+ ctx.logger.info("Waiting up to {0} secs for {1} to scale and become ready".format(max_wait, service_component_name))
+ if _verify_k8s_deployment(service_component_name, max_wait):
+ ctx.logger.info("Scaling complete: {0} from {1} to {2} replica(s)".format(service_component_name, current_replicas, replicas))
+
+ else:
+ ctx.logger.info("Ignoring request to scale {0} to zero replicas".format(service_component_name))
+
+@wrap_error_handling_update
+@monkeypatch_loggers
+@operation
+def update_image(image, **kwargs):
+ """ Restart component with a new Docker image """
+
+ service_component_name = ctx.instance.runtime_properties["service_component_name"]
+ if image:
+ current_image = ctx.instance.runtime_properties["image"]
+ ctx.logger.info("Updating app image for {0} from {1} to {2}".format(service_component_name, current_image, image))
+ deployment_description = ctx.instance.runtime_properties["k8s_deployment"]
+ k8sclient.upgrade(deployment_description, image)
+ ctx.instance.runtime_properties["image"] = image
+
+ # Verify that the update took place as expected
+ max_wait = kwargs.get("max_wait", 300)
+ ctx.logger.info("Waiting up to {0} secs for {1} to be updated and become ready".format(max_wait, service_component_name))
+ if _verify_k8s_deployment(service_component_name, max_wait):
+ ctx.logger.info("Update complete: {0} from {1} to {2}".format(service_component_name, current_image, image))
+
+ else:
+ ctx.logger.info("Ignoring update_image request for {0} with unusable image '{1}'".format(service_component_name, str(image)))
+
+#TODO: implement rollback operation when kubernetes python client fix is available.
+# (See comments in k8sclient.py.)
+# In the meantime, it's possible to undo an update_image operation by doing a second
+# update_image that specifies the older image.
+
+@monkeypatch_loggers
+@Policies.cleanup_policies_on_node
+@operation
+def cleanup_discovery(**kwargs):
+ """Delete configuration from Consul"""
+ service_component_name = ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+
+ try:
+ conn = dis.create_kv_conn(CONSUL_HOST)
+ dis.remove_service_component_config(conn, service_component_name)
+ except dis.DiscoveryConnectionError as e:
+ raise RecoverableError(e)
+
+
+def _notify_container(**kwargs):
+ """
+ Notify container using the policy section in the docker_config.
+ Notification consists of running a script in the application container
+ in each pod in the Kubernetes deployment associated with this node.
+ Return the list of notification results.
+ """
+ dc = kwargs["docker_config"]
+ resp = []
+
+ if "policy" in dc:
+ if dc["policy"]["trigger_type"] == "docker":
+
+ # Build the command to execute in the container
+ # SCRIPT_PATH policies {"policies" : ...., "updated_policies" : ..., "removed_policies": ...}
+ script_path = dc["policy"]["script_path"]
+ policy_data = {
+ "policies": kwargs["policies"],
+ "updated_policies": kwargs["updated_policies"],
+ "removed_policies": kwargs["removed_policies"]
+ }
+
+ command = [script_path, "policies", json.dumps(policy_data)]
+
+ # Execute the command
+ deployment_description = ctx.instance.runtime_properties["k8s_deployment"]
+ resp = k8sclient.execute_command_in_deployment(deployment_description, command)
+
+ # else the default is no trigger
+
+ return resp
+
+@operation
+@monkeypatch_loggers
+@Policies.update_policies_on_node()
+def policy_update(updated_policies, removed_policies=None, policies=None, **kwargs):
+ """Policy update task
+
+ This method is responsible for updating the application configuration and
+ notifying the applications that the change has occurred. This is to be used
+ for the dcae.interfaces.policy.policy_update operation.
+
+ :updated_policies: contains the list of changed policy-configs when configs_only=True
+ (the default). Use configs_only=False to bring the full policy objects into :updated_policies:.
+ """
+ service_component_name = ctx.instance.runtime_properties[SERVICE_COMPONENT_NAME]
+ ctx.logger.info("policy_update for {0}-- updated_policies: {1}, removed_policies: {2}, policies: {3}"
+ .format(service_component_name, updated_policies, removed_policies, policies))
+ update_inputs = copy.deepcopy(ctx.instance.runtime_properties)
+ update_inputs["updated_policies"] = updated_policies
+ update_inputs["removed_policies"] = removed_policies
+ update_inputs["policies"] = policies
+
+ resp = _notify_container(**update_inputs)
+ ctx.logger.info("policy_update complete for {0}--notification results: {1}".format(service_component_name,json.dumps(resp)))
diff --git a/k8s/k8splugin/utils.py b/k8s/k8splugin/utils.py
new file mode 100644
index 0000000..c45af68
--- /dev/null
+++ b/k8s/k8splugin/utils.py
@@ -0,0 +1,43 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import string
+import random
+import collections
+
+
+def random_string(n):
+ """Random generate an ascii string of "n" length"""
+ corpus = string.ascii_lowercase + string.ascii_uppercase + string.digits
+ return ''.join(random.choice(corpus) for x in range(n))
+
+
+def update_dict(d, u):
+ """Recursively updates dict
+
+ Update dict d with dict u
+ """
+ for k, v in u.iteritems():
+ if isinstance(v, collections.Mapping):
+ r = update_dict(d.get(k, {}), v)
+ d[k] = r
+ else:
+ d[k] = u[k]
+ return d
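+
+# Example: update_dict merges recursively rather than replacing whole
+# subtrees:
+#   update_dict({"a": {"x": 1, "y": 2}, "b": 0}, {"a": {"y": 3}})
+#   # => {"a": {"x": 1, "y": 3}, "b": 0}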
diff --git a/k8s/msb/__init__.py b/k8s/msb/__init__.py
new file mode 100644
index 0000000..58d8f1c
--- /dev/null
+++ b/k8s/msb/__init__.py
@@ -0,0 +1,19 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
diff --git a/k8s/msb/msb.py b/k8s/msb/msb.py
new file mode 100644
index 0000000..5883f29
--- /dev/null
+++ b/k8s/msb/msb.py
@@ -0,0 +1,64 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+import uuid
+import json
+
+MSB_ANNOTATION_KEY = 'msb.onap.org/service-info'
+
+def _sanitize_service_info(service_info):
+ '''
+ Sanitize a dict containing the MSB annotation parameters for registering
+ a service with MSB. (Not bullet proof, but useful.)
+ MSB registration happens asynchronously from the installation flow: an MSB process
+ watches the k8s event stream for new Service creations and looks for the MSB annotation.
+ A bad annotation will fail silently. This sanitization process should make sure that something
+ gets put into the MSB's list of services, so that the problem can be seen.
+
+ service_info is a dict containing the MSB annotation parameters.
+ -- serviceName: the name under which the service is to be registered (default: random--pretty useless!)
+ -- port: the container port on which the service can be contacted (default: "80"--nearly as useless)
+ -- version: the API version (default: "v1")
+ -- url: the path to the application's API endpoint (default: "/")
+ -- protocol: see the MSB documentation--the default is usually OK (default: "REST")
+ -- enable_ssl: a flag indicating if the service uses SSL (True) or not (False) (default: False)
+ -- visualRange: "1" means the service is exposed only in ONAP, "0" means externally (default: "1")
+ (Note this is a string value)
+ '''
+ return {
+ 'serviceName': service_info.get('serviceName', str(uuid.uuid4())),
+ 'port': str(service_info.get('port', '80')),
+ 'version': service_info.get('version','v1'),
+ 'url': service_info.get('url','/'),
+ 'protocol': service_info.get('protocol','REST'),
+ 'enable_ssl': bool(service_info.get('enable_ssl', False)),
+ 'visualRange': str(service_info.get('visualRange', '1'))
+ }
+
+def create_msb_annotation(msb_service_list):
+ '''
+ Creates an annotation that can be added to a k8s Service to trigger
+ registration with MSB.
+ msb_service_list is a list of dicts, each containing MSB registration information for a
+ service. (One k8s Service can have multiple ports, each one of which can be
+ registered as an MSB service.)
+ '''
+ return {MSB_ANNOTATION_KEY : json.dumps([_sanitize_service_info(service_info) for service_info in msb_service_list])}
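+
+# A hedged usage sketch (hypothetical values):
+#   create_msb_annotation([{"serviceName": "my-svc", "port": 8080}])
+# returns a one-entry dict keyed by MSB_ANNOTATION_KEY whose value is the
+# JSON-serialized list, with defaults filled in (version "v1", url "/",
+# protocol "REST", enable_ssl False, visualRange "1") and the port coerced
+# to the string "8080".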
diff --git a/k8s/pom.xml b/k8s/pom.xml
new file mode 100644
index 0000000..cd5a8d2
--- /dev/null
+++ b/k8s/pom.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0"?>
+<!--
+================================================================================
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.1.0-SNAPSHOT</version>
+ </parent>
+ <groupId>org.onap.dcaegen2.platform.plugins</groupId>
+ <artifactId>k8s</artifactId>
+ <name>k8s-plugin</name>
+ <version>1.4.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPath>coverage.xml</sonar.python.coverage.reportPath>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginName>Python</sonar.pluginName>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
+ </properties>
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <plugins>
+ <!-- plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.4.1</version>
+ <configuration>
+ <descriptors>
+ <descriptor>assembly/dep.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <id>make-assembly</id>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin -->
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2.1</version>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/k8s/requirements.txt b/k8s/requirements.txt
new file mode 100644
index 0000000..833039f
--- /dev/null
+++ b/k8s/requirements.txt
@@ -0,0 +1,5 @@
+python-consul>=0.6.0,<1.0.0
+uuid==1.30
+onap-dcae-dcaepolicy-lib==2.4.0
+kubernetes==4.0.0
+cloudify-plugins-common==3.4 \ No newline at end of file
diff --git a/k8s/setup.py b/k8s/setup.py
new file mode 100644
index 0000000..7991584
--- /dev/null
+++ b/k8s/setup.py
@@ -0,0 +1,38 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from setuptools import setup
+
+setup(
+ name='k8splugin',
+ description='Cloudify plugin for containerized components deployed using Kubernetes',
+ version="1.4.0",
+ author='J. F. Lucas, Michael Hwang, Tommy Carpenter',
+ packages=['k8splugin','k8sclient','msb','configure'],
+ zip_safe=False,
+ install_requires=[
+ "python-consul>=0.6.0,<1.0.0",
+ "uuid==1.30",
+ "onap-dcae-dcaepolicy-lib>=2.1.0,<3.0.0",
+ "cloudify-plugins-common==3.4",
+ "cloudify-python-importer==0.1.0",
+ "kubernetes==4.0.0"
+ ]
+)
diff --git a/k8s/tests/conftest.py b/k8s/tests/conftest.py
new file mode 100644
index 0000000..572a510
--- /dev/null
+++ b/k8s/tests/conftest.py
@@ -0,0 +1,34 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import pytest
+
+@pytest.fixture()
+def mockconfig(monkeypatch):
+ """ Override the regular configure() routine that reads a file and calls Consul"""
+ def altconfig():
+ return {
+ "consul_host": "consul",
+ "namespace":"dcae",
+ "consul_dns_name" : "consul",
+ "image_pull_secrets" : []
+ }
+ from configure import configure
+ monkeypatch.setattr(configure, 'configure', altconfig) \ No newline at end of file
diff --git a/k8s/tests/test_decorators.py b/k8s/tests/test_decorators.py
new file mode 100644
index 0000000..552fa4b
--- /dev/null
+++ b/k8s/tests/test_decorators.py
@@ -0,0 +1,34 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+def test_wrapper_merge_inputs(mockconfig):
+ from k8splugin import decorators as dec
+
+ properties = { "app_config": {"nested": { "a": 123, "b": 456 }, "foo": "duh"},
+ "image": "some-docker-image" }
+ kwargs = { "app_config": {"nested": {"a": 789, "c": "zyx"}} }
+
+ def task_func(**inputs):
+ return inputs
+
+ expected = { "app_config": {"nested": { "a": 789, "b": 456, "c": "zyx" },
+ "foo": "duh"}, "image": "some-docker-image" }
+
+ assert expected == dec._wrapper_merge_inputs(task_func, properties, **kwargs) \ No newline at end of file
diff --git a/k8s/tests/test_discovery.py b/k8s/tests/test_discovery.py
new file mode 100644
index 0000000..24e45ee
--- /dev/null
+++ b/k8s/tests/test_discovery.py
@@ -0,0 +1,71 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import pytest
+from functools import partial
+import requests
+
+def test_wrap_consul_call(mockconfig):
+ from k8splugin import discovery as dis
+
+ def foo(a, b, c="default"):
+ return " ".join([a, b, c])
+
+ wrapped_foo = partial(dis._wrap_consul_call, foo)
+ assert wrapped_foo("hello", "world") == "hello world default"
+ assert wrapped_foo("hello", "world", c="new masters") == "hello world new masters"
+
+ def foo_connection_error(a, b, c):
+ raise requests.exceptions.ConnectionError("simulate failed connection")
+
+ wrapped_foo = partial(dis._wrap_consul_call, foo_connection_error)
+ with pytest.raises(dis.DiscoveryConnectionError):
+ wrapped_foo("a", "b", "c")
+
+def test_generate_service_component_name(mockconfig):
+ from k8splugin import discovery as dis
+
+ component_type = "some-component-type"
+ name = dis.generate_service_component_name(component_type)
+ assert name.split("-", 1)[1] == component_type
+
+def test_find_matching_services(mockconfig):
+ from k8splugin import discovery as dis
+
+ services = { "component_dockerhost_1": ["foo", "bar"],
+ "platform_dockerhost": [], "component_dockerhost_2": ["baz"] }
+ assert sorted(["component_dockerhost_1", "component_dockerhost_2"]) \
+ == sorted(dis._find_matching_services(services, "component_dockerhost", []))
+
+ assert ["component_dockerhost_1"] == dis._find_matching_services(services, \
+ "component_dockerhost", ["foo", "bar"])
+
+ assert ["component_dockerhost_1"] == dis._find_matching_services(services, \
+ "component_dockerhost", ["foo"])
+
+ assert [] == dis._find_matching_services(services, "unknown", ["foo"])
+
+def test_is_healthy_pure(mockconfig):
+ from k8splugin import discovery as dis
+
+ def fake_is_healthy(name):
+ return 0, [{ "Checks": [{"Status": "passing"}] }]
+
+ assert True == dis._is_healthy_pure(fake_is_healthy, "some-component") \ No newline at end of file
diff --git a/k8s/tests/test_tasks.py b/k8s/tests/test_tasks.py
new file mode 100644
index 0000000..69e866d
--- /dev/null
+++ b/k8s/tests/test_tasks.py
@@ -0,0 +1,293 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+import copy
+import pytest
+from cloudify.exceptions import NonRecoverableError, RecoverableError
+
+
+def test_generate_component_name(mockconfig):
+ from k8splugin import tasks
+ kwargs = { "service_component_type": "doodle",
+ "service_component_name_override": None }
+
+ assert "doodle" in tasks._generate_component_name(**kwargs)["name"]
+
+ kwargs["service_component_name_override"] = "yankee"
+
+ assert "yankee" == tasks._generate_component_name(**kwargs)["name"]
+
+
+def test_parse_streams(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ # Good case for streams_publishes
+ test_input = { "streams_publishes": [{"name": "topic00", "type": "message_router"},
+ {"name": "feed00", "type": "data_router"}],
+ "streams_subscribes": {} }
+
+ expected = {'feed00': {'type': 'data_router', 'name': 'feed00'},
+ 'streams_publishes': [{'type': 'message_router', 'name': 'topic00'},
+ {'type': 'data_router', 'name': 'feed00'}],
+ 'streams_subscribes': {},
+ 'topic00': {'type': 'message_router', 'name': 'topic00'}
+ }
+
+ assert expected == tasks._parse_streams(**test_input)
+
+ # Good case for streams_subscribes (password provided)
+ test_input = { "streams_publishes": {},
+ "streams_subscribes": [{"name": "topic01", "type": "message_router"},
+ {"name": "feed01", "type": "data_router", "username": "hero",
+ "password": "123456"}] }
+
+ expected = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+
+ assert expected == tasks._parse_streams(**test_input)
+
+ # Good case for streams_subscribes (password generated)
+ test_input = { "streams_publishes": {},
+ "streams_subscribes": [{"name": "topic01", "type": "message_router"},
+ {"name": "feed01", "type": "data_router", "username": None,
+ "password": None}] }
+
+ def not_so_random(n):
+ return "nosurprise"
+
+ monkeypatch.setattr(k8splugin.utils, "random_string", not_so_random)
+
+ expected = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'nosurprise', 'password': 'nosurprise'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': None,
+ 'password': None}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+
+ assert expected == tasks._parse_streams(**test_input)
+
+
+def test_setup_for_discovery(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+
+ test_input = { "name": "some-name",
+ "application_config": { "one": "a", "two": "b" } }
+
+ def fake_push_config(conn, name, application_config):
+ return
+
+ monkeypatch.setattr(k8splugin.discovery, "push_service_component_config",
+ fake_push_config)
+
+ assert test_input == tasks._setup_for_discovery(**test_input)
+
+ def fake_push_config_connection_error(conn, name, application_config):
+ raise k8splugin.discovery.DiscoveryConnectionError("Boom")
+
+ monkeypatch.setattr(k8splugin.discovery, "push_service_component_config",
+ fake_push_config_connection_error)
+
+ with pytest.raises(RecoverableError):
+ tasks._setup_for_discovery(**test_input)
+
+
+def test_setup_for_discovery_streams(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["name"] = "some-foo-service-component"
+
+ # Good case
+ def fake_add_to_entry(conn, key, add_name, add_value):
+ """
+ This fake method will check all the pieces that are used to make store
+ details in Consul
+ """
+ if key != test_input["name"] + ":dmaap":
+ return None
+ if add_name != "feed01":
+ return None
+ if add_value != {"location": "Bedminster", "delivery_url": None,
+ "username": "hero", "password": "123456", "subscriber_id": None}:
+ return None
+
+ return "SUCCESS!"
+
+ monkeypatch.setattr(k8splugin.discovery, "add_to_entry",
+ fake_add_to_entry)
+
+ assert tasks._setup_for_discovery_streams(**test_input) == test_input
+
+ # Good case - no data router subscribers
+ test_input = {"streams_publishes": [{"name": "topic00", "type": "message_router"}],
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'}]}
+ test_input["name"] = "some-foo-service-component"
+
+ assert tasks._setup_for_discovery_streams(**test_input) == test_input
+
+ # Bad case - something happened from the Consul call
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["name"] = "some-foo-service-component"
+
+ def barf(conn, key, add_name, add_value):
+ raise RuntimeError("Barf")
+
+ monkeypatch.setattr(k8splugin.discovery, "add_to_entry",
+ barf)
+
+ with pytest.raises(NonRecoverableError):
+ tasks._setup_for_discovery_streams(**test_input)
+
+
+def test_lookup_service(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ def fake_lookup(conn, scn):
+ return [{"ServiceAddress": "192.168.1.1", "ServicePort": "80"}]
+
+ monkeypatch.setattr(k8splugin.discovery, "lookup_service",
+ fake_lookup)
+
+ assert "192.168.1.1" == tasks._lookup_service("some-component")
+ assert "192.168.1.1:80" == tasks._lookup_service("some-component",
+ with_port=True)
+
+
+def test_verify_container(monkeypatch, mockconfig):
+ import k8sclient
+ from k8splugin import tasks
+ from k8splugin.exceptions import DockerPluginDeploymentError
+
+ def fake_is_available_success(ch, scn):
+ return True
+
+ monkeypatch.setattr(k8sclient, "is_available",
+ fake_is_available_success)
+
+ assert tasks._verify_k8s_deployment("some-name", 3)
+
+ def fake_is_available_never_good(ch, scn):
+ return False
+
+ monkeypatch.setattr(k8sclient, "is_available",
+ fake_is_available_never_good)
+
+ with pytest.raises(DockerPluginDeploymentError):
+ tasks._verify_k8s_deployment("some-name", 2)
+
+
+def test_update_delivery_url(monkeypatch, mockconfig):
+ import k8splugin
+ from k8splugin import tasks
+ test_input = {'feed01': {'type': 'data_router', 'name': 'feed01',
+ 'username': 'hero', 'password': '123456', 'location': 'Bedminster',
+ 'route': 'some-path'},
+ 'streams_publishes': {},
+ 'streams_subscribes': [{'type': 'message_router', 'name': 'topic01'},
+ {'type': 'data_router', 'name': 'feed01', 'username': 'hero',
+ 'password': '123456', 'location': 'Bedminster',
+ 'route': 'some-path'}],
+ 'topic01': {'type': 'message_router', 'name': 'topic01'}}
+ test_input["service_component_name"] = "some-foo-service-component"
+
+ def fake_lookup_service(name, with_port=False):
+ if with_port:
+ return "10.100.1.100:8080"
+ else:
+ return
+
+ monkeypatch.setattr(k8splugin.tasks, "_lookup_service",
+ fake_lookup_service)
+
+ expected = copy.deepcopy(test_input)
+ expected["feed01"]["delivery_url"] = "http://10.100.1.100:8080/some-path"
+
+ assert tasks._update_delivery_url(**test_input) == expected
+
+
+def test_enhance_docker_params(mockconfig):
+ from k8splugin import tasks
+ # Good - Test empty docker config
+
+ test_kwargs = { "docker_config": {}, "service_id": None }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {"SERVICE_TAGS": ""}, 'docker_config': {}, "service_id": None }
+
+ # Good - Test just docker config ports and volumes
+
+ test_kwargs = { "docker_config": { "ports": ["1:1", "2:2"],
+ "volumes": [{"container": "somewhere", "host": "somewhere else"}] },
+ "service_id": None }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {"SERVICE_TAGS": ""}, 'docker_config': {'ports': ['1:1', '2:2'],
+ 'volumes': [{'host': 'somewhere else', 'container': 'somewhere'}]},
+ 'ports': ['1:1', '2:2'], 'volumes': [{'host': 'somewhere else',
+ 'container': 'somewhere'}], "service_id": None}
+
+ # Good - Test just docker config ports and volumes with overrides
+
+ test_kwargs = { "docker_config": { "ports": ["1:1", "2:2"],
+ "volumes": [{"container": "somewhere", "host": "somewhere else"}] },
+ "ports": ["3:3", "4:4"], "volumes": [{"container": "nowhere", "host":
+ "nowhere else"}],
+ "service_id": None }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual == {'envs': {"SERVICE_TAGS": ""}, 'docker_config': {'ports': ['1:1', '2:2'],
+ 'volumes': [{'host': 'somewhere else', 'container': 'somewhere'}]},
+ 'ports': ['1:1', '2:2', '3:3', '4:4'], 'volumes': [{'host': 'somewhere else',
+ 'container': 'somewhere'}, {'host': 'nowhere else', 'container':
+ 'nowhere'}], "service_id": None}
+
+ # Good
+
+ test_kwargs = { "docker_config": {}, "service_id": "zed",
+ "deployment_id": "abc" }
+ actual = tasks._enhance_docker_params(**test_kwargs)
+
+ assert actual["envs"] == {"SERVICE_TAGS": "abc,zed"}
+
+
+def test_notify_container(mockconfig):
+ from k8splugin import tasks
+
+ test_input = { "docker_config": { "policy": { "trigger_type": "unknown" } } }
+ assert [] == tasks._notify_container(**test_input) \ No newline at end of file
diff --git a/k8s/tests/test_utils.py b/k8s/tests/test_utils.py
new file mode 100644
index 0000000..0b7cba4
--- /dev/null
+++ b/k8s/tests/test_utils.py
@@ -0,0 +1,33 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+def test_random_string(mockconfig):
+ from k8splugin import utils
+
+ target_length = 10
+ assert len(utils.random_string(target_length)) == target_length
+
+
+def test_update_dict(mockconfig):
+ from k8splugin import utils
+
+ d = { "a": 1, "b": 2 }
+ u = { "a": 2, "b": 3 }
+ assert utils.update_dict(d, u) == u
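Note that test_update_dict only exercises the overwrite case, where every key of the update wins and the merged dict is returned, i.e. behavior equivalent to this sketch (the real utils.update_dict may do more, such as recursive merging):

    def update_dict_sketch(d, u):
        # overwrite entries of d with those of u and return the merged dict
        d.update(u)
        return d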
diff --git a/k8s/tox.ini b/k8s/tox.ini
new file mode 100644
index 0000000..d58364b
--- /dev/null
+++ b/k8s/tox.ini
@@ -0,0 +1,15 @@
+# content of tox.ini, put in the same dir as setup.py
+[tox]
+envlist = py27
+
+[testenv]
+deps=
+ -rrequirements.txt
+ pytest
+ coverage
+ pytest-cov
+setenv=
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov k8splugin --cov-report xml
+    coverage xml
\ No newline at end of file
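With this tox.ini in place, running tox from the k8s directory builds a py27 environment, installs the pinned requirements, and leaves behind the xunit-results.xml and coverage.xml files referenced by the Sonar properties in the module poms.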
diff --git a/mvn-phase-lib.sh b/mvn-phase-lib.sh
deleted file mode 100755
index 3a41708..0000000
--- a/mvn-phase-lib.sh
+++ /dev/null
@@ -1,431 +0,0 @@
-#!/bin/bash
-
-# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-
-
-#MVN_PROJECT_MODULEID="$1"
-#MVN_PHASE="$2"
-#PROJECT_ROOT=$(dirname $0)
-
-FQDN="${MVN_PROJECT_GROUPID}.${MVN_PROJECT_ARTIFACTID}"
-if [ "$MVN_PROJECT_MODULEID" == "__" ]; then
- MVN_PROJECT_MODULEID=""
-fi
-
-if [[ "$MVN_PROJECT_VERSION" == *SNAPSHOT ]]; then
- echo "=> for SNAPSHOT artifact build"
- MVN_DEPLOYMENT_TYPE='SNAPSHOT'
-else
- echo "=> for STAGING/RELEASE artifact build"
- MVN_DEPLOYMENT_TYPE='STAGING'
-fi
-echo "MVN_DEPLOYMENT_TYPE is [$MVN_DEPLOYMENT_TYPE]"
-
-
-TIMESTAMP=$(date +%C%y%m%dT%H%M%S)
-
-# expected environment variables
-if [ -z "${MVN_NEXUSPROXY}" ]; then
- echo "MVN_NEXUSPROXY environment variable not set. Cannot proceed"
- exit
-fi
-MVN_NEXUSPROXY_HOST=$(echo "$MVN_NEXUSPROXY" |cut -f3 -d'/' | cut -f1 -d':')
-echo "=> Nexus Proxy at $MVN_NEXUSPROXY_HOST, $MVN_NEXUSPROXY"
-
-if [ -z "$WORKSPACE" ]; then
- WORKSPACE=$(pwd)
-fi
-
-if [ -z "$SETTINGS_FILE" ]; then
- echo "SETTINGS_FILE environment variable not set. Cannot proceed"
- exit
-fi
-
-
-
-# mvn phase in life cycle
-MVN_PHASE="$2"
-
-echo "MVN_PROJECT_MODULEID is [$MVN_PROJECT_MODULEID]"
-echo "MVN_PHASE is [$MVN_PHASE]"
-echo "MVN_PROJECT_GROUPID is [$MVN_PROJECT_GROUPID]"
-echo "MVN_PROJECT_ARTIFACTID is [$MVN_PROJECT_ARTIFACTID]"
-echo "MVN_PROJECT_VERSION is [$MVN_PROJECT_VERSION]"
-echo "MVN_NEXUSPROXY is [$MVN_NEXUSPROXY]"
-echo "MVN_RAWREPO_BASEURL_UPLOAD is [$MVN_RAWREPO_BASEURL_UPLOAD]"
-echo "MVN_RAWREPO_BASEURL_DOWNLOAD is [$MVN_RAWREPO_BASEURL_DOWNLOAD]"
-MVN_RAWREPO_HOST=$(echo "$MVN_RAWREPO_BASEURL_UPLOAD" | cut -f3 -d'/' |cut -f1 -d':')
-echo "MVN_RAWREPO_HOST is [$MVN_RAWREPO_HOST]"
-echo "MVN_RAWREPO_SERVERID is [$MVN_RAWREPO_SERVERID]"
-echo "MVN_DOCKERREGISTRY_SNAPSHOT is [$MVN_DOCKERREGISTRY_SNAPSHOT]"
-echo "MVN_DOCKERREGISTRY_PUBLIC is [$MVN_DOCKERREGISTRY_PUBLIC]"
-echo "MVN_DOCKERREGISTRY_RELEASE is [$MVN_DOCKERREGISTRY_RELEASE]"
-
-echo "MVN_PYPISERVER_SERVERID [$MVN_PYPISERVER_SERVERID]"
-echo "MVN_PYPISERVER_BASEURL is [$MVN_PYPISERVER_BASEURL]"
-
-
-
-clean_templated_files()
-{
- TEMPLATE_FILES=$(find . -name "*-template")
- for F in $TEMPLATE_FILES; do
- F2=$(echo "$F" | sed 's/-template$//')
- rm -f "$F2"
- done
-}
-clean_tox_files()
-{
- TOX_FILES=$(find . -name ".tox")
- TOX_FILES="$TOX_FILES $(find . -name 'venv-tox')"
- for F in $TOX_FILES; do
- rm -rf "$F"
- done
-}
-
-expand_templates()
-{
- # set up env variables, get ready for template resolution
-    # NOTE: CCSDK artifacts do not distinguish RELEASE vs SNAPSHOT
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.plugins"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.plugins"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_blueprints_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.blueprints"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_blueprints_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.blueprints"
-
- export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2/releases"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2/snapshots"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2.platform.plugins/releases"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2.platform.plugins/snapshots"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2.platform.blueprints/releases"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2.platform.blueprints/snapshots"
-
- export ONAPTEMPLATE_PYPIURL_org_onap_dcaegen2="${MVN_PYPISERVER_BASEURL}"
-
-    # docker registry templates are used for pulling, so use the PUBLIC registry
- export ONAPTEMPLATE_DOCKERREGURL_org_onap_dcaegen2_releases="$MVN_DOCKERREGISTRY_PUBLIC"
- export ONAPTEMPLATE_DOCKERREGURL_org_onap_dcaegen2_snapshots="${MVN_DOCKERREGISTRY_PUBLIC}/snapshots"
-
-
- TEMPLATE_FILES=$(find . -name "*-template")
- for F in $TEMPLATE_FILES; do
- F2=$(echo "$F" | sed 's/-template$//')
- cp "$F" "$F2"
- MOD=$(stat --format '%a' "$F")
- chmod "$MOD" "$F2"
- done
-
-
- TEMPLATES=$(env |grep ONAPTEMPLATE)
- if [ -z "$TEMPLATES" ]; then
- return 0
- fi
-
-    echo "====> Resolving the following templates from environment variables"
- echo "[$TEMPLATES]"
- SELFFILE=$(echo "$0" | rev | cut -f1 -d '/' | rev)
- for TEMPLATE in $TEMPLATES; do
- KEY=$(echo "$TEMPLATE" | cut -f1 -d'=')
- VALUE=$(echo "$TEMPLATE" | cut -f2 -d'=')
- VALUE2=$(echo "$TEMPLATE" | cut -f2 -d'=' |sed 's/\//\\\//g')
- set +e
- FILES=$(grep -rl "$KEY")
- set -e
-
- if [ -z "$FILES" ]; then
- continue
- fi
-
-        # assuming FILES is no longer than 2M bytes, the maximum size of a variable value on this VM
- for F in $FILES; do
- if [[ $F == *"$SELFFILE" ]]; then
- continue
- fi
- if [[ "$F" == *-template ]]; then
- continue
- fi
-
- echo "======> Resolving template $KEY to value $VALUE for file $F"
- sed -i "s/{{[[:space:]]*$KEY[[:space:]]*}}/$VALUE2/g" "$F"
- #cat "$F"
- done
-
- #if [ ! -z "$FILES" ]; then
- # echo "====> Resolving template $VALUE to value $VALUE"
- # #CMD="grep -rl \"$VALUE\" | tr '\n' '\0' | xargs -0 sed -i \"s/{{[[:space:]]*$VALUE[[:space:]]*}}/$VALUE/g\""
- # grep -rl "$KEY" | tr '\n' '\0' | xargs -0 sed -i 's/$KEY/$VALUE2/g'
- # #echo $CMD
- # #eval $CMD
- #fi
- done
-    echo "====> Done resolving templates"
-}
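The sed call above is the core of the template mechanism: every {{ KEY }} placeholder whose KEY names an exported ONAPTEMPLATE_* variable is replaced in place, with slashes pre-escaped so the value survives the s/// delimiter. The same substitution restated in Python, as a rough sketch rather than a drop-in replacement:

    import os
    import re

    def resolve_templates_sketch(text):
        # replace {{ KEY }} with the value of KEY for every ONAPTEMPLATE_* variable
        for key, value in os.environ.items():
            if key.startswith("ONAPTEMPLATE"):
                pattern = r"\{\{\s*" + re.escape(key) + r"\s*\}\}"
                text = re.sub(pattern, lambda m: value, text)
        return text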
-
-
-run_tox_test()
-{
- set -x
- CURDIR=$(pwd)
- TOXINIS=$(find . -name "tox.ini")
-    for TOXINI in $TOXINIS; do
- DIR=$(echo "$TOXINI" | rev | cut -f2- -d'/' | rev)
- cd "${CURDIR}/${DIR}"
- rm -rf ./venv-tox ./.tox
- virtualenv ./venv-tox
- source ./venv-tox/bin/activate
- pip install --upgrade pip
- pip install --upgrade tox argparse
- pip freeze
- tox
- deactivate
- rm -rf ./venv-tox ./.tox
- done
-}
-
-build_wagons()
-{
- rm -rf ./*.wgn venv-pkg
- SETUPFILES=$(find . -name "setup.py")
-
- virtualenv ./venv-pkg
- source ./venv-pkg/bin/activate
- pip install --upgrade pip
- pip install wagon
-
- CURDIR=$(pwd)
- for SETUPFILE in $SETUPFILES; do
- PLUGIN_DIR=$(dirname "$SETUPFILE")
- PLUGIN_NAME=$(grep 'name' "$SETUPFILE" | cut -f2 -d'=' | sed 's/[^0-9a-zA-Z\.]*//g')
- PLUGIN_VERSION=$(grep 'version' "$SETUPFILE" | cut -f2 -d'=' | sed 's/[^0-9\.]*//g')
-
- echo "In $PLUGIN_DIR, build plugin $PLUGIN_NAME, version $PLUGIN_VERSION"
-
- wagon create --format tar.gz "${PLUGIN_DIR}"
-
- PKG_FILE_NAMES=( "${PLUGIN_NAME}-${PLUGIN_VERSION}"*.wgn )
- echo Built package: "${PKG_FILE_NAMES[@]}"
- cd $CURDIR
- done
-
- deactivate
- rm -rf venv-pkg
-}
-
-
-upload_raw_file()
-{
- # Extract the username and password to the nexus repo from the settings file
- USER=$(xpath -q -e "//servers/server[id='$MVN_RAWREPO_SERVERID']/username/text()" "$SETTINGS_FILE")
- PASS=$(xpath -q -e "//servers/server[id='$MVN_RAWREPO_SERVERID']/password/text()" "$SETTINGS_FILE")
- NETRC=$(mktemp)
- echo "machine $MVN_RAWREPO_HOST login $USER password $PASS" > "$NETRC"
-
- REPO="$MVN_RAWREPO_BASEURL_UPLOAD"
-
- OUTPUT_FILE=$1
- EXT=$(echo "$OUTPUT_FILE" | rev |cut -f1 -d '.' |rev)
- if [ "$EXT" == 'yaml' ]; then
- OUTPUT_FILE_TYPE='text/x-yaml'
- elif [ "$EXT" == 'sh' ]; then
- OUTPUT_FILE_TYPE='text/x-shellscript'
- elif [ "$EXT" == 'gz' ]; then
- OUTPUT_FILE_TYPE='application/gzip'
- elif [ "$EXT" == 'wgn' ]; then
- OUTPUT_FILE_TYPE='application/gzip'
- else
- OUTPUT_FILE_TYPE='application/octet-stream'
- fi
-
-    # for multi-module projects, the raw repo path must match the project name, not project + module
- # FQDN is project + module
- # GROUPID is project name
- if [ "$MVN_PROJECT_ARTIFACTID" == "$MVN_PROJECT_MODULEID" ]; then
- PROJECT_NAME=${MVN_PROJECT_GROUPID}
- else
- PROJECT_NAME=${FQDN}
- fi
- if [ "$MVN_DEPLOYMENT_TYPE" == 'SNAPSHOT' ]; then
- SEND_TO="${REPO}/${PROJECT_NAME}/snapshots"
- elif [ "$MVN_DEPLOYMENT_TYPE" == 'STAGING' ]; then
- SEND_TO="${REPO}/${PROJECT_NAME}/releases"
- else
-        echo "Unrecognized deployment type, quit"
- exit
- fi
- #if [ ! -z "$MVN_PROJECT_MODULEID" ]; then
- # SEND_TO="$SEND_TO/$MVN_PROJECT_MODULEID"
- #fi
- if [ ! -z "$2" ]; then
- SEND_TO="$SEND_TO/$2"
- fi
-
- echo "Sending ${OUTPUT_FILE} to Nexus: ${SEND_TO}"
- curl -vkn --netrc-file "${NETRC}" --upload-file "${OUTPUT_FILE}" -X PUT -H "Content-Type: $OUTPUT_FILE_TYPE" "${SEND_TO}/${OUTPUT_FILE}-${TIMESTAMP}"
- curl -vkn --netrc-file "${NETRC}" --upload-file "${OUTPUT_FILE}" -X PUT -H "Content-Type: $OUTPUT_FILE_TYPE" "${SEND_TO}/${OUTPUT_FILE}"
-}
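The extension-to-Content-Type ladder above is just a lookup with a fallback; restated in Python for clarity (illustrative only, not part of the build):

    def content_type_for(filename):
        # mirrors the if/elif ladder above; wagons are gzipped tarballs
        ext = filename.rsplit(".", 1)[-1]
        return {
            "yaml": "text/x-yaml",
            "sh": "text/x-shellscript",
            "gz": "application/gzip",
            "wgn": "application/gzip",
        }.get(ext, "application/octet-stream")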
-
-
-
-upload_wagons_and_type_yamls()
-{
- SETUPFILES=$(find . -name "setup.py")
-
- CURDIR=$(pwd)
- for SETUPFILE in $SETUPFILES; do
- PLUGIN_DIR=$(dirname "$SETUPFILE")
- PLUGIN_NAME=$(grep 'name' "$SETUPFILE" | cut -f2 -d'=' | sed 's/[^0-9a-zA-Z\.]*//g')
- PLUGIN_VERSION=$(grep 'version' "$SETUPFILE" | cut -f2 -d'=' | sed 's/[^0-9\.]*//g')
- PLUGIN_VERSION_MAJOR=$(echo "$PLUGIN_VERSION" | cut -f1 -d'.')
- PLUGIN_VERSION_MAJOR_MINOR=$(echo "$PLUGIN_VERSION" | cut -f1-2 -d'.')
-
- echo "Found setup file in $PLUGIN_DIR, for plugin $PLUGIN_NAME version $PLUGIN_VERSION"
-
- TYPEFILE_NAME=$(grep -R "package_name:[[:space:]]*${PLUGIN_NAME}" | cut -f1 -d ':')
- if [ -z "$TYPEFILE_NAME" ]; then
- echo "!!! No typefile found with matching package name $PLUGIN_NAME"
- exit -1
- fi
- NEWFILENAME="${PLUGIN_NAME}"_types.yaml
- if [ "$TYPEFILE_NAME" != "$NEWFILENAME" ]; then
- echo "copy typefile to standard naming"
- cp -f "$TYPEFILE_NAME" "$NEWFILENAME"
- fi
-
- TYPEFILE_PACKAGE_VERSION=$(grep -R 'package_version' $TYPEFILE_NAME |cut -f2 -d ':' |sed -r 's/\s+//g')
- WAGONFILE_NAME=$(ls -1 $PLUGIN_NAME-$TYPEFILE_PACKAGE_VERSION-*.wgn)
- if [ -z "$WAGONFILE_NAME" ]; then
- echo "!!! No wagonfile found with matching package name and version as required in typefile: "
- echo " $TYPEFILE_NAME plugin $PLUGIN_NAME package version ${TYPEFILE_PACKAGE_VERSION}"
- exit -1
- fi
-
- upload_raw_file "$NEWFILENAME" type_files/${PLUGIN_NAME}/${PLUGIN_VERSION_MAJOR}
- upload_raw_file "$NEWFILENAME" type_files/${PLUGIN_NAME}/${PLUGIN_VERSION_MAJOR_MINOR}
- upload_raw_file "${WAGONFILE_NAME}" "plugins/${PLUGIN_NAME}"
-
- rm -r $WAGONFILE_NAME
- if [ "$TYPEFILE_NAME" != "$NEWFILENAME" ]; then
- rm -f "$NEWFILENAME"
- fi
- done
-}
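Note that each type file is uploaded twice, once under the plugin's major version and once under major.minor, so blueprints can pin either granularity. For a hypothetical plugin k8splugin at version 1.4.5, the calls above target type_files/k8splugin/1, type_files/k8splugin/1.4, and plugins/k8splugin. The version truncation is plain prefix slicing:

    def type_file_paths_sketch(plugin_name, version):
        # derive the two type-file upload prefixes, e.g. "1" and "1.4" from "1.4.5"
        parts = version.split(".")
        return ("type_files/%s/%s" % (plugin_name, parts[0]),
                "type_files/%s/%s" % (plugin_name, ".".join(parts[:2])))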
-
-upload_files_of_extension()
-{
- FILES=$(ls -1 ./*."$1")
- for F in $FILES ; do
- upload_raw_file "$F"
- done
-}
-
-
-generate_pypirc_then_publish()
-{
- set +x
- USER=$(xpath -e "//servers/server[id='$MVN_PYPISERVER_SERVERID']/username/text()" "$SETTINGS_FILE")
- PASS=$(xpath -e "//servers/server[id='$MVN_PYPISERVER_SERVERID']/password/text()" "$SETTINGS_FILE")
-
- if [[ "$MVN_PYPISERVER_BASEURL" != */ ]]; then
- MVN_PYPISERVER_BASEURL="${MVN_PYPISERVER_BASEURL}/"
- fi
-
-
- cat > ~/.pypirc <<EOL
-[distutils]
-index-servers =
- $MVN_PYPISERVER_SERVERID
-
-[$MVN_PYPISERVER_SERVERID]
-repository: $MVN_PYPISERVER_BASEURL
-username: $USER
-password: $PASS
-EOL
-
- # this may fail if a package of same version exists
- python setup.py sdist register -r "$MVN_PYPISERVER_SERVERID" upload -r "$MVN_PYPISERVER_SERVERID"
- set -x
-}
-
-
-
-build_and_push_docker()
-{
- IMAGENAME="onap/${FQDN}.${MVN_PROJECT_MODULEID}"
- IMAGENAME=$(echo "$IMAGENAME" | sed -e 's/_*$//g' -e 's/\.*$//g')
- IMAGENAME=$(echo "$IMAGENAME" | tr '[:upper:]' '[:lower:]')
-
- # use the major and minor version of the MVN artifact version as docker image version
- VERSION="${MVN_PROJECT_VERSION//[^0-9.]/}"
- VERSION2=$(echo "$VERSION" | cut -f1-2 -d'.')
-
- LFQI="${IMAGENAME}:${VERSION}-${TIMESTAMP}"
- # build a docker image
- docker build --rm -f ./Dockerfile -t "${LFQI}" ./
-
- REPO=""
- if [ $MVN_DEPLOYMENT_TYPE == "SNAPSHOT" ]; then
-        REPO=$MVN_DOCKERREGISTRY_SNAPSHOT
-    elif [ $MVN_DEPLOYMENT_TYPE == "STAGING" ]; then
-        # there is no separate staging docker registry, so use the RELEASE registry
-        REPO=$MVN_DOCKERREGISTRY_RELEASE
- else
-        echo "Failed to determine DEPLOYMENT_TYPE"
- REPO=$MVN_DOCKERREGISTRY_SNAPSHOT
- fi
- echo "DEPLOYMENT_TYPE is: $MVN_DEPLOYMENT_TYPE, repo is $REPO"
-
- if [ ! -z "$REPO" ]; then
- USER=$(xpath -e "//servers/server[id='$REPO']/username/text()" "$SETTINGS_FILE")
- PASS=$(xpath -e "//servers/server[id='$REPO']/password/text()" "$SETTINGS_FILE")
- if [ -z "$USER" ]; then
- echo "Error: no user provided"
- fi
- if [ -z "$PASS" ]; then
- echo "Error: no password provided"
- fi
- [ -z "$PASS" ] && PASS_PROVIDED="<empty>" || PASS_PROVIDED="<password>"
- echo docker login "$REPO" -u "$USER" -p "$PASS_PROVIDED"
- docker login "$REPO" -u "$USER" -p "$PASS"
-
- if [ $MVN_DEPLOYMENT_TYPE == "SNAPSHOT" ]; then
- REPO="$REPO/snapshots"
- elif [ $MVN_DEPLOYMENT_TYPE == "STAGING" ]; then
-        # there is no separate staging docker registry, so keep the base registry
- #REPO=$MVN_DOCKERREGISTRY_RELEASE
- REPO="$REPO"
- else
-        echo "Failed to determine DEPLOYMENT_TYPE"
- REPO="$REPO/unknown"
- fi
-
- OLDTAG="${LFQI}"
- PUSHTAGS="${REPO}/${IMAGENAME}:${VERSION2}-${TIMESTAMP} ${REPO}/${IMAGENAME}:${VERSION2} ${REPO}/${IMAGENAME}:${VERSION2}-latest"
- for NEWTAG in ${PUSHTAGS}
- do
- echo "tagging ${OLDTAG} to ${NEWTAG}"
- docker tag "${OLDTAG}" "${NEWTAG}"
- echo "pushing ${NEWTAG}"
- docker push "${NEWTAG}"
- OLDTAG="${NEWTAG}"
- done
- fi
-
-}
-
-
-
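The 431-line library removed above is not simply gone: as the next diff shows, mvn-phase-script.sh now downloads a shared copy of mvn-phase-lib.sh from org.onap.dcaegen2.utils at build time, so fixes to the phase library reach every DCAE repository without each one carrying its own copy.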
diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh
index 0978017..516eff9 100755
--- a/mvn-phase-script.sh
+++ b/mvn-phase-script.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
set -ex
@@ -26,66 +24,21 @@ echo "running script: [$0] for module [$1] at stage [$2]"
MVN_PROJECT_MODULEID="$1"
MVN_PHASE="$2"
-
PROJECT_ROOT=$(dirname $0)
-FQDN="${MVN_PROJECT_GROUPID}.${MVN_PROJECT_ARTIFACTID}"
-if [ "$MVN_PROJECT_MODULEID" == "__" ]; then
- MVN_PROJECT_MODULEID=""
-fi
-
-if [[ "$MVN_PROJECT_VERSION" == *SNAPSHOT ]]; then
- echo "=> for SNAPSHOT artifact build"
- MVN_DEPLOYMENT_TYPE='SNAPSHOT'
+set -e
+RELEASE_TAG=${MVN_RELEASE_TAG:-R3}
+if [ "$RELEASE_TAG" != "R1" ]; then
+ RELEASE_TAGGED_DIR="${RELEASE_TAG}/"
else
- echo "=> for STAGING/RELEASE artifact build"
- MVN_DEPLOYMENT_TYPE='STAGING'
-fi
-echo "MVN_DEPLOYMENT_TYPE is [$MVN_DEPLOYMENT_TYPE]"
-
-
-TIMESTAMP=$(date +%C%y%m%dT%H%M%S)
-
-# expected environment variables
-if [ -z "${MVN_NEXUSPROXY}" ]; then
- echo "MVN_NEXUSPROXY environment variable not set. Cannot proceed"
- exit
+    RELEASE_TAGGED_DIR="releases/"
fi
-MVN_NEXUSPROXY_HOST=$(echo "$MVN_NEXUSPROXY" |cut -f3 -d'/' | cut -f1 -d':')
-echo "=> Nexus Proxy at $MVN_NEXUSPROXY_HOST, $MVN_NEXUSPROXY"
-
-if [ -z "$WORKSPACE" ]; then
- WORKSPACE=$(pwd)
+if ! wget -O ${PROJECT_ROOT}/mvn-phase-lib.sh \
+ "$MVN_RAWREPO_BASEURL_DOWNLOAD"/org.onap.dcaegen2.utils/${RELEASE_TAGGED_DIR}scripts/mvn-phase-lib.sh; then
+    echo "Failed to download mvn-phase-lib.sh"
+ exit 1
fi
-
-if [ -z "$SETTINGS_FILE" ]; then
- echo "SETTINGS_FILE environment variable not set. Cannot proceed"
- exit
-fi
-
-
-
-# mvn phase in life cycle
-MVN_PHASE="$2"
-
-
-echo "MVN_PROJECT_MODULEID is [$MVN_PROJECT_MODULEID]"
-echo "MVN_PHASE is [$MVN_PHASE]"
-echo "MVN_PROJECT_GROUPID is [$MVN_PROJECT_GROUPID]"
-echo "MVN_PROJECT_ARTIFACTID is [$MVN_PROJECT_ARTIFACTID]"
-echo "MVN_PROJECT_VERSION is [$MVN_PROJECT_VERSION]"
-echo "MVN_NEXUSPROXY is [$MVN_NEXUSPROXY]"
-echo "MVN_RAWREPO_BASEURL_UPLOAD is [$MVN_RAWREPO_BASEURL_UPLOAD]"
-echo "MVN_RAWREPO_BASEURL_DOWNLOAD is [$MVN_RAWREPO_BASEURL_DOWNLOAD]"
-MVN_RAWREPO_HOST=$(echo "$MVN_RAWREPO_BASEURL_UPLOAD" | cut -f3 -d'/' |cut -f1 -d':')
-echo "MVN_RAWREPO_HOST is [$MVN_RAWREPO_HOST]"
-echo "MVN_RAWREPO_SERVERID is [$MVN_RAWREPO_SERVERID]"
-echo "MVN_DOCKERREGISTRY_SNAPSHOT is [$MVN_DOCKERREGISTRY_SNAPSHOT]"
-echo "MVN_DOCKERREGISTRY_RELEASE is [$MVN_DOCKERREGISTRY_RELEASE]"
-echo "MVN_DOCKERREGISTRY_PUBLIC is [$MVN_DOCKERREGISTRY_PUBLIC]"
-
-
-source "${PROJECT_ROOT}"/mvn-phase-lib.sh
+source "${PROJECT_ROOT}"/mvn-phase-lib.sh
# Customize the section below for each project
@@ -105,20 +58,34 @@ compile)
;;
test)
echo "==> test phase script"
+ run_tox_test
;;
package)
echo "==> package phase script"
- build_wagons
+ case $MVN_PROJECT_MODULEID in
+ cdap|dcae-policy|docker|relationships|k8s)
+ build_archives_for_wagons
+ build_wagons
+ ;;
+ *)
+ ;;
+ esac
;;
install)
echo "==> install phase script"
;;
deploy)
echo "==> deploy phase script"
- upload_wagons_and_type_yamls
+ case $MVN_PROJECT_MODULEID in
+ cdap|dcae-policy|docker|relationships|k8s)
+ upload_wagons_and_type_yamls
+ upload_wagon_archives
+ ;;
+ *)
+ ;;
+ esac
;;
*)
echo "==> unprocessed phase"
;;
esac
-
diff --git a/pom.xml b/pom.xml
index 26a979d..0a420ae 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,10 +1,31 @@
<?xml version="1.0"?>
+
+<!--
+================================================================================
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+-->
+
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.onap.oparent</groupId>
<artifactId>oparent</artifactId>
- <version>0.1.1</version>
+ <version>1.1.0</version>
</parent>
<groupId>org.onap.dcaegen2.platform</groupId>
<artifactId>plugins</artifactId>
@@ -17,6 +38,7 @@
<module>docker</module>
<module>relationships</module>
<module>dcae-policy</module>
+ <module>k8s</module>
</modules>
<properties>
<onap.nexus.url>https://nexus.onap.org</onap.nexus.url>
@@ -34,6 +56,10 @@
<!-- properties for Pypi server -->
<onap.nexus.pypiserver.baseurl>https://nexus3.onap.org/repository/PyPi</onap.nexus.pypiserver.baseurl>
<onap.nexus.pypiserver.serverid>onap-pypi</onap.nexus.pypiserver.serverid>
+ <!-- customize the SONARQUBE URL -->
+ <sonar.host.url>http://localhost:9000</sonar.host.url>
+ <!-- taken care of in the children -->
+ <sonar.exclusions>**/*.py</sonar.exclusions>
</properties>
<build>
<pluginManagement>
diff --git a/relationships/example_register_to_blueprint.yaml b/relationships/example_register_to_blueprint.yaml
index 52ee40b..6d01f3e 100644
--- a/relationships/example_register_to_blueprint.yaml
+++ b/relationships/example_register_to_blueprint.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/relationships/pom.xml b/relationships/pom.xml
index aba8619..4a1ef79 100644
--- a/relationships/pom.xml
+++ b/relationships/pom.xml
@@ -32,21 +32,13 @@ ECOMP is a trademark and service mark of AT&T Intellectual Property.
<url>http://maven.apache.org</url>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <sonar.skip>true</sonar.skip>
<sonar.sources>.</sonar.sources>
- <!-- customize the SONARQUBE URL -->
- <!-- sonar.host.url>http://localhost:9000</sonar.host.url -->
- <!-- below are language dependent -->
- <!-- for Python -->
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPath>coverage.xml</sonar.python.coverage.reportPath>
<sonar.language>py</sonar.language>
<sonar.pluginName>Python</sonar.pluginName>
<sonar.inclusions>**/*.py</sonar.inclusions>
- <!-- for JavaScaript -->
- <!--
- <sonar.language>js</sonar.language>
- <sonar.pluginName>JS</sonar.pluginName>
- <sonar.inclusions>**/*.js</sonar.inclusions>
- -->
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
</properties>
<build>
<finalName>${project.artifactId}-${project.version}</finalName>
diff --git a/relationships/relationship-types.yaml b/relationships/relationship-types.yaml
index d0ab59f..5625af5 100644
--- a/relationships/relationship-types.yaml
+++ b/relationships/relationship-types.yaml
@@ -1,3 +1,20 @@
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/relationships/requirements.txt b/relationships/requirements.txt
index af1659c..5d5d52e 100644
--- a/relationships/requirements.txt
+++ b/relationships/requirements.txt
@@ -1,10 +1 @@
-bottle==0.12.7
-Jinja2==2.7.2
-MarkupSafe==0.23
-networkx==1.8.1
-pika==0.9.14
-proxy-tools==0.1.0
python-consul==0.6.1
-requests==2.7.0
-requests-toolbelt==0.7.0
-six==1.10.0
diff --git a/relationships/tox.ini b/relationships/tox.ini
index 5d8f4ae..a7157be 100644
--- a/relationships/tox.ini
+++ b/relationships/tox.ini
@@ -8,4 +8,8 @@ deps=
pytest
coverage
pytest-cov
-commands=pytest --junitxml xunit-results.xml --cov {envsitepackagesdir}/relationshipplugin --cov-report=xml
+setenv=
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov relationshipplugin --cov-report xml
+ coverage xml