author     Hansen, Tony (th1395) <th1395@att.com>  2020-05-13 18:55:54 +0000
committer  Hansen, Tony (th1395) <th1395@att.com>  2020-06-02 20:36:06 +0000
commit     7e1efe3174336fa09a56c596af55ba93d7b14a91 (patch)
tree       bad0fc0c5bb0a54f69e7d046008c423c507f87d6
parent     05e95de3b9736160b4229232903e86706fb782e1 (diff)
move plugins from ccsdk to dcaegen2
copy dmaap, helm, pgaas and sshkeyshare plugins from ccsdk to dcaegen2

Change-Id: Ib257495de6c275c45f0c87a4b42ac21a2fab7979
Issue-ID: DCAEGEN2-2207
Signed-off-by: Hansen, Tony (th1395) <th1395@att.com>
-rw-r--r--  .gitignore | 17
-rw-r--r--  LICENSE.txt | 3
-rw-r--r--  clamp-policy/.gitignore | 3
-rw-r--r--  clamp-policy/LICENSE.txt | 1
-rw-r--r--  clamp-policy/clamppolicyplugin/__init__.py | 1
-rw-r--r--  clamp-policy/clamppolicyplugin/tasks.py | 1
-rw-r--r--  clamp-policy/pom.xml | 1
-rw-r--r--  clamp-policy/setup.py | 1
-rw-r--r--  clamp-policy/tests/__init__.py | 1
-rw-r--r--  clamp-policy/tests/log_ctx.py | 1
-rw-r--r--  clamp-policy/tests/mock_cloudify_ctx.py | 1
-rw-r--r--  clamp-policy/tests/mock_setup.py | 1
-rw-r--r--  clamp-policy/tests/test_tasks.py | 3
-rw-r--r--  clamp-policy/tox.ini | 6
-rw-r--r--  dcae-policy/LICENSE.txt | 5
-rw-r--r--  dcae-policy/dcaepolicy-node-type.yaml | 3
-rw-r--r--  dcae-policy/dcaepolicyplugin/__init__.py | 3
-rw-r--r--  dcae-policy/dcaepolicyplugin/discovery.py | 3
-rw-r--r--  dcae-policy/dcaepolicyplugin/tasks.py | 3
-rw-r--r--  dcae-policy/pom.xml | 1
-rw-r--r--  dcae-policy/setup.py | 3
-rw-r--r--  dcae-policy/tests/__init__.py | 3
-rw-r--r--  dcae-policy/tests/log_ctx.py | 3
-rw-r--r--  dcae-policy/tests/mock_cloudify_ctx.py | 3
-rw-r--r--  dcae-policy/tests/mock_setup.py | 3
-rw-r--r--  dcae-policy/tests/test_discovery.py | 3
-rw-r--r--  dcae-policy/tests/test_tasks.py | 3
-rw-r--r--  dcae-policy/tox.ini | 5
-rw-r--r--  dmaap/.gitignore | 4
-rw-r--r--  dmaap/LICENSE.txt | 17
-rw-r--r--  dmaap/README.md | 324
-rw-r--r--  dmaap/consulif/__init__.py | 0
-rw-r--r--  dmaap/consulif/consulif.py | 125
-rw-r--r--  dmaap/dmaap.yaml | 202
-rw-r--r--  dmaap/dmaapcontrollerif/__init__.py | 1
-rw-r--r--  dmaap/dmaapcontrollerif/dmaap_requests.py | 310
-rw-r--r--  dmaap/dmaapplugin/__init__.py | 82
-rw-r--r--  dmaap/dmaapplugin/dmaaputils.py | 29
-rw-r--r--  dmaap/dmaapplugin/dr_bridge.py | 199
-rw-r--r--  dmaap/dmaapplugin/dr_lifecycle.py | 153
-rw-r--r--  dmaap/dmaapplugin/dr_relationships.py | 219
-rw-r--r--  dmaap/dmaapplugin/mr_lifecycle.py | 143
-rw-r--r--  dmaap/dmaapplugin/mr_relationships.py | 119
-rw-r--r--  dmaap/pom.xml | 327
-rw-r--r--  dmaap/requirements.txt | 3
-rw-r--r--  dmaap/setup.py | 36
-rw-r--r--  dmaap/tests/conftest.py | 88
-rw-r--r--  dmaap/tests/test_consulif.py | 72
-rw-r--r--  dmaap/tests/test_dmaapcontrollerif.py | 113
-rw-r--r--  dmaap/tests/test_dr_lifecycle.py | 65
-rw-r--r--  dmaap/tests/test_mr_lifecycle.py | 59
-rw-r--r--  dmaap/tests/test_plugin.py | 26
-rw-r--r--  dmaap/tests/test_utils.py | 26
-rw-r--r--  dmaap/tox.ini | 36
-rw-r--r--  helm/LICENSE | 29
-rw-r--r--  helm/README.md | 12
-rw-r--r--  helm/helm-type.yaml | 147
-rw-r--r--  helm/plugin/__init__.py | 15
-rw-r--r--  helm/plugin/tasks.py | 504
-rw-r--r--  helm/plugin/tests/__init__.py | 30
-rw-r--r--  helm/plugin/tests/blueprint/blueprint.yaml | 85
-rw-r--r--  helm/plugin/tests/blueprint/plugin/test_plugin.yaml | 139
-rw-r--r--  helm/plugin/tests/test_plugin.py | 192
-rw-r--r--  helm/plugin/workflows.py | 75
-rw-r--r--  helm/pom.xml | 336
-rw-r--r--  helm/requirements.txt | 17
-rw-r--r--  helm/setup.py | 48
-rw-r--r--  helm/tox.ini | 76
-rw-r--r--  k8s/.gitignore | 71
-rw-r--r--  k8s/LICENSE.txt | 5
-rw-r--r--  k8s/configure/__init__.py | 3
-rw-r--r--  k8s/k8s-node-type.yaml | 3
-rw-r--r--  k8s/k8sclient/__init__.py | 5
-rw-r--r--  k8s/k8sclient/k8sclient.py | 1
-rw-r--r--  k8s/k8splugin/__init__.py | 3
-rw-r--r--  k8s/k8splugin/cloudify_importer.py | 1
-rw-r--r--  k8s/k8splugin/decorators.py | 3
-rw-r--r--  k8s/k8splugin/discovery.py | 3
-rw-r--r--  k8s/k8splugin/exceptions.py | 3
-rw-r--r--  k8s/k8splugin/utils.py | 3
-rw-r--r--  k8s/tests/common.py | 2
-rw-r--r--  k8s/tests/conftest.py | 3
-rw-r--r--  k8s/tests/test_decorators.py | 5
-rw-r--r--  k8s/tests/test_discovery.py | 11
-rw-r--r--  k8s/tests/test_k8sclient.py | 2
-rw-r--r--  k8s/tests/test_k8sclient_deploy.py | 2
-rw-r--r--  k8s/tests/test_tasks.py | 3
-rw-r--r--  k8s/tests/test_utils.py | 3
-rw-r--r--  k8s/tox.ini | 5
-rw-r--r--  makefile | 27
-rw-r--r--  mvn-phase-lib.sh | 16
-rwxr-xr-x  mvn-phase-script.sh | 2
-rw-r--r--  pgaas/LICENSE.txt | 17
-rw-r--r--  pgaas/MANIFEST.in | 1
-rw-r--r--  pgaas/README.md | 79
-rw-r--r--  pgaas/pgaas/__init__.py | 13
-rw-r--r--  pgaas/pgaas/logginginterface.py | 53
-rw-r--r--  pgaas/pgaas/pgaas_plugin.py | 779
-rw-r--r--  pgaas/pgaas_types.yaml | 67
-rw-r--r--  pgaas/pom.xml | 327
-rw-r--r--  pgaas/requirements.txt | 2
-rw-r--r--  pgaas/setup.py | 36
-rw-r--r--  pgaas/tests/psycopg2.py | 70
-rw-r--r--  pgaas/tests/test_plugin.py | 291
-rw-r--r--  pgaas/tox.ini | 54
-rw-r--r--  pom.xml | 7
-rw-r--r--  relationships/.gitignore | 67
-rw-r--r--  relationships/LICENSE.txt | 5
-rw-r--r--  relationships/example_register_to_blueprint.yaml | 3
-rw-r--r--  relationships/pom.xml | 1
-rw-r--r--  relationships/relationship-types.yaml | 3
-rw-r--r--  relationships/relationshipplugin/__init__.py | 3
-rw-r--r--  relationships/relationshipplugin/discovery.py | 3
-rw-r--r--  relationships/relationshipplugin/tasks.py | 5
-rw-r--r--  relationships/setup.py | 3
-rw-r--r--  relationships/tests/test_discovery.py | 3
-rw-r--r--  relationships/tox.ini | 5
-rw-r--r--  sshkeyshare/LICENSE.txt | 17
-rw-r--r--  sshkeyshare/README.md | 56
-rw-r--r--  sshkeyshare/pom.xml | 327
-rw-r--r--  sshkeyshare/requirements.txt | 1
-rw-r--r--  sshkeyshare/setup.py | 37
-rw-r--r--  sshkeyshare/sshkey_types.yaml | 17
-rw-r--r--  sshkeyshare/sshkeyshare/__init__.py | 28
-rw-r--r--  sshkeyshare/sshkeyshare/keyshare_plugin.py | 39
-rw-r--r--  sshkeyshare/tests/test_plugin.py | 14
-rw-r--r--  sshkeyshare/tox.ini | 35
127 files changed, 6979 insertions, 242 deletions
diff --git a/.gitignore b/.gitignore
index 22ce2d9..5a7dcdf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,13 +5,15 @@
.DS_Store
.project
.pydevproject
-venv
+venv/
+.venv/
+ENV/
.vscode/
-
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
+*$py.class
# C extensions
*.so
@@ -48,11 +50,14 @@ pip-delete-this-directory.txt
htmlcov/
.tox/
.coverage
+.coveragerc
.coverage.*
.cache
+.pytest_cache/
nosetests.xml
coverage.xml
*,cover
+.hypothesis/
# Translations
*.mo
@@ -64,5 +69,13 @@ coverage.xml
# Sphinx documentation
docs/_build/
+# wagon
+*.wgn
+wheels
+
# PyBuilder
target/
+
+# tox output
+xunit-results*
+htmlcov
diff --git a/LICENSE.txt b/LICENSE.txt
index 9536f0b..80a0d71 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,7 +1,7 @@
/*
* ============LICENSE_START==========================================
* ===================================================================
-* Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+* Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
* ===================================================================
*
* Unless otherwise specified, all software contained herein is licensed
@@ -34,7 +34,6 @@
*
* ============LICENSE_END============================================
*
-* ECOMP is a trademark and service mark of AT&T Intellectual Property.
*
*/
diff --git a/clamp-policy/.gitignore b/clamp-policy/.gitignore
new file mode 100644
index 0000000..06c7571
--- /dev/null
+++ b/clamp-policy/.gitignore
@@ -0,0 +1,3 @@
+# local additions to plugins .gitignore
+xunit-results*
+htmlcov
diff --git a/clamp-policy/LICENSE.txt b/clamp-policy/LICENSE.txt
index 5bf12c1..c8b67cd 100644
--- a/clamp-policy/LICENSE.txt
+++ b/clamp-policy/LICENSE.txt
@@ -1,5 +1,6 @@
================================================================================
Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/clamp-policy/clamppolicyplugin/__init__.py b/clamp-policy/clamppolicyplugin/__init__.py
index 9f32d4c..2b6f44f 100644
--- a/clamp-policy/clamppolicyplugin/__init__.py
+++ b/clamp-policy/clamppolicyplugin/__init__.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/clamppolicyplugin/tasks.py b/clamp-policy/clamppolicyplugin/tasks.py
index 40b1659..6308172 100644
--- a/clamp-policy/clamppolicyplugin/tasks.py
+++ b/clamp-policy/clamppolicyplugin/tasks.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/pom.xml b/clamp-policy/pom.xml
index 460eb24..4bfe5d1 100644
--- a/clamp-policy/pom.xml
+++ b/clamp-policy/pom.xml
@@ -2,6 +2,7 @@
<!--
================================================================================
Copyright (c) 2019-2020 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/clamp-policy/setup.py b/clamp-policy/setup.py
index 225c8d0..e3c7e94 100644
--- a/clamp-policy/setup.py
+++ b/clamp-policy/setup.py
@@ -1,6 +1,7 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/tests/__init__.py b/clamp-policy/tests/__init__.py
index 20c17e4..4f04502 100644
--- a/clamp-policy/tests/__init__.py
+++ b/clamp-policy/tests/__init__.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/tests/log_ctx.py b/clamp-policy/tests/log_ctx.py
index 89ed9ea..ba7c5ee 100644
--- a/clamp-policy/tests/log_ctx.py
+++ b/clamp-policy/tests/log_ctx.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/tests/mock_cloudify_ctx.py b/clamp-policy/tests/mock_cloudify_ctx.py
index cf40232..dfd33aa 100644
--- a/clamp-policy/tests/mock_cloudify_ctx.py
+++ b/clamp-policy/tests/mock_cloudify_ctx.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/tests/mock_setup.py b/clamp-policy/tests/mock_setup.py
index 147ba43..3ab37c6 100644
--- a/clamp-policy/tests/mock_setup.py
+++ b/clamp-policy/tests/mock_setup.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/clamp-policy/tests/test_tasks.py b/clamp-policy/tests/test_tasks.py
index f4c8d5a..21c7660 100644
--- a/clamp-policy/tests/test_tasks.py
+++ b/clamp-policy/tests/test_tasks.py
@@ -1,5 +1,6 @@
# ================================================================================
# Copyright (c) 2019 Wipro Limited Intellectual Property. All rights reserved.
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -43,4 +44,4 @@ def test_policy_get():
"""test policy_get operation on clamp.nodes.policy node"""
mock_ctx = MockCloudifyContext(node_id='policy_model_id',node_name='clamp.nodes.policy')
current_ctx.set(mock_ctx)
- tasks.policy_get() \ No newline at end of file
+ tasks.policy_get()
diff --git a/clamp-policy/tox.ini b/clamp-policy/tox.ini
index 21ecdb2..f7b4cdb 100644
--- a/clamp-policy/tox.ini
+++ b/clamp-policy/tox.ini
@@ -1,5 +1,7 @@
[tox]
-envlist = py27,py36,cov
+envlist = py27,py36,py37,py38,cov
+skip_missing_interpreters = true
+
[testenv]
# coverage can only find modules if pythonpath is set
@@ -24,6 +26,8 @@ setenv=
commands=
coverage combine
coverage xml
+ coverage report
+ coverage html
[pytest]
junit_family = xunit2
diff --git a/dcae-policy/LICENSE.txt b/dcae-policy/LICENSE.txt
index 948807c..d45be13 100644
--- a/dcae-policy/LICENSE.txt
+++ b/dcae-policy/LICENSE.txt
@@ -1,5 +1,5 @@
================================================================================
-Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,10 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END=========================================================
-ECOMP is a trademark and service mark of AT&T Intellectual Property.
-Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
===================================================================
Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
you may not use this documentation except in compliance with the License.
diff --git a/dcae-policy/dcaepolicy-node-type.yaml b/dcae-policy/dcaepolicy-node-type.yaml
index 75d2137..7a1b8ad 100644
--- a/dcae-policy/dcaepolicy-node-type.yaml
+++ b/dcae-policy/dcaepolicy-node-type.yaml
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
plugins:
dcaepolicy:
diff --git a/dcae-policy/dcaepolicyplugin/__init__.py b/dcae-policy/dcaepolicyplugin/__init__.py
index 173c1eb..5a69abb 100644
--- a/dcae-policy/dcaepolicyplugin/__init__.py
+++ b/dcae-policy/dcaepolicyplugin/__init__.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
""":policyplugin: gets the policy from policy-handler and stores it into runtime properties"""
diff --git a/dcae-policy/dcaepolicyplugin/discovery.py b/dcae-policy/dcaepolicyplugin/discovery.py
index 8612160..0dab179 100644
--- a/dcae-policy/dcaepolicyplugin/discovery.py
+++ b/dcae-policy/dcaepolicyplugin/discovery.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""client to talk to consul on standard port 8500"""
diff --git a/dcae-policy/dcaepolicyplugin/tasks.py b/dcae-policy/dcaepolicyplugin/tasks.py
index 2e62b15..0ab48f8 100644
--- a/dcae-policy/dcaepolicyplugin/tasks.py
+++ b/dcae-policy/dcaepolicyplugin/tasks.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""tasks are the cloudify operations invoked on interfaces defined in the blueprint"""
diff --git a/dcae-policy/pom.xml b/dcae-policy/pom.xml
index 6a05c1d..d62861c 100644
--- a/dcae-policy/pom.xml
+++ b/dcae-policy/pom.xml
@@ -16,7 +16,6 @@ See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END=========================================================
-ECOMP is a trademark and service mark of AT&T Intellectual Property.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
diff --git a/dcae-policy/setup.py b/dcae-policy/setup.py
index de0f29c..e862dbd 100644
--- a/dcae-policy/setup.py
+++ b/dcae-policy/setup.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""package for dcaepolicyplugin - getting policies from policy-engine through policy-handler"""
diff --git a/dcae-policy/tests/__init__.py b/dcae-policy/tests/__init__.py
index 5d59d8b..94ae183 100644
--- a/dcae-policy/tests/__init__.py
+++ b/dcae-policy/tests/__init__.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,4 +14,3 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
diff --git a/dcae-policy/tests/log_ctx.py b/dcae-policy/tests/log_ctx.py
index 7685893..03ecb78 100644
--- a/dcae-policy/tests/log_ctx.py
+++ b/dcae-policy/tests/log_ctx.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
""":@CtxLogger.log_ctx: decorator for logging the cloudify ctx before and after operation"""
diff --git a/dcae-policy/tests/mock_cloudify_ctx.py b/dcae-policy/tests/mock_cloudify_ctx.py
index fb52b43..e9a8eb9 100644
--- a/dcae-policy/tests/mock_cloudify_ctx.py
+++ b/dcae-policy/tests/mock_cloudify_ctx.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""mock cloudify context with relationships and type_hierarchy"""
diff --git a/dcae-policy/tests/mock_setup.py b/dcae-policy/tests/mock_setup.py
index cbc4a35..29c9f21 100644
--- a/dcae-policy/tests/mock_setup.py
+++ b/dcae-policy/tests/mock_setup.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""unit tests for tasks in dcaepolicyplugin"""
diff --git a/dcae-policy/tests/test_discovery.py b/dcae-policy/tests/test_discovery.py
index 07c652e..838df21 100644
--- a/dcae-policy/tests/test_discovery.py
+++ b/dcae-policy/tests/test_discovery.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2019-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""unit tests for discovery in dcaepolicyplugin"""
diff --git a/dcae-policy/tests/test_tasks.py b/dcae-policy/tests/test_tasks.py
index dc89d65..51d4878 100644
--- a/dcae-policy/tests/test_tasks.py
+++ b/dcae-policy/tests/test_tasks.py
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""unit tests for tasks in dcaepolicyplugin"""
diff --git a/dcae-policy/tox.ini b/dcae-policy/tox.ini
index 2315666..5edbe5b 100644
--- a/dcae-policy/tox.ini
+++ b/dcae-policy/tox.ini
@@ -1,5 +1,6 @@
[tox]
-envlist = py27,py36,cov
+envlist = py27,py36,py37,py38,cov
+skip_missing_interpreters = true
[testenv]
# coverage can only find modules if pythonpath is set
@@ -27,6 +28,8 @@ setenv=
commands=
coverage combine
coverage xml
+ coverage report
+ coverage html
[pytest]
junit_family = xunit2
diff --git a/dmaap/.gitignore b/dmaap/.gitignore
new file mode 100644
index 0000000..9ef55c9
--- /dev/null
+++ b/dmaap/.gitignore
@@ -0,0 +1,4 @@
+# local additions to plugins .gitignore
+wheels
+cdap.zip
+docker.zip
diff --git a/dmaap/LICENSE.txt b/dmaap/LICENSE.txt
new file mode 100644
index 0000000..86c0033
--- /dev/null
+++ b/dmaap/LICENSE.txt
@@ -0,0 +1,17 @@
+============LICENSE_START=======================================================
+org.onap.dcaegen2
+================================================================================
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
diff --git a/dmaap/README.md b/dmaap/README.md
new file mode 100644
index 0000000..55ac621
--- /dev/null
+++ b/dmaap/README.md
@@ -0,0 +1,324 @@
+## Cloudify DMaaP Plugin
+Cloudify plugin for creating and managing DMaaP Data Router feeds and subscriptions and
+DMaaP Message Router topics. The plugin uses the DMaaP Bus Controller API.
+
+### Plugin Support for DMaaP Data Router
+#### Plugin Types for DMaaP Data Router
+The Cloudify type definitions for DMaaP Data Router nodes and relationships
+are defined in [`dmaap.yaml`](./dmaap.yaml).
+
+There are four node types for DMaaP Data Router:
+
+- `dcaegen2.nodes.Feed`: This type represents a feed that does not yet
+exist and that should be created when the install workflow is
+run against a blueprint that contains a node of this type.
+
+Property|Type|Required?|Description |
+--------|----|---------|---------------------------------------
+feed_name|string|no|a name that identifies the feed (plugin will generate if absent)
+feed_version|string|no|version number for the feed (feed_name + feed_version uniquely identify the feed in DR)
+feed_description|string|no|human-readable description of the feed
+aspr_classification|string|no|AT&T ASPR classification of the feed
+
+
+- `dcaegen2.nodes.ExistingFeed`: This type represents a feed that
+already exists. Nodes of this type are placed in a blueprint so
+that other nodes in the blueprint can be set up as publishers or
+subscribers to the feed. The table below shows the properties that a node
+of this type may have.
+
+Property|Type|Required?|Description
+--------|----|---------|----------------------------------------
+feed_id|string|no|Feed identifier assigned by DMaaP when the feed was created
+feed_name|string|no|a name that identifies the feed
+
+- `dcaegen2.nodes.ExternalTargetFeed`: This type represents a feed created in an external DMaaP
+environment (i.e., an environment that the plugin cannot access to make provisioning requests, such as
+a shared corporate system). Nodes of this type are placed in a blueprint so that other feed nodes of
+type `dcaegen2.nodes.Feed` or `dcaegen2.nodes.ExistingFeed` can be set up to "bridge" to external feeds by
+publishing data to the external feeds. The table below shows the properties that a node of this type
+may have.
+
+Property|Type|Required?|Description
+--------|----|---------|----------------------------------------
+url|string|yes|The publish URL of the external feed.
+username|string|yes|The username to be used when delivering to the external feed
+userpw|string|yes|The password to be used when delivering to the external feed
+
+_Note: These properties are usually obtained by manually creating a feed in the external
+DMaaP DR system and then creating a publisher for that feed._
+
+- `dcaegen2.nodes.ExternalSourceFeed`: This type represents a feed created in an external DMaaP
+environment (i.e., an environment that the plugin cannot access to make provisioning requests, such as
+a shared corporate system). Nodes of this type are placed in a blueprint so that they can be set up to
+"bridge" to other feed nodes of type `dcaegen2.nodes.Feed` or `dcaegen2.nodes.ExistingFeed`. This type
+has no node properties, but when a bridge is set up, the url, username, and password are attached to the
+node as runtime_properties, using the name of the target feed node as the top-level key.
+
+There are five relationship types for DMaaP Data Router:
+
+- `dcaegen2.relationships.publish_files`, used to
+indicate that the relationship's source node is a publisher to the
+Data Router feed represented by the relationship's target node.
+- `dcaegen2.relationships.subscribe_to_files`, used to
+indicate that the relationship's source node is a subscriber to the
+Data Router feed represented by the relationship's target node.
+- `dcaegen2.relationships.bridges_to`, used to indicate that the relationship's source
+node (a `dcaegen2.nodes.Feed` or `dcaegen2.nodes.ExistingFeed`) should be set up
+to forward data ("bridge") to the relationship's target feed (another `dcaegen2.nodes.Feed` or
+`dcaegen2.nodes.ExistingFeed`).
+- `dcaegen2.relationships.bridges_to_external`, used to indicate that the relationship's source
+node (a `dcaegen2.nodes.Feed` or `dcaegen2.nodes.ExistingFeed`) should be set up
+to forward data ("bridge") to the relationship's target node (a feed in an external DMaaP system,
+represented by a `dcaegen2.nodes.ExternalTargetFeed` node).
+- `dcaegen2.relationships.bridges_from_external_to_internal`, used to indicate that the relationship's source
+node (a feed in an external DMaaP system, represented by a `dcaegen2.nodes.ExternalSourceFeed` node) should be set up to forward data ("bridge")
+to the relationship's target node (an internal ONAP feed, represented by a `dcaegen2.nodes.Feed` or `dcaegen2.nodes.ExistingFeed` node).
+
+The plugin code implements the lifecycle operations needed to create and
+delete feeds and to add and remove publishers and subscribers. It also implements
+the operations needed to set up bridging between feeds.
+
+#### Interaction with Other Plugins
+When creating a new feed or processing a reference to an existing feed,
+the plugin operates independently of other plugins.
+
+When processing a `dcaegen2.relationships.publish_files` relationship or a
+`dcaegen2.relationships.subscribe_to_files` relationship, this plugin needs
+to obtain data from the source node and, in the case of `publish_files`, provide
+data to the source node. Certain conventions are therefore needed for
+passing data between this plugin and the plugins responsible for the source
+nodes in these relationships. In Cloudify, the mechanism for
+sharing data among plugins is the `ctx.instance.runtime_properties` dictionary
+associated with each node.
+
+A given source node may have relationships with several feeds. For example, an ONAP DCAE
+data collector might publish two different types of data to two different feeds. An ONAP DCAE
+analytics module might subscribe to one feed to get input for its processing and
+publish its results to a different feed. When this DMaaP plugin and the plugin for the
+source node exchange information, they need to do so in a way that lets them distinguish
+among different feeds. We do this through a simple convention: for each source node
+to feed relationship, the source node plugin will create a property in the source node's
+`runtime_properties` dictionary. The name of the property will be the same as the
+name of the target node of the relationship. For instance, if a node has a
+`publish_files` relationship with a target node named `feed00`, then the plugin that's
+responsible for managing the source node will create an entry in the source node's
+`runtime_properties` dictionary named `feed00`. This entry itself will be a dictionary.
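+
+As a rough illustration (not actual plugin code), a source-node plugin might seed this
+entry as sketched below; the feed node name `feed00` and location `loc00` are hypothetical,
+and the DMaaP plugin fills in the remaining properties afterward:
+```
+from cloudify import ctx
+
+# Seed the data exchange entry for the publish_files relationship whose
+# target feed node is named "feed00". Only "location" comes from the
+# source-node plugin; the DMaaP plugin adds publish_url, log_url,
+# username, and password.
+ctx.instance.runtime_properties['feed00'] = {'location': 'loc00'}
+```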
+
+The content of this data exchange dictionary depends on whether the source node is a
+publisher (i.e., the relationship is `publish_files`) or a subscriber (i.e., the
+relationship is `subscribe_to_files`).
+
+For the `publish_files` relationship, the data exchange dictionary has the following
+properties:
+
+Property|Set by|Description
+--------|------|------------------------------------------------
+location|source node plugin|the DMaaP location for the publisher, used to set up routing
+publish_url|DMaaP plugin|the URL to which the publisher makes Data Router publish requests
+log_url|DMaaP plugin|the URL from which log data for the feed can be obtained
+username|DMaaP plugin|the username (generated by the DMaaP plugin) the publisher uses to authenticate to Data Router
+password|DMaaP plugin|the password (generated by the DMaaP plugin) the publisher uses to authenticate to Data Router
+
+For the `subscribe_to_files` relationship, the data exchange dictionary has the following
+properties:
+
+Property|Set by|Description
+--------|------|------------------------------------------------
+location|source node plugin|the DMaaP location for the subscriber, used to set up routing
+delivery_url|source node plugin|the URL to which the Data Router should deliver files
+username|source node plugin|the username Data Router uses to authenticate to the subscriber when delivering files
+password|source node plugin|the password Data Router uses to authenticate to the subscriber when delivering files
+
+### Plugin Support for DMaaP Message Router
+#### Plugin Types for DMaaP Message Router
+The Cloudify type definitions for DMaaP Message Router nodes and relationships
+are defined in [`dmaap.yaml`](./dmaap.yaml).
+
+There are two node types for DMaaP Message Router:
+
+- `dcaegen2.nodes.Topic`: This type represents a topic that does not yet
+exist and that should be created when the install workflow is
+run against a blueprint that contains a node of this type.
+
+Property|Type|Required?|Description
+--------|----|---------|---------------------------------------
+topic_name|string|no|a name that uniquely identifies the topic (plugin will generate one if the value is absent, an empty string, or contains only whitespace)
+topic_description|string|no|human-readable description of the topic
+txenable|boolean|no|flag indicating whether transactions are enabled for this topic
+replication_case|string|no|type of replication required for the topic (defaults to no replication)
+global_mr_url|string|no|Global MR host name for replication to a global MR instance
+
+Note: In order to set up topics, a user should be familiar with message router and how it is configured,
+and this README is not the place to explain the details of message router. Here are a couple of pieces of
+information that might be helpful.
+Currently, the allowed values for `replication_case` are:
+
+- `REPLICATION_NONE`
+- `REPLICATION_EDGE_TO_CENTRAL`
+- `REPLICATION_EDGE_TO_CENTRAL_TO_GLOBAL`
+- `REPLICATION_CENTRAL_TO_EDGE`
+- `REPLICATION_CENTRAL_TO_GLOBAL`
+- `REPLICATION_GLOBAL_TO_CENTRAL`
+- `REPLICATION_GLOBAL_TO_CENTRAL_TO_EDGE`
+
+The `global_mr_url` is actually a host name, not a full URL. It points to a host in a global message router
+cluster. (A 'global' message router cluster is one that's not part of ONAP.)
+
+- `dcaegen2.nodes.ExistingTopic`: This type represents a topic that
+already exists. Nodes of this type are placed in a blueprint so
+that other nodes in the blueprint can be set up as publishers or
+subscribers to the topic. The table below shows the properties that a node
+of this type may have.
+
+Property|Type|Required?|Description
+--------|----|---------|----------------------------------------
+fqtn|string|no|fully-qualified topic name for the topic
+topic_name|string|no|a name that identifies the topic
+
+#### Interaction with Other Plugins
+When creating a new topic or processing a reference to an existing topic,
+the plugin operates independently of other plugins.
+
+When processing a `dcaegen2.relationships.publish_events` relationship or a
+`dcaegen2.relationships.subscribe_to_events` relationship, this plugin needs
+to obtain data from and provide data to the source node. Certain conventions are therefore needed for
+passing data between this plugin and the plugins responsible for the source
+nodes in these relationships. In Cloudify, the mechanism for
+sharing data among plugins is the `ctx.instance.runtime_properties` dictionary
+associated with each node.
+
+A given source node may have relationships with several topics. For example, an ONAP DCAE
+analytics module might subscribe to one topic to get input for its processing and
+publish its results to a different topic. When this DMaaP plugin and the plugin for the
+source node exchange information, they need to do so in a way that lets them distinguish
+among different topics. We do this through a simple convention: for each source node
+to topic relationship, the source node plugin will create a property in the source node's
+`runtime_properties` dictionary. The name of the property will be the same as the
+name of the target node of the relationship. For instance, if a node has a
+`publish_events` relationship with a target node named `topic00`, then the plugin that's
+responsible for managing the source node will create an entry in the source node's
+`runtime_properties` dictionary named `topic00`. This entry itself will be a dictionary.
+
+For both types of relationship, the data exchange dictionary has the following
+properties:
+
+Property|Set by|Description
+--------|------|------------------------------------------------
+location|source node plugin|the DMaaP location for the publisher or subscriber, used to set up routing
+client_role|source node plugin|the AAF client role that's requesting publish or subscribe access to the topic
+topic_url|DMaaP plugin|the URL for accessing the topic to publish or receive events
+
+### Interaction with Consul configuration store
+In addition to storing the results of DMaaP Data Router and DMaaP Message Router provisioning operations in `runtime_properties`,
+the DMaaP plugin also stores these results into the ONAP configuration store, which resides in a
+[Consul key-value store](https://www.consul.io/). This allows DMaaP clients (components that act as publishers, subscribers, or both)
+to retrieve their DMaaP configuration information from Consul, rather than having the plugin that deploys the client directly
+configure the client using data in `runtime_properties`.
+
+The `runtime_properties` for a client must contain a property called `service_component_name`. If this property is not present,
+the plugin will raise a `NonRecoverableError` and cause the installation to fail.
+
+If `service_component_name` is present, then the plugin will use a Consul key formed by appending
+the fixed suffix `:dmaap` to the value of `service_component_name`. For example, if the `service_component_name`
+is `client123`, the plugin will use `client123:dmaap` as the key for storing DMaaP information into Consul.
+Information for all of the feeds and topics for a client is stored under the same key.
+
+The value stored is a nested JSON object. At the top level of the object are properties representing each topic or feed
+for which the component is a publisher or subscriber. The name of the property is the node name of the target feed or topic.
+The value of the property is another JSON object that corresponds to the dictionary that the plugin created in
+`runtime_properties` corresponding to the target feed or topic. Note that the information in Consul includes
+all of the properties for the feed or topic, those set by the source node plugin as well as those set by the DMaaP plugin.
+
+Examples:
+
+Data Router publisher, target feed `feed00`:
+```
+{
+ "feed00": {
+ "username": "rC9QR51I",
+ "log_url": "https://dmaap.example.com/feedlog/972",
+ "publish_url": "https://dmaap.example.com/publish/972",
+ "location": "loc00",
+ "password": "QOQeUh5KLR",
+ "publisher_id": "972.360gm"
+ }
+}
+```
+
+Data Router subscriber, target feed `feed01`:
+```
+{
+ "feed01": {
+ "username": "drdeliver",
+ "password": "1loveDataR0uter",
+ "location": "loc00",
+ "delivery_url": "https://example.com/whatever",
+ "subscriber_id": "1550"
+ }
+}
+```
+
+Message Router publisher to `topic00`, subscriber to `topic01`. Note how each topic
+appears as a top-level property in the object.
+```
+{
+ "topic00": {
+ "topic_url": "https://dmaap.example.com:3905/events/org.onap.dcaegen2.dmaap.FTL2.outboundx",
+ "client_role": "org.onap.dcaegen2.member",
+ "location": "loc00",
+ "client_id": "1494621774522"
+ },
+ "topic01": {
+ "topic_url": "https://dmaap.example.com:3905/events/org.onap.dcaegen2.dmaap.FTL2.inboundx",
+ "client_role": "org.onap.dcaegen2.member",
+ "location": "loc00",
+ "client_id": "1494621778627"
+ }
+}
+```
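+
+A client (or its deploying plugin) could read this stored configuration back with a sketch
+like the following, assuming the `python-consul` library, a local Consul agent, and a
+hypothetical component named `client123`:
+```
+import json
+
+import consul
+
+c = consul.Consul()  # defaults to the local agent on port 8500
+# Key convention: <service_component_name>:dmaap
+index, raw = c.kv.get('client123:dmaap')
+dmaap_config = json.loads(raw['Value']) if raw else {}
+```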
+
+### Packaging and installing
+The DMaaP plugin is meant to be used as a [Cloudify managed plugin](http://docs.getcloudify.org/3.4.0/plugins/using-plugins/). Managed plugins
+are packaged using [`wagon`](https://github.com/cloudify-cosmo/wagon).
+
+To package this plugin, execute the following command in the top-level directory of this plugin, from a Python environment in which `wagon` has been installed:
+```
+wagon create -s . -r -o /path/to/directory/for/wagon/output
+```
+Once the wagon file is built, it can be uploaded to a Cloudify Manager host using the `cfy plugins upload` command described in the documentation above.
+
+Managed plugins can also be loaded at the time a Cloudify Manager host is installed, via the installation blueprint and inputs file. We expect that this plugin will
+be loaded at Cloudify Manager installation time, and that `cfy plugins upload` will be used only for delivering patches between releases.
+
+### Configuration
+The plugin needs to be configured with certain parameters needed to access the DMaaP Bus Controller. In keeping with the ONAP architecture, this information is
+stored in Consul.
+
+The plugin finds the address and port of the DMaaP Bus Controller using the Consul service discovery facility. The plugin expects the Bus Controller to be
+registered under the name `dmaap_bus_controller`.
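+
+This lookup mirrors `get_service` in `consulif/consulif.py` (added by this change). A
+hedged sketch using `python-consul`, assuming a reachable Consul agent:
+```
+import consul
+
+c = consul.Consul()
+index, entries = c.catalog.service('dmaap_bus_controller')
+service = entries[0]  # take the first listing if several are registered
+# "External" services carry only the node Address; others set ServiceAddress.
+address = service['ServiceAddress'] or service['Address']
+port = service['ServicePort']
+```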
+
+Additional parameters come from the `dmaap` key in the Cloudify Manager's Consul configuration, which is stored in the Consul KV store under the key name
+`cloudify_manager`. The table below lists the properties in the configuration:
+
+Property|Type|Required?|Default|Description
+--------|----|---------|-------|--------------------------------
+`username`|string|Yes|(none)|The username for logging into DMaaP Bus Controller
+`password`|string|Yes|(none)|The password for logging into DMaaP Bus Controller
+`owner`|string|Yes|(none)|The name to be used as the owner for entities created by the plugin
+`protocol`|string|No|`https`|The protocol (URL scheme) used to access the DMaaP bus controller (`http` or `https`)
+`path`|string|No|`webapi`|The path to the root of the DMaaP Bus Controller API endpoint
+
+Here is an example of a Cloudify Manager configuration object showing only the `dmaap` key:
+```
+{
+ "dmaap": {
+ "username": "dmaap.client@dcaegen2orch.onap.org",
+ "password": "guessmeifyoucan"
+ "owner": "dcaegen2orc"
+ },
+
+ (other configuration here)
+
+}
+```
diff --git a/dmaap/consulif/__init__.py b/dmaap/consulif/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/dmaap/consulif/__init__.py
diff --git a/dmaap/consulif/consulif.py b/dmaap/consulif/consulif.py
new file mode 100644
index 0000000..a865df4
--- /dev/null
+++ b/dmaap/consulif/consulif.py
@@ -0,0 +1,125 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+import consul
+import json
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+
+class ConsulHandle(object):
+ '''
+ Provide access to Consul KV store and service discovery
+ '''
+
+ def __init__(self, api_url, user, password, logger):
+ '''
+ Constructor
+ '''
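+        # NOTE: user, password, and logger are accepted but not currently used here.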
+ u = urlparse(api_url)
+ self.ch = consul.Consul(host=u.hostname, port=u.port, scheme=u.scheme)
+
+ def get_config(self, key):
+ '''
+ Get configuration information from Consul using the provided key.
+ It should be in JSON form. Convert it to a dictionary
+ '''
+ (index, val) = self.ch.kv.get(key)
+ config = json.loads(val['Value']) # will raise ValueError if not JSON, let it propagate
+ return config
+
+    def get_service(self, service_name):
+ '''
+ Look up the service named service_name in Consul.
+ Return the service address and port.
+ '''
+ (index, val) = self.ch.catalog.service(service_name)
+ if len(val) > 0: # catalog.service returns an empty array if service not found
+ service = val[0] # Could be multiple listings, but we take the first
+ if ('ServiceAddress' in service) and (len(service['ServiceAddress']) > 0):
+ service_address = service['ServiceAddress'] # Most services should have this
+ else:
+ service_address = service['Address'] # "External" services will have this only
+ service_port = service['ServicePort']
+ else:
+ raise Exception('Could not find service information for "{0}"'.format(service_name))
+
+ return service_address, service_port
+
+ def add_to_entry(self, key, add_name, add_value):
+ '''
+ Find 'key' in consul.
+ Treat its value as a JSON string representing a dict.
+ Extend the dict by adding an entry with key 'add_name' and value 'add_value'.
+ Turn the resulting extended dict into a JSON string.
+ Store the string back into Consul under 'key'.
+ Watch out for conflicting concurrent updates.
+
+ Example:
+ Key 'xyz:dmaap' has the value '{"feed00": {"feed_url" : "http://example.com/feeds/999"}}'
+ add_to_entry('xyz:dmaap', 'topic00', {'topic_url' : 'http://example.com/topics/1229'})
+ should result in the value for key 'xyz:dmaap' in consul being updated to
+ '{"feed00": {"feed_url" : "http://example.com/feeds/999"}, "topic00" : {"topic_url" : "http://example.com/topics/1229"}}'
+ '''
+
+ while True: # do until update succeeds
+ (index, val) = self.ch.kv.get(key) # index gives version of key retrieved
+
+ if val is None: # no key yet
+ vstring = '{}'
+ mod_index = 0 # Use 0 as the cas index for initial insertion of the key
+ else:
+ vstring = val['Value']
+ mod_index = val['ModifyIndex']
+
+ # Build the updated dict
+ # Exceptions just propagate
+ v = json.loads(vstring)
+ v[add_name] = add_value
+ new_vstring = json.dumps(v)
+
+ updated = self.ch.kv.put(key, new_vstring, cas=mod_index) # if the key has changed since retrieval, this will return false
+ if updated:
+ break
+
+
+    def delete_entry(self, entry_name):
+ '''
+ Delete an entire key-value entry whose key is 'entry_name' from the Consul KV store.
+
+ Note that the kv.delete() operation always returns True,
+        whether or not an entry with key 'entry_name' exists. This doesn't seem like
+ a great design, but it means it's safe to try to delete the same entry repeatedly.
+
+        Note also that in our application of this plugin, the uninstall workflow will always delete all of the topics and
+ feeds we've stored into the 'component_name:dmaap' entry.
+
+ Given the two foregoing notes, it is safe for this plugin to attempt to delete the entire
+ 'component_name:dmaap' entry any time it performs an 'unlink' operation for a publishes or
+ subscribes relationship. The first unlink will actually remove the entry, the subsequent ones
+ will harmlessly try to remove it again.
+
+ The 'correct' approach would be to have a delete_from_entry(self, key, delete_name) that fetches
+ the entry from Consul, removes only the topic or feed being unlinked, and then puts the resulting
+        entry back into Consul. It would be very similar to add_to_entry. When there's nothing left
+ in the entry, then the entire entry would be deleted.
+ '''
+ self.ch.kv.delete(entry_name)
diff --git a/dmaap/dmaap.yaml b/dmaap/dmaap.yaml
new file mode 100644
index 0000000..5b79f9b
--- /dev/null
+++ b/dmaap/dmaap.yaml
@@ -0,0 +1,202 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+
+# Types and relationships for DMaaP data router feeds
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+plugins:
+ dmaapplugin:
+ executor: 'central_deployment_agent'
+ package_name: dmaap
+ package_version: 1.4.0
+
+
+node_types:
+
+ # Data Router feed to be created
+ dcaegen2.nodes.Feed:
+ derived_from: cloudify.nodes.Root
+
+ properties:
+ feed_name:
+ type: string
+ required: false
+ feed_version:
+ type: string
+ required: false
+ feed_description:
+ type: string
+ required: false
+ aspr_classification:
+ type: string
+ required: false
+ useExisting:
+ type: boolean
+ required: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation:
+ dmaapplugin.dmaapplugin.dr_lifecycle.create_feed
+ delete:
+ implementation:
+ dmaapplugin.dmaapplugin.dr_lifecycle.delete_feed
+
+ # Existing Data Router feed to be used as target for publishing/subscribing
+ dcaegen2.nodes.ExistingFeed:
+ derived_from: cloudify.nodes.Root
+
+ properties:
+ feed_id:
+ type: string
+ required: false
+ feed_name:
+ type: string
+ required: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ configure:
+ implementation:
+ dmaapplugin.dmaapplugin.dr_lifecycle.get_existing_feed
+
+ # Existing Global Data Router feed (created via Invenio) to be used as target for bridging
+ dcaegen2.nodes.ExternalTargetFeed:
+ derived_from: cloudify.nodes.Root
+
+ properties:
+ url:
+ type: string
+ required: true
+ username:
+ type: string
+ required: true
+ userpw:
+ type: string
+ required: true
+
+ # Global Data Router feed to be used as a source for bridging
+ # Has no properties
+ dcaegen2.nodes.ExternalSourceFeed:
+ derived_from: cloudify.nodes.Root
+
+ # Message Router topic to be created
+ dcaegen2.nodes.Topic:
+ derived_from: cloudify.nodes.Root
+
+ properties:
+ topic_name:
+ type: string
+ required: false
+ topic_description:
+ type: string
+ required: false
+ txenable:
+ type: boolean
+ required: false
+ replication_case:
+ type: string
+ required: false
+ global_mr_url:
+ type: string
+ required: false
+ useExisting:
+ type: boolean
+ required: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation:
+ dmaapplugin.dmaapplugin.mr_lifecycle.create_topic
+ delete:
+ implementation:
+ dmaapplugin.dmaapplugin.mr_lifecycle.delete_topic
+
+ # Existing Message Router topic to be used as target for publishing/subscribing
+ dcaegen2.nodes.ExistingTopic:
+ derived_from: cloudify.nodes.Root
+
+ properties:
+ fqtn:
+ type: string
+ required: false
+ topic_name:
+ type: string
+ required: false
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ configure:
+ implementation:
+ dmaapplugin.dmaapplugin.mr_lifecycle.get_existing_topic
+
+relationships:
+
+ dcaegen2.relationships.publish_files:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.dr_relationships.add_dr_publisher
+ unlink: dmaapplugin.dmaapplugin.dr_relationships.delete_dr_publisher
+
+ dcaegen2.relationships.subscribe_to_files:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.dr_relationships.add_dr_subscriber
+ unlink: dmaapplugin.dmaapplugin.dr_relationships.delete_dr_subscriber
+
+ dcaegen2.relationships.publish_events:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.mr_relationships.add_mr_publisher
+ unlink: dmaapplugin.dmaapplugin.mr_relationships.delete_mr_client
+
+ dcaegen2.relationships.subscribe_to_events:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.mr_relationships.add_mr_subscriber
+ unlink: dmaapplugin.dmaapplugin.mr_relationships.delete_mr_client
+
+ dcaegen2.relationships.bridges_to:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.dr_bridge.create_dr_bridge
+ unlink: dmaapplugin.dmaapplugin.dr_bridge.remove_dr_bridge
+
+ dcaegen2.relationships.bridges_to_external:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.dr_bridge.create_external_dr_bridge
+ unlink: dmaapplugin.dmaapplugin.dr_bridge.remove_dr_bridge
+
+ dcaegen2.relationships.bridges_from_external_to_internal:
+ derived_from: cloudify.relationships.connected_to
+ target_interfaces:
+ cloudify.interfaces.relationship_lifecycle:
+ preconfigure: dmaapplugin.dmaapplugin.dr_bridge.create_external_source_dr_bridge
+ unlink: dmaapplugin.dmaapplugin.dr_bridge.remove_dr_bridge
+
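+# Illustrative blueprint fragment using the types and relationships above.
+# This is a sketch only: "my_topic" and "my_component" are hypothetical names,
+# and the component's node type is a placeholder.
+#
+#   node_templates:
+#     my_topic:
+#       type: dcaegen2.nodes.Topic
+#       properties:
+#         topic_name: example-topic
+#     my_component:
+#       type: some.component.NodeType   # hypothetical
+#       relationships:
+#         - type: dcaegen2.relationships.publish_events
+#           target: my_topic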
diff --git a/dmaap/dmaapcontrollerif/__init__.py b/dmaap/dmaapcontrollerif/__init__.py
new file mode 100644
index 0000000..611169f
--- /dev/null
+++ b/dmaap/dmaapcontrollerif/__init__.py
@@ -0,0 +1 @@
+# DMaaP Bus Controller interface library
\ No newline at end of file
diff --git a/dmaap/dmaapcontrollerif/dmaap_requests.py b/dmaap/dmaapcontrollerif/dmaap_requests.py
new file mode 100644
index 0000000..039643d
--- /dev/null
+++ b/dmaap/dmaapcontrollerif/dmaap_requests.py
@@ -0,0 +1,310 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+import requests
+
+### "Constants"
+FEEDS_PATH = '/feeds'
+PUBS_PATH = '/dr_pubs'
+SUBS_PATH = '/dr_subs'
+TOPICS_PATH = '/topics'
+CLIENTS_PATH = '/mr_clients'
+LOCATIONS_PATH = '/dcaeLocations'
+
+class DMaaPControllerHandle(object):
+ '''
+ A simple wrapper class to map DMaaP bus controller API calls into operations supported by the requests module
+ '''
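+ # Illustrative usage (endpoint and credentials are hypothetical; the URL shape
+ # matches the DMAAP_API_URL built in dmaapplugin/__init__.py):
+ #   import logging
+ #   dmc = DMaaPControllerHandle("https://dmaap-bc:8443/webapi", "user", "pw",
+ #                               logging.getLogger(__name__))
+ #   resp = dmc.get_feed_info("42")
+ #   resp.raise_for_status()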
+
+ def __init__(self, api_url, user, password, logger,
+ feeds_path = FEEDS_PATH,
+ pubs_path = PUBS_PATH,
+ subs_path = SUBS_PATH,
+ topics_path = TOPICS_PATH,
+ clients_path = CLIENTS_PATH):
+ '''
+ Constructor
+ '''
+ self.api_url = api_url # URL for the root of the Controller resource tree, no trailing "/"
+ self.auth = (user, password) # user name and password for HTTP basic auth
+ self.logger = logger
+ self.feeds_path = feeds_path
+ self.pubs_path = pubs_path
+ self.subs_path = subs_path
+ self.topics_path = topics_path
+ self.clients_path = clients_path
+
+
+ ### INTERNAL FUNCTIONS ###
+
+ def _make_url(self, path):
+ '''
+ Make a full URL given the path relative to the root
+ '''
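+ # e.g. _make_url("feeds") returns self.api_url + "/feeds"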
+ if not path.startswith('/'):
+ path = '/' + path
+
+ return self.api_url + path
+
+ def _get_resource(self, path):
+ '''
+ Get the DMaaP resource at path, where path is relative to the root.
+ '''
+ url = self._make_url(path)
+ self.logger.info("Querying URL: {0}".format(url))
+ return requests.get(url, auth=self.auth)
+
+ def _create_resource(self, path, resource_content):
+ '''
+ Create a DMaaP resource by POSTing to the resource collection
+ identified by path (relative to root), using resource_content as the body of the post
+ '''
+ url = self._make_url(path)
+ self.logger.info("Posting to URL: {0} with body: {1}".format(url, resource_content))
+ return requests.post(url, auth=self.auth, json=resource_content)
+
+ def _delete_resource(self, path):
+ '''
+ Delete the DMaaP resource at path, where path is relative to the root.
+ '''
+ url = self._make_url(path)
+ self.logger.info("Deleting URL: {0}".format(url))
+ return requests.delete(url, auth=self.auth)
+
+ ### PUBLIC API ###
+
+ # Data Router Feeds
+ def create_feed(self, name, version=None, description=None, aspr_class=None, owner=None, useExisting=None):
+ '''
+ Create a DMaaP data router feed with the given feed name
+ and (optionally) feed version, feed description, ASPR classification,
+ owner, and useExisting flag
+ '''
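+ # The request body assembled below looks like this (values are illustrative):
+ #   {"feedName": "example-feed", "feedVersion": "1.0", "feedDescription": "...",
+ #    "asprClassification": "unclassified", "owner": "..."}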
+ feed_definition = {'feedName' : name}
+ if version:
+ feed_definition['feedVersion'] = version
+ if description:
+ feed_definition['feedDescription'] = description
+ if aspr_class:
+ feed_definition['asprClassification'] = aspr_class
+ if owner:
+ feed_definition['owner'] = owner
+ feeds_path_query = self.feeds_path
+ if useExisting == True: # It's a boolean!
+ feeds_path_query += "?useExisting=true"
+
+ return self._create_resource(feeds_path_query, feed_definition)
+
+ def get_feed_info(self, feed_id):
+ '''
+ Get the representation of the DMaaP data router feed whose feed id is feed_id.
+ '''
+ return self._get_resource("{0}/{1}".format(self.feeds_path, feed_id))
+
+ def get_feed_info_by_name(self, feed_name):
+ '''
+ Get the representation of the DMaaP data router feed whose feed name is feed_name.
+ '''
+ feeds = self._get_resource("{0}".format(self.feeds_path))
+ feed_list = feeds.json()
+ for feed in feed_list:
+ if feed["feedName"] == feed_name:
+ self.logger.info("Found feed with {0}".format(feed_name))
+ feed_id = feed["feedId"]
+ return self._get_resource("{0}/{1}".format(self.feeds_path, feed_id))
+
+ self.logger.info("feed_name {0} not found".format(feed_name))
+ return None
+
+ def delete_feed(self, feed_id):
+ '''
+ Delete the DMaaP data router feed whose feed id is feed_id.
+ '''
+ return self._delete_resource("{0}/{1}".format(self.feeds_path, feed_id))
+
+ # Data Router Publishers
+ def add_publisher(self, feed_id, location, username, password, status=None):
+ '''
+ Add a publisher to the feed with ID feed_id, at location 'location', with the given username, password, and (optional) status
+ '''
+ publisher_definition = {
+ 'feedId' : feed_id,
+ 'dcaeLocationName' : location,
+ 'username' : username,
+ 'userpwd' : password
+ }
+
+ if status:
+ publisher_definition['status'] = status
+
+ return self._create_resource(self.pubs_path, publisher_definition)
+
+ def get_publisher_info(self, pub_id):
+ '''
+ Get the representation of the DMaaP data router publisher whose publisher id is pub_id
+ '''
+ return self._get_resource("{0}/{1}".format(self.pubs_path, pub_id))
+
+ def delete_publisher(self, pub_id):
+ '''
+ Delete the DMaaP data router publisher whose publisher id is pub_id.
+ '''
+ return self._delete_resource("{0}/{1}".format(self.pubs_path, pub_id))
+
+
+ # Data Router Subscribers
+ def add_subscriber(self, feed_id, location, delivery_url, username, password, decompress, privileged, status=None):
+ '''
+ Add a subscriber to feed feed_id at location 'location', delivering to delivery_url, with the given username, password, decompress and privileged flags, and (optional) status
+ '''
+ subscriber_definition = {
+ 'feedId' : feed_id,
+ 'dcaeLocationName' : location,
+ 'deliveryURL' : delivery_url,
+ 'username' : username,
+ 'userpwd' : password,
+ 'decompress': decompress,
+ 'privilegedSubscriber': privileged
+ }
+
+ if status:
+ subscriber_definition['status'] = status
+
+ return self._create_resource(self.subs_path, subscriber_definition)
+
+ def get_subscriber_info(self, sub_id):
+ '''
+ Get the representation of the DMaaP data router subscriber whose subscriber id is sub_id
+ '''
+ return self._get_resource("{0}/{1}".format(self.subs_path, sub_id))
+
+ def delete_subscriber(self, sub_id):
+ '''
+ Delete the DMaaP data router subscriber whose subscriber id is sub_id.
+ '''
+ return self._delete_resource("{0}/{1}".format(self.subs_path, sub_id))
+
+ # Message router topics
+ def create_topic(self, name, description = None, txenable = None, owner = None, replication_case = None, global_mr_url = None, useExisting = None):
+ '''
+ Create a message router topic with the topic name 'name' and, optionally, the topic
+ description 'description', the 'txenable' flag, the topic owner 'owner', the
+ replication case 'replication_case', the global MR URL 'global_mr_url', and the 'useExisting' flag.
+ '''
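+ # The request body assembled below looks like this (values are illustrative):
+ #   {"topicName": "example-topic", "topicDescription": "...", "txenable": False,
+ #    "owner": "...", "replicationCase": "...", "globalMrURL": "..."}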
+ topic_definition = {'topicName' : name}
+ if description:
+ topic_definition['topicDescription'] = description
+ if owner:
+ topic_definition['owner'] = owner
+ if txenable is not None: # It's a boolean!
+ topic_definition['txenable'] = txenable
+ if replication_case:
+ topic_definition['replicationCase'] = replication_case
+ if global_mr_url:
+ topic_definition['globalMrURL'] = global_mr_url
+ topics_path_query = self.topics_path
+ if useExisting == True: # It's a boolean!
+ topics_path_query += "?useExisting=true"
+
+ return self._create_resource(topics_path_query, topic_definition)
+
+ def get_topic_info(self, fqtn):
+ '''
+ Get information about the topic whose fully-qualified name is 'fqtn'
+ '''
+ return self._get_resource("{0}/{1}".format(self.topics_path, fqtn))
+
+ def get_topic_fqtn_by_name(self, topic_name):
+ '''
+ Get the representation of the DMaaP message router topic fqtn whose topic name is topic_name.
+ '''
+ topics = self._get_resource("{0}".format(self.topics_path))
+ topic_list = topics.json()
+ for topic in topic_list:
+ if topic["topicName"] == topic_name:
+ self.logger.info("Found existing topic with name {0}".format(topic_name))
+ fqtn = topic["fqtn"]
+ return fqtn
+
+ self.logger.info("topic_name {0} not found".format(topic_name))
+ return None
+
+ def delete_topic(self, fqtn):
+ '''
+ Delete the topic whose fully qualified name is 'fqtn'
+ '''
+ return self._delete_resource("{0}/{1}".format(self.topics_path, fqtn))
+
+ # Message router clients (publishers and subscribers)
+ def create_client(self, fqtn, location, client_role, actions):
+ '''
+ Creates a client authorized to access the topic with fully-qualified name 'fqtn',
+ from the location 'location', using the AAF client role 'client_role'. The
+ client is authorized to perform actions in the list 'actions'. (Valid
+ values are 'pub', 'sub', and 'view'.)
+ '''
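+ # Illustrative client_definition (values are examples only):
+ #   {"fqtn": "org.onap.example.topic", "dcaeLocationName": "loc00",
+ #    "clientRole": "org.onap.example.role", "action": ["view", "pub"]}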
+ client_definition = {
+ 'fqtn' : fqtn,
+ 'dcaeLocationName' : location,
+ 'clientRole' : client_role,
+ 'action' : actions
+ }
+ return self._create_resource(self.clients_path, client_definition)
+
+ def get_client_info(self, client_id):
+ '''
+ Get client information for the client whose client ID is 'client_id'
+ '''
+ return self._get_resource("{0}/{1}".format(self.clients_path, client_id))
+
+ def delete_client(self, client_id):
+ '''
+ Delete the client whose client ID is 'client_id'
+ '''
+ return self._delete_resource("{0}/{1}".format(self.clients_path, client_id))
+
+ def get_dcae_locations(self, dcae_layer):
+ '''
+ Get the list of location names known to the DMaaP bus controller
+ whose "dcaeLayer" property matches dcae_layer and whose status is "VALID".
+ '''
+ # Fetch, then raise_for_status, as separate steps so errors like a 404 get reported precisely
+ locations = self._get_resource(LOCATIONS_PATH)
+ locations.raise_for_status()
+
+ # pull out location names for VALID locations with matching dcae_layer
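+ # e.g. an entry like {"dcaeLocationName": "loc00", "dcaeLayer": "k8s",
+ # "status": "VALID"} (illustrative) yields "loc00" when dcae_layer == "k8s"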
+ return [location["dcaeLocationName"] for location in locations.json()
+ if location['dcaeLayer'] == dcae_layer
+ and location['status'] == 'VALID']
+
+ def get_dcae_central_locations(self):
+ '''
+ Get the list of location names known to the DMaaP bus controller
+ whose "dcaeLayer" property contains "central" (ignoring case)
+ and whose status is "VALID".
+ "dcaeLayer" contains "central" for central sites.
+ '''
+ # Fetch, then raise_for_status, as separate steps so errors like a 404 get reported precisely
+ locations = self._get_resource(LOCATIONS_PATH)
+ locations.raise_for_status()
+
+ # pull out location names for VALID central locations
+ return [location["dcaeLocationName"] for location in locations.json()
+ if 'central' in location['dcaeLayer'].lower()
+ and location['status'] == 'VALID']
+
diff --git a/dmaap/dmaapplugin/__init__.py b/dmaap/dmaapplugin/__init__.py
new file mode 100644
index 0000000..7a760d7
--- /dev/null
+++ b/dmaap/dmaapplugin/__init__.py
@@ -0,0 +1,82 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+## Get parameters for accessing the DMaaP controller
+from consulif.consulif import ConsulHandle
+from cloudify.exceptions import NonRecoverableError
+import os
+
+os.environ["REQUESTS_CA_BUNDLE"]="/opt/onap/certs/cacert.pem" # This is to handle https request thru plugin
+
+CONSUL_HOST = "consul" # Should always be a local consul agent on Cloudify Manager
+DBCL_KEY_NAME = "dmaap-plugin" # Consul key containing DMaaP data bus credentials
+# In the ONAP Kubernetes environment, bus controller address is always "dmaap-bc", on port 8080 (http) and 8443 (https)
+ONAP_SERVICE_ADDRESS = "dmaap-bc"
+HTTP_PORT = "8080"
+HTTPS_PORT = "8443"
+
+try:
+ _ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, None)
+except Exception as e:
+ raise NonRecoverableError("Error getting ConsulHandle when configuring dmaap plugin: {0}".format(e))
+
+try:
+ config = _ch.get_config(DBCL_KEY_NAME)
+except Exception as e:
+ raise NonRecoverableError("Error getting config for '{0}' from ConsulHandle when configuring dmaap plugin: {1}".format(DBCL_KEY_NAME, e))
+
+try:
+ DMAAP_USER = config['dmaap']['username']
+except Exception as e:
+ raise NonRecoverableError("Error setting DMAAP_USER while configuring dmaap plugin: {0}".format(e))
+
+try:
+ DMAAP_PASS = config['dmaap']['password']
+except Exception as e:
+ raise NonRecoverableError("Error setting DMAAP_PASS while configuring dmaap plugin: {0}".format(e))
+
+try:
+ DMAAP_OWNER = config['dmaap']['owner']
+except Exception as e:
+ raise NonRecoverableError("Error setting DMAAP_OWNER while configuring dmaap plugin: {0}".format(e))
+
+try:
+ if 'protocol' in config['dmaap']:
+ DMAAP_PROTOCOL = config['dmaap']['protocol']
+ service_port = HTTP_PORT
+ else:
+ DMAAP_PROTOCOL = 'https' # Default to https (service discovery should give us this but doesn't)
+ service_port = HTTPS_PORT
+except Exception as e:
+ raise NonRecoverableError("Error setting DMAAP_PROTOCOL while configuring dmaap plugin: {0}".format(e))
+
+try:
+ if 'path' in config['dmaap']:
+ DMAAP_PATH = config['dmaap']['path']
+ else:
+ DMAAP_PATH = 'webapi' # Should come from service discovery, but Consul doesn't support it
+except Exception as e:
+ raise NonRecoverableError("Error setting DMAAP_PATH while configuring dmaap plugin: {0}".format(e))
+
+try:
+ service_address = ONAP_SERVICE_ADDRESS
+ DMAAP_API_URL = '{0}://{1}:{2}/{3}'.format(DMAAP_PROTOCOL, service_address, service_port, DMAAP_PATH)
+except Exception as e:
+ raise NonRecoverableError("Error setting DMAAP_API_URL while configuring dmaap plugin: {0}".format(e))
+
diff --git a/dmaap/dmaapplugin/dmaaputils.py b/dmaap/dmaapplugin/dmaaputils.py
new file mode 100644
index 0000000..262abcb
--- /dev/null
+++ b/dmaap/dmaapplugin/dmaaputils.py
@@ -0,0 +1,29 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+# Utility functions
+
+import string
+from random import SystemRandom
+
+def random_string(n):
+ '''
+ Create a random alphanumeric string, n characters long.
+ '''
+ secureRandomGen = SystemRandom()
+ return ''.join(secureRandomGen.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(n))
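+# e.g. random_string(8) might return "k3VdQz7P" (illustrative; output is random)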
diff --git a/dmaap/dmaapplugin/dr_bridge.py b/dmaap/dmaapplugin/dr_bridge.py
new file mode 100644
index 0000000..a188667
--- /dev/null
+++ b/dmaap/dmaapplugin/dr_bridge.py
@@ -0,0 +1,199 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from dmaapplugin import DMAAP_API_URL, DMAAP_USER, DMAAP_PASS
+from dmaapplugin.dmaaputils import random_string
+from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
+
+# Set up a subscriber to a source feed
+def _set_up_subscriber(dmc, source_feed_id, loc, delivery_url, username, userpw):
+ # Add subscriber to source feed. add_subscriber requires the decompress and
+ # privileged flags; bridge subscribers are assumed to need neither.
+ add_sub = dmc.add_subscriber(source_feed_id, loc, delivery_url, username, userpw, False, False)
+ add_sub.raise_for_status()
+ return add_sub.json()
+
+# Set up a publisher to a target feed
+def _set_up_publisher(dmc, target_feed_id, loc):
+ username = random_string(8)
+ userpw = random_string(16)
+ add_pub = dmc.add_publisher(target_feed_id, loc, username, userpw)
+ add_pub.raise_for_status()
+ pub_info = add_pub.json()
+ return pub_info["pubId"], username, userpw
+
+# Get a central location to use when creating a publisher or subscriber
+def _get_central_location(dmc):
+ locations = dmc.get_dcae_central_locations()
+ if len(locations) < 1:
+ raise Exception('No central location found for setting up DR bridging')
+ return locations[0] # We take the first one. Typically there will be two central locations
+
+
+# Set up a "bridge" between two feeds internal to DCAE
+# A source feed "bridges_to" a target feed, meaning that anything published to
+# the source feed will be delivered to subscribers to the target feed (as well as
+# to subscribers of the source feed).
+#
+# The bridge is established by first adding a publisher to the target feed. The result of doing this
+# is a publish URL and a set of publication credentials.
+# The publish URL and publication credentials are used to set up a subscriber to the source feed.
+# I.e., we tell the source feed to deliver to an endpoint which is actually a publish
+# endpoint for the target feed.
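+#
+# Schematically:
+#   publisher --> source feed --[subscriber whose delivery URL is the target
+#   feed's publish URL]--> target feed --> target feed's subscribers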
+@operation
+def create_dr_bridge(**kwargs):
+
+ try:
+
+ # Get source and target feed ids
+ if 'feed_id' in ctx.target.instance.runtime_properties:
+ target_feed_id = ctx.target.instance.runtime_properties['feed_id']
+ else:
+ raise Exception('Target feed has no feed_id property')
+ if 'feed_id' in ctx.source.instance.runtime_properties:
+ source_feed_id = ctx.source.instance.runtime_properties['feed_id']
+ else:
+ raise Exception('Source feed has no feed_id property')
+
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+
+ # Get a location to use when creating a publisher or subscriber--a central location seems reasonable
+ loc = _get_central_location(dmc)
+
+ ctx.logger.info('Creating bridge from feed {0} to feed {1} using location {2}'.format(source_feed_id, target_feed_id, loc))
+
+ # Add publisher to target feed
+ publisher_id, username, userpw = _set_up_publisher(dmc, target_feed_id, loc)
+ ctx.logger.info("Added publisher id {0} to target feed {1} with user {2}".format(publisher_id, target_feed_id, username))
+
+ # Add subscriber to source feed
+ delivery_url = ctx.target.instance.runtime_properties['publish_url']
+ subscriber_info = _set_up_subscriber(dmc, source_feed_id, loc, delivery_url, username, userpw)
+ subscriber_id = subscriber_info["subId"]
+ ctx.logger.info("Added subscriber id {0} to source feed {1} with delivery url {2}".format(subscriber_id, source_feed_id, delivery_url))
+
+ # Save the publisher and subscriber IDs on the source node, indexed by the target node id
+ ctx.source.instance.runtime_properties[ctx.target.node.id] = {"publisher_id": publisher_id, "subscriber_id": subscriber_id}
+
+ except Exception as e:
+ ctx.logger.error("Error creating bridge: {0}".format(e))
+ raise NonRecoverableError(e)
+
+# Set up a bridge from an internal DCAE feed to a feed in an external Data Router system
+# The target feed needs to be provisioned in the external Data Router system. A publisher
+# to that feed must also be set up in the external Data Router system. The publish URL,
+# username, and password need to be captured in a target node of type dcaegen2.nodes.ExternalTargetFeed.
+# The bridge is established by setting up a subscriber to the internal DCAE source feed using the
+# external feed publisher parameters as delivery parameters for the subscriber.
+@operation
+def create_external_dr_bridge(**kwargs):
+ try:
+
+ # Make sure target feed has full set of properties
+ if 'url' in ctx.target.node.properties and 'username' in ctx.target.node.properties and 'userpw' in ctx.target.node.properties:
+ url = ctx.target.node.properties['url']
+ username = ctx.target.node.properties['username']
+ userpw = ctx.target.node.properties['userpw']
+ else:
+ raise Exception ("Target feed missing url, username, and/or user pw")
+
+ # Make sure source feed has a feed ID
+ if 'feed_id' in ctx.source.instance.runtime_properties:
+ source_feed_id = ctx.source.instance.runtime_properties['feed_id']
+ else:
+ raise Exception('Source feed has no feed_id property')
+
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+
+ # Get a central location to use when creating subscriber
+ loc = _get_central_location(dmc)
+
+ ctx.logger.info('Creating external bridge from feed {0} to external url {1} using location {2}'.format(source_feed_id, url, loc))
+
+ # Create subscription to source feed using properties of the external target feed
+ subscriber_info = _set_up_subscriber(dmc, source_feed_id, loc, url, username, userpw)
+ subscriber_id = subscriber_info["subId"]
+ ctx.logger.info("Added subscriber id {0} to source feed {1} with delivery url {2}".format(subscriber_id, source_feed_id, url))
+
+ # Save the subscriber ID on the source node, indexed by the target node id
+ ctx.source.instance.runtime_properties[ctx.target.node.id] = {"subscriber_id": subscriber_id}
+
+ except Exception as e:
+ ctx.logger.error("Error creating external bridge: {0}".format(e))
+ raise NonRecoverableError(e)
+
+# Set up a bridge from a feed in an external Data Router system to an internal DCAE feed.
+# The bridge is established by creating a publisher on the internal DCAE feed. Then a subscription
+# to the external feed is created through manual provisioning in the external Data Router system, using
+# the publish URL and the publisher username and password for the internal feed as the delivery parameters
+# for the external subscription.
+# In order to obtain the publish URL, publisher username, and password, a blueprint using this sort of
+# bridge will typically have an output that exposes the runtime_property set on the source node in this operation.
+@operation
+def create_external_source_dr_bridge(**kwargs):
+ try:
+ # Get target feed id
+ if 'feed_id' in ctx.target.instance.runtime_properties:
+ target_feed_id = ctx.target.instance.runtime_properties['feed_id']
+ else:
+ raise Exception('Target feed has no feed_id property')
+
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+
+ # Get a central location to use when creating a publisher
+ loc = _get_central_location(dmc)
+
+ # Create a publisher on the target feed
+ publisher_id, username, userpw = _set_up_publisher(dmc, target_feed_id, loc)
+
+ # Save the publisher info on the source node, indexed by the target node
+ ctx.source.instance.runtime_properties[ctx.target.node.id] = {"publisher_id": publisher_id, "url": ctx.target.instance.runtime_properties["publish_url"], "username": username, "userpw": userpw}
+
+ except Exception as e:
+ ctx.logger.error("Error creating external source bridge: {0}".format(e))
+ raise NonRecoverableError(e)
+
+# Remove the bridge between the relationship source and target.
+# For a bridge between 2 internal feeds, deletes the subscriber on the source feed and the publisher on the target feed.
+# For a bridge to an external target feed, deletes the subscriber on the source feed.
+# For a bridge from an external source feed, deletes the publisher on the target feed.
+@operation
+def remove_dr_bridge(**kwargs):
+ try:
+
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+
+ if ctx.target.node.id in ctx.source.instance.runtime_properties:
+
+ if 'subscriber_id' in ctx.source.instance.runtime_properties[ctx.target.node.id]:
+ # Delete the subscription for this bridge
+ ctx.logger.info("Removing bridge -- deleting subscriber {0}".format(ctx.source.instance.runtime_properties[ctx.target.node.id]['subscriber_id']))
+ dmc.delete_subscriber(ctx.source.instance.runtime_properties[ctx.target.node.id]['subscriber_id'])
+
+ if 'publisher_id' in ctx.source.instance.runtime_properties[ctx.target.node.id]:
+ # Delete the publisher for this bridge
+ ctx.logger.info("Removing bridge -- deleting publisher {0}".format(ctx.source.instance.runtime_properties[ctx.target.node.id]['publisher_id']))
+ dmc.delete_publisher(ctx.source.instance.runtime_properties[ctx.target.node.id]['publisher_id'])
+
+ ctx.logger.info("Remove bridge from {0} to {1}".format(ctx.source.node.id, ctx.target.node.id))
+
+ except Exception as e:
+ ctx.logger.error("Error removing bridge: {0}".format(e))
+ # Let the uninstall workflow proceed--don't throw a NonRecoverableError
diff --git a/dmaap/dmaapplugin/dr_lifecycle.py b/dmaap/dmaapplugin/dr_lifecycle.py
new file mode 100644
index 0000000..af37977
--- /dev/null
+++ b/dmaap/dmaapplugin/dr_lifecycle.py
@@ -0,0 +1,153 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from dmaapplugin import DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, DMAAP_OWNER
+from dmaapplugin.dmaaputils import random_string
+from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
+
+# Lifecycle operations for DMaaP Data Router feeds
+
+@operation
+def create_feed(**kwargs):
+ '''
+ Create a new Data Router feed.
+ Expects "feed_name" to be set in node properties;
+ if "feed_name" is not set or is empty, generates a random one.
+ Allows "feed_version", "feed_description", "aspr_classification" and "useExisting"
+ as optional properties (sets default values if not provided).
+ Note that 'useExisting' is a flag indicating whether DBCL will use the existing feed
+ if the feed already exists.
+ Sets instance runtime properties:
+ - "feed_id"
+ - "publish_url"
+ - "log_url"
+ '''
+ try:
+ # Make sure there's a feed_name
+ feed_name = ctx.node.properties.get("feed_name")
+ if not (feed_name and feed_name.strip()):
+ feed_name = random_string(12)
+
+ # Set defaults/placeholders for the optional properties for the feed
+ if "feed_version" in ctx.node.properties:
+ feed_version = ctx.node.properties["feed_version"]
+ else:
+ feed_version = "0.0"
+ if "feed_description" in ctx.node.properties:
+ feed_description = ctx.node.properties["feed_description"]
+ else:
+ feed_description = "No description provided"
+ if "aspr_classification" in ctx.node.properties:
+ aspr_classification = ctx.node.properties["aspr_classification"]
+ else:
+ aspr_classification = "unclassified"
+ if "useExisting" in ctx.node.properties:
+ useExisting = ctx.node.properties["useExisting"]
+ else:
+ useExisting = False
+
+ # Make the request to the controller
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ ctx.logger.info("Attempting to create feed name {0}".format(feed_name))
+ f = dmc.create_feed(feed_name, feed_version, feed_description, aspr_classification, DMAAP_OWNER, useExisting)
+ f.raise_for_status()
+
+ # Capture important properties from the result
+ feed = f.json()
+ ctx.instance.runtime_properties["feed_id"] = feed["feedId"]
+ ctx.instance.runtime_properties["publish_url"] = feed["publishURL"]
+ ctx.instance.runtime_properties["log_url"] = feed["logURL"]
+ ctx.logger.info("Created feed name {0} with feed id {1}".format(feed_name, feed["feedId"]))
+
+ except Exception as e:
+ ctx.logger.error("Error creating feed: {er}".format(er=e))
+ raise NonRecoverableError(e)
+
+
+@operation
+def get_existing_feed(**kwargs):
+ '''
+ Find information for an existing data router feed
+ Expects "feed_id" to be set in node properties -- uniquely identifies the feed
+ Sets instance runtime properties:
+ - "feed_id"
+ - "publish_url"
+ - "log_url"
+ '''
+
+ try:
+ # Make the lookup request to the controller
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ ctx.logger.info("DMaaPControllerHandle() returned")
+ feed_id_input = False
+ if "feed_id" in ctx.node.properties:
+ feed_id_input = True
+ f = dmc.get_feed_info(ctx.node.properties["feed_id"])
+ elif "feed_name" in ctx.node.properties:
+ feed_name = ctx.node.properties["feed_name"]
+ f = dmc.get_feed_info_by_name(feed_name)
+ if f is None:
+ ctx.logger.error("Not find existing feed with feed name {0}".format(feed_name))
+ raise ValueError("Not find existing feed with feed name " + feed_name)
+ else:
+ raise ValueError("Either feed_id or feed_name must be defined to get existing feed")
+
+ f.raise_for_status()
+
+ # Capture important properties from the result
+ feed = f.json()
+ feed_id = feed["feedId"]
+ ctx.instance.runtime_properties["feed_id"] = feed_id # Just to be consistent with newly-created node, above
+ ctx.instance.runtime_properties["publish_url"] = feed["publishURL"]
+ ctx.instance.runtime_properties["log_url"] = feed["logURL"]
+ if feed_id_input:
+ ctx.logger.info("Found existing feed with feed id {0}".format(ctx.node.properties["feed_id"]))
+ else:
+ ctx.logger.info("Found existing feed with feed name {0}".format(ctx.node.properties["feed_name"]))
+
+ except ValueError as e:
+ ctx.logger.error("{er}".format(er=e))
+ raise NonRecoverableError(e)
+ except Exception as e:
+ if feed_id_input:
+ ctx.logger.error("Error getting existing feed id {id}: {er}".format(id=ctx.node.properties["feed_id"],er=e))
+ else:
+ ctx.logger.error("Error getting existing feed name {name}: {er}".format(name=ctx.node.properties["feed_name"],er=e))
+ raise NonRecoverableError(e)
+
+
+@operation
+def delete_feed(**kwargs):
+ '''
+ Delete a feed
+ Expects "feed_id" to be set on the instance's runtime properties
+ '''
+ try:
+ # Make the delete request to the controller
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ f = dmc.delete_feed(ctx.instance.runtime_properties["feed_id"])
+ f.raise_for_status()
+ ctx.logger.info("Deleting feed id {0}".format(ctx.instance.runtime_properties["feed_id"]))
+
+ except Exception as e:
+ ctx.logger.error("Error deleting feed id {id}: {er}".format(id=ctx.instance.runtime_properties["feed_id"],er=e))
+ # don't raise a NonRecoverable error here--let the uninstall workflow continue
diff --git a/dmaap/dmaapplugin/dr_relationships.py b/dmaap/dmaapplugin/dr_relationships.py
new file mode 100644
index 0000000..f1ff986
--- /dev/null
+++ b/dmaap/dmaapplugin/dr_relationships.py
@@ -0,0 +1,219 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from dmaapplugin import DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, CONSUL_HOST
+from dmaapplugin.dmaaputils import random_string
+from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
+from consulif.consulif import ConsulHandle
+
+# Lifecycle operations for DMaaP Data Router
+# publish and subscribe relationships
+
+@operation
+def add_dr_publisher(**kwargs):
+ '''
+ Sets up the source of the publish_files relationship as a publisher to the feed that
+ is the target of the relationship
+ Assumes target (the feed) has the following runtime properties set
+ - feed_id
+ - log_url
+ - publish_url
+ Assumes source (the publisher) has a runtime property whose name matches the node name of the feed.
+ This is a dictionary containing one property:
+ - location (the dcaeLocationName to pass when adding the publisher to the feed)
+ Generates a user name and password that the publisher will need to use when publishing
+ Adds the following properties to the dictionary above:
+ - publish_url
+ - log_url
+ - username
+ - password
+ '''
+ try:
+ # Make sure we have a name under which to store DMaaP configuration
+ # Check early so we don't needlessly create DMaaP entities
+ if 'service_component_name' not in ctx.source.instance.runtime_properties:
+ raise Exception("Source node does not have 'service_component_name' in runtime_properties")
+
+ target_feed = ctx.target.node.id
+ ctx.logger.info("Attempting to add publisher {0} to feed {1}".format(ctx.source.node.id, target_feed))
+
+ # Set up the parameters for the add_publisher request to the DMaaP bus controller
+ feed_id = ctx.target.instance.runtime_properties["feed_id"]
+ location = ctx.source.instance.runtime_properties[target_feed]["location"]
+ username = random_string(8)
+ password = random_string(16)
+
+ # Make the request to add the publisher to the feed
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ add_pub = dmc.add_publisher(feed_id, location, username, password)
+ add_pub.raise_for_status()
+ publisher_info = add_pub.json()
+ publisher_id = publisher_info["pubId"]
+ ctx.logger.info("Added publisher id {0} to feed {1} at {2}, with user {3}, pass {4}".format(publisher_id, feed_id, location, username, password))
+
+ # Set runtime properties on the source
+ ctx.source.instance.runtime_properties[target_feed] = {
+ "publisher_id" : publisher_id,
+ "location" : location,
+ "publish_url" : ctx.target.instance.runtime_properties["publish_url"],
+ "log_url" : ctx.target.instance.runtime_properties["log_url"],
+ "username" : username,
+ "password" : password
+ }
+
+ # Set key in Consul
+ ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, ctx.logger)
+ cpy = dict(ctx.source.instance.runtime_properties[target_feed])
+ ch.add_to_entry("{0}:dmaap".format(ctx.source.instance.runtime_properties['service_component_name']), target_feed, cpy)
+
+ except Exception as e:
+ ctx.logger.error("Error adding publisher to feed: {er}".format(er=e))
+ raise NonRecoverableError(e)
+
+
+@operation
+def delete_dr_publisher(**kwargs):
+ '''
+ Deletes publisher (the source of the publish_files relationship)
+ from the feed (the target of the relationship).
+ Assumes that the 'publisher_id' property was added to the dictionary of feed-related properties,
+ when the publisher was added to the feed.
+ '''
+
+ try:
+ # Make sure we have a name under which to store DMaaP configuration
+ # Check early so we don't needlessly create DMaaP entities
+ if 'service_component_name' not in ctx.source.instance.runtime_properties:
+ raise Exception("Source node does not have 'service_component_name' in runtime_properties")
+
+ # Get the publisher id
+ target_feed = ctx.target.node.id
+ publisher_id = ctx.source.instance.runtime_properties[target_feed]["publisher_id"]
+ ctx.logger.info("Attempting to delete publisher {0}".format(publisher_id))
+
+ # Make the request
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ del_result = dmc.delete_publisher(publisher_id)
+ del_result.raise_for_status()
+
+ ctx.logger.info("Deleted publisher {0}".format(publisher_id))
+
+ # Attempt to remove the entire ":dmaap" entry from the Consul KV store
+ # Will quietly do nothing if the entry has already been removed
+ ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, ctx.logger)
+ ch.delete_entry("{0}:dmaap".format(ctx.source.instance.runtime_properties['service_component_name']))
+
+ except Exception as e:
+ ctx.logger.error("Error deleting publisher: {er}".format(er=e))
+ # don't raise a NonRecoverable error here--let the uninstall workflow continue
+
+
+@operation
+def add_dr_subscriber(**kwargs):
+ '''
+ Sets up the source of the subscribe_to_files relationship as a subscriber to the
+ feed that is the target of the relationship.
+ Assumes target (the feed) has the following runtime property set
+ - feed_id
+ Assumes source (the subscriber) has a runtime property whose name matches the node name of the feed.
+ This is a dictionary containing the following properties:
+ - location (the dcaeLocationName to pass when adding the publisher to the feed)
+ - delivery_url (the URL to which data router will deliver files)
+ - username (the username data router will use when delivering files)
+ - password (the password data router will use when delivering files)
+ Adds a property to the dictionary above:
+ - subscriber_id (used to delete the subscriber in the uninstall workflow)
+ '''
+ try:
+ target_feed = ctx.target.node.id
+ ctx.logger.info("Attempting to add subscriber {0} to feed {1}".format(ctx.source.node.id, target_feed))
+
+ # Get the parameters for the call
+ feed_id = ctx.target.instance.runtime_properties["feed_id"]
+ feed = ctx.source.instance.runtime_properties[target_feed]
+ location = feed["location"]
+ delivery_url = feed["delivery_url"]
+ username = feed["username"]
+ password = feed["password"]
+ decompress = feed["decompress"] if "decompress" in feed else False
+ privileged = feed["privileged"] if "privileged" in feed else False
+
+ # Make the request to add the subscriber to the feed
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ add_sub = dmc.add_subscriber(feed_id, location, delivery_url, username, password, decompress, privileged)
+ add_sub.raise_for_status()
+ subscriber_info = add_sub.json()
+ subscriber_id = subscriber_info["subId"]
+ ctx.logger.info("Added subscriber id {0} to feed {1} at {2}".format(subscriber_id, feed_id, location))
+
+ # Add subscriber_id to the runtime properties
+ # ctx.source.instance.runtime_properties[target_feed]["subscriber_id"] = subscriber_id
+ ctx.source.instance.runtime_properties[target_feed] = {
+ "subscriber_id": subscriber_id,
+ "location" : location,
+ "delivery_url" : delivery_url,
+ "username" : username,
+ "password" : password,
+ "decompress": decompress,
+ "privilegedSubscriber": privileged
+ }
+ ctx.logger.info("on source: {0}".format(ctx.source.instance.runtime_properties[target_feed]))
+
+ # Set key in Consul
+ ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, ctx.logger)
+ cpy = dict(ctx.source.instance.runtime_properties[target_feed])
+ ch.add_to_entry("{0}:dmaap".format(ctx.source.instance.runtime_properties['service_component_name']), target_feed, cpy)
+
+ except Exception as e:
+ ctx.logger.error("Error adding subscriber to feed: {er}".format(er=e))
+ raise NonRecoverableError(e)
+
+
+@operation
+def delete_dr_subscriber(**kwargs):
+ '''
+ Deletes subscriber (the source of the subscribe_to_files relationship)
+ from the feed (the target of the relationship).
+ Assumes that the source node's runtime properties dictionary for the target feed
+ includes 'subscriber_id', set when the subscriber was added to the feed.
+ '''
+ try:
+ # Get the subscriber id
+ target_feed = ctx.target.node.id
+ subscriber_id = ctx.source.instance.runtime_properties[target_feed]["subscriber_id"]
+ ctx.logger.info("Attempting to delete subscriber {0} from feed {1}".format(subscriber_id, target_feed))
+
+ # Make the request
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ del_result = dmc.delete_subscriber(subscriber_id)
+ del_result.raise_for_status()
+
+ ctx.logger.info("Deleted subscriber {0}".format(subscriber_id))
+
+ # Attempt to remove the entire ":dmaap" entry from the Consul KV store
+ # Will quietly do nothing if the entry has already been removed
+ ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, ctx.logger)
+ ch.delete_entry("{0}:dmaap".format(ctx.source.instance.runtime_properties['service_component_name']))
+
+ except Exception as e:
+ ctx.logger.error("Error deleting subscriber: {er}".format(er=e))
+ # don't raise a NonRecoverable error here--let the uninstall workflow continue
diff --git a/dmaap/dmaapplugin/mr_lifecycle.py b/dmaap/dmaapplugin/mr_lifecycle.py
new file mode 100644
index 0000000..6fe3023
--- /dev/null
+++ b/dmaap/dmaapplugin/mr_lifecycle.py
@@ -0,0 +1,143 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from dmaapplugin import DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, DMAAP_OWNER
+from dmaapplugin.dmaaputils import random_string
+from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
+
+# Lifecycle operations for DMaaP Message Router topics
+@operation
+def create_topic(**kwargs):
+ '''
+ Creates a message router topic.
+ Allows 'topic_name', 'topic_description', 'txenable', 'replication_case', 'global_mr_url',
+ and 'useExisting' as optional node properties. If 'topic_name' is not set,
+ generates a random one.
+ Sets 'fqtn' in the instance runtime_properties.
+ Note that 'txenable' is a Message Router flag indicating whether transactions
+ are enabled on the topic.
+ Note that 'useExisting' is a flag indicating whether DBCL will use existing topic if
+ the topic already exists.
+ '''
+ try:
+ # Make sure there's a topic_name
+ if "topic_name" in ctx.node.properties:
+ topic_name = ctx.node.properties["topic_name"]
+ if topic_name == '' or topic_name.isspace():
+ topic_name = random_string(12)
+ else:
+ topic_name = random_string(12)
+
+ # Make sure there's a topic description
+ if "topic_description" in ctx.node.properties:
+ topic_description = ctx.node.properties["topic_description"]
+ else:
+ topic_description = "No description provided"
+
+ # ...and the truly optional settings
+ if "txenable" in ctx.node.properties:
+ txenable = ctx.node.properties["txenable"]
+ else:
+ txenable = False
+
+ if "replication_case" in ctx.node.properties:
+ replication_case = ctx.node.properties["replication_case"]
+ else:
+ replication_case = None
+
+ if "global_mr_url" in ctx.node.properties:
+ global_mr_url = ctx.node.properties["global_mr_url"]
+ else:
+ global_mr_url = None
+
+ if "useExisting" in ctx.node.properties:
+ useExisting = ctx.node.properties["useExisting"]
+ else:
+ useExisting = False
+
+ # Make the request to the controller
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ ctx.logger.info("Attempting to create topic name {0}".format(topic_name))
+ t = dmc.create_topic(topic_name, topic_description, txenable, DMAAP_OWNER, replication_case, global_mr_url, useExisting)
+ t.raise_for_status()
+
+ # Capture important properties from the result
+ topic = t.json()
+ ctx.instance.runtime_properties["fqtn"] = topic["fqtn"]
+
+ except Exception as e:
+ ctx.logger.error("Error creating topic: {er}".format(er=e))
+ raise NonRecoverableError(e)
+
+@operation
+def get_existing_topic(**kwargs):
+ '''
+ Get data for an existing topic.
+ Expects 'fqtn' as a node property.
+ Copies this property to 'fqtn' in runtime properties for consistency
+ with a newly-created topic.
+ While there's no real need to make a call to the DMaaP bus controller,
+ we do so just to make sure the fqtn is known to the controller, so we
+ don't run into problems when we try to add a publisher or subscriber later.
+ '''
+ try:
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ fqtn_input = False
+ if "fqtn" in ctx.node.properties:
+ fqtn = ctx.node.properties["fqtn"]
+ fqtn_input = True
+ elif "topic_name" in ctx.node.properties:
+ topic_name = ctx.node.properties["topic_name"]
+ ctx.logger.info("Attempting to get fqtn for existing topic {0}".format(topic_name))
+ fqtn = dmc.get_topic_fqtn_by_name(topic_name)
+ if fqtn is None:
+ raise ValueError("Not find existing topic with name " + topic_name)
+ else:
+ ctx.logger.error("Not find existing topic with name {0}".format(topic_name))
+ raise ValueError("Either fqtn or topic_name must be defined to get existing topic")
+
+ ctx.logger.info("Attempting to get info for existing topic {0}".format(fqtn))
+ t = dmc.get_topic_info(fqtn)
+ t.raise_for_status()
+
+ ctx.instance.runtime_properties["fqtn"] = fqtn
+
+ except Exception as e:
+ ctx.logger.error("Error getting existing topic: {er}".format(er=e))
+ raise NonRecoverableError(e)
+
+@operation
+def delete_topic(**kwargs):
+ '''
+ Delete the topic. Expects the instance runtime property "fqtn" to have been
+ set when the topic was created.
+ '''
+ try:
+ fqtn = ctx.instance.runtime_properties["fqtn"]
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ ctx.logger.info("Attempting to delete topic {0}".format(fqtn))
+ t = dmc.delete_topic(fqtn)
+ t.raise_for_status()
+
+ except Exception as e:
+ ctx.logger.error("Error getting existing topic: {er}".format(er=e))
+ # don't raise a NonRecoverable error here--let the uninstall workflow continue
diff --git a/dmaap/dmaapplugin/mr_relationships.py b/dmaap/dmaapplugin/mr_relationships.py
new file mode 100644
index 0000000..34d02e2
--- /dev/null
+++ b/dmaap/dmaapplugin/mr_relationships.py
@@ -0,0 +1,119 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from dmaapplugin import DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, DMAAP_OWNER, CONSUL_HOST
+from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
+from consulif.consulif import ConsulHandle
+
+# Message router relationship operations
+
+def _add_mr_client(ctype, actions):
+ '''
+ Adds the node represented by 'source' as a client (publisher or subscriber) to
+ the topic represented by the 'target' node. The list of actions in 'actions'
+ determines whether the client is a subscriber or a publisher.
+
+ Assumes target (the topic) has the following runtime property set
+ - fqtn
+ Assumes source (the client) has a runtime property whose name matches the node name of the topic.
+ This is a dictionary containing the following properties:
+ - location (the dcaeLocationName to pass when adding the client to the topic)
+ - client_role (the AAF client role under which the client will access the topic)
+ Adds two properties to the dictionary above:
+ - topic_url (the URL that the client can use to access the topic)
+ - client_id (used to delete the client in the uninstall workflow)
+ '''
+ try:
+ # Make sure we have a name under which to store DMaaP configuration
+ # Check early so we don't needlessly create DMaaP entities
+ if 'service_component_name' not in ctx.source.instance.runtime_properties:
+ raise Exception("Source node does not have 'service_component_name' in runtime_properties")
+
+ target_topic = ctx.target.node.id # Key for the source's dictionary with topic-related info
+ fqtn = ctx.target.instance.runtime_properties["fqtn"]
+ ctx.logger.info("Attempting to add {0} as {1} to topic {2}".format(ctx.source.node.id, ctype, fqtn))
+
+ # Get the parameters needed for adding the client
+ location = ctx.source.instance.runtime_properties[target_topic]["location"]
+ client_role = ctx.source.instance.runtime_properties[target_topic]["client_role"]
+
+ # Make the request to add the client to the topic
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ c = dmc.create_client(fqtn, location, client_role, actions)
+ c.raise_for_status()
+ client_info = c.json()
+ client_id = client_info["mrClientId"]
+ topic_url = client_info["topicURL"]
+
+ # Update source's runtime properties
+ #ctx.source.instance.runtime_properties[target_topic]["topic_url"] = topic_url
+ #ctx.source.instance.runtime_properties[target_topic]["client_id"] = client_id
+ ctx.source.instance.runtime_properties[target_topic] = {
+ "topic_url" : topic_url,
+ "client_id" : client_id,
+ "location" : location,
+ "client_role" : client_role
+ }
+
+ ctx.logger.info("Added {0} id {1} to feed {2} at {3}".format(ctype, client_id, fqtn, location))
+
+ # Set key in Consul
+ ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, ctx.logger)
+ ch.add_to_entry("{0}:dmaap".format(ctx.source.instance.runtime_properties['service_component_name']), target_topic, ctx.source.instance.runtime_properties[target_topic])
+
+ except Exception as e:
+ ctx.logger.error("Error adding client to feed: {er}".format(er=e))
+ raise NonRecoverableError(e)
+
+@operation
+def add_mr_publisher(**kwargs):
+ _add_mr_client("publisher", ["view", "pub"])
+
+@operation
+def add_mr_subscriber(**kwargs):
+ _add_mr_client("subscriber", ["view", "sub"])
+
+@operation
+def delete_mr_client(**kwargs):
+ '''
+ Delete the client (publisher or subscriber).
+ Expect property 'client_id' to have been set in the instance's runtime_properties
+ when the client was created.
+ '''
+ try:
+ target_topic = ctx.target.node.id
+ client_id = ctx.source.instance.runtime_properties[target_topic]["client_id"]
+ ctx.logger.info("Attempting to delete client {0} ".format(client_id))
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ c = dmc.delete_client(client_id)
+ c.raise_for_status()
+
+ ctx.logger.info("Deleted client {0}".format(client_id))
+
+ # Attempt to remove the entire ":dmaap" entry from the Consul KV store
+ # Will quietly do nothing if the entry has already been removed
+ ch = ConsulHandle("http://{0}:8500".format(CONSUL_HOST), None, None, ctx.logger)
+ ch.delete_entry("{0}:dmaap".format(ctx.source.instance.runtime_properties['service_component_name']))
+
+ except Exception as e:
+ ctx.logger.error("Error deleting MR client: {er}".format(er=e))
+ # don't raise a NonRecoverable error here--let the uninstall workflow continue
+
diff --git a/dmaap/pom.xml b/dmaap/pom.xml
new file mode 100644
index 0000000..afc6089
--- /dev/null
+++ b/dmaap/pom.xml
@@ -0,0 +1,327 @@
+<?xml version="1.0"?>
+<!--
+============LICENSE_START=======================================================
+org.onap.dcaegen2
+================================================================================
+Copyright (c) 2019-2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+  <!-- CHANGE THE FOLLOWING 3 ELEMENTS (groupId, artifactId, name) for your own repo -->
+ <groupId>org.onap.dcaegen2.platform.plugins</groupId>
+ <artifactId>dmaap</artifactId>
+ <name>dmaap</name>
+
+ <version>1.5.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <!-- name from the setup.py file -->
+ <plugin.name>dmaap</plugin.name>
+ <!-- path to directory containing the setup.py relative to this file -->
+ <plugin.subdir>.</plugin.subdir>
+ <!-- path of types file itself relative to this file -->
+ <typefile.source>dmaap.yaml</typefile.source>
+ <!-- path, in repo, to store type file -->
+ <typefile.dest>type_files/dmaap/dmaap.yaml</typefile.dest>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPaths>coverage.xml</sonar.python.coverage.reportPaths>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginName>Python</sonar.pluginName>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
+ </properties>
+
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>sonar-maven-plugin</artifactId>
+ <version>2.7.1</version>
+ </plugin>
+
+ <!-- nexus-staging-maven-plugin is called during deploy phase by default behavior.
+ we do not need it -->
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <version>1.6.7</version>
+ <configuration>
+ <skipNexusStagingDeployMojo>true</skipNexusStagingDeployMojo>
+ </configuration>
+ </plugin>
+
+ <!-- maven-deploy-plugin is called during deploy but we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <version>2.8</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
+
+ <!-- first disable the default Java plugins at various stages -->
+ <!-- maven-resources-plugin is called during "*resource" phases by default behavior. it prepares the resources
+ dir. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.6</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-compiler-plugin is called during "compile" phases by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-jar-plugin is called during "compile" phase by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <id>default-jar</id>
+ <phase/>
+ </execution>
+ </executions>
+ </plugin>
+
+ <!-- maven-install-plugin is called during "install" phase by default behavior. it tries to copy stuff under
+ target dir to ~/.m2. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-surefire-plugin is called during "test" phase by default behavior. it triggers junit test.
+ we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.12.4</version>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <MVN_SERVER_ID>${project.distributionManagement.snapshotRepository.id}</MVN_SERVER_ID>
+ <TYPE_FILE_SOURCE>${typefile.source}</TYPE_FILE_SOURCE>
+ <TYPE_FILE_DEST>${typefile.dest}</TYPE_FILE_DEST>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/dmaap/requirements.txt b/dmaap/requirements.txt
new file mode 100644
index 0000000..54ffbc4
--- /dev/null
+++ b/dmaap/requirements.txt
@@ -0,0 +1,3 @@
+python-consul>=0.7.0
+requests
+cloudify-common>=5.0.5
diff --git a/dmaap/setup.py b/dmaap/setup.py
new file mode 100644
index 0000000..1bdb14f
--- /dev/null
+++ b/dmaap/setup.py
@@ -0,0 +1,36 @@
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from setuptools import setup, find_packages
+
+setup(
+ name = "dmaap",
+ version = "1.5.0",
+ packages=find_packages(),
+ author = "AT&T",
+ description = ("Cloudify plugin for creating DMaaP feeds and topics, and setting up publishers and subscribers."),
+ license = "",
+ keywords = "",
+ url = "",
+ zip_safe=False,
+ install_requires=[
+ 'python-consul>=0.7.0',
+ 'requests',
+ 'cloudify-common>=5.0.5',
+ ],
+)
diff --git a/dmaap/tests/conftest.py b/dmaap/tests/conftest.py
new file mode 100644
index 0000000..9ae7b40
--- /dev/null
+++ b/dmaap/tests/conftest.py
@@ -0,0 +1,88 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+import pytest
+
+import requests
+
+@pytest.fixture()
+def mockconsul(monkeypatch):
+ """ Override the regular Consul interface"""
+ def fake_get_config(self, key):
+ config={'dmaap': {
+ 'username': 'testuser@dmaaptest.example.com',
+ 'url': 'https://dmaaptest.example.com:8443/webapi',
+ 'password' : 'testpassword',
+ 'owner': 'dcaeorch'
+ }}
+ return config
+
+ def fake_get_service(self, service_name):
+ service_address = "myAddress"
+ service_port= "8443"
+ return service_address, service_port
+
+ def fake_add_to_entry(self, key, add_name, add_value):
+ return True
+
+ def fake_delete_entry(self, entry_name):
+ return True
+
+ def fake_init(self, api_url, user, password, logger):
+ pass
+
+ from consulif.consulif import ConsulHandle
+ monkeypatch.setattr(ConsulHandle, 'get_config', fake_get_config)
+ monkeypatch.setattr(ConsulHandle, 'get_service', fake_get_service)
+ monkeypatch.setattr(ConsulHandle, 'add_to_entry', fake_add_to_entry)
+ monkeypatch.setattr(ConsulHandle, 'delete_entry', fake_delete_entry)
+ monkeypatch.setattr(ConsulHandle, '__init__', fake_init)
+
+ def get_handle():
+ return ConsulHandle('mockconsul', None, None, None)
+ return get_handle
+
+
+@pytest.fixture()
+def mockdmaapbc(monkeypatch):
+
+ def fake_get(url, auth):
+ # print "fake_get: {0}, {1}".format(url, auth)
+ r = requests.Response()
+ r.status_code = 200
+ return r
+ def fake_post(url, auth, json):
+ # print "fake_post: {0}, {1}, {2}".format(url, auth, json)
+ r = requests.Response()
+ r.status_code = 200
+ return r
+ def fake_delete(url, auth):
+ # print "fake_delete: {0}, {1}".format(url, auth)
+ r = requests.Response()
+ r.status_code = 200
+ return r
+ def fake_json(self):
+ return {"fqtn":"test_fqtn"}
+
+ monkeypatch.setattr(requests.Response, "json", fake_json)
+ monkeypatch.setattr(requests, "get", fake_get)
+ monkeypatch.setattr(requests, "post", fake_post)
+ monkeypatch.setattr(requests, "delete", fake_delete)
+
diff --git a/dmaap/tests/test_consulif.py b/dmaap/tests/test_consulif.py
new file mode 100644
index 0000000..a45c6a4
--- /dev/null
+++ b/dmaap/tests/test_consulif.py
@@ -0,0 +1,72 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+
+import pytest
+from cloudify.exceptions import NonRecoverableError
+import os
+from consulif.consulif import ConsulHandle
+
+
+# No connections are actually made to this host
+CONSUL_HOST = "consul" # Should always be a local consul agent on Cloudify Manager
+CONSUL_PORT = '8500'
+DBCL_KEY_NAME = "dmaap_dbcl_info" # Consul key containing DMaaP data bus credentials
+DBC_SERVICE_NAME = "dmaap_bus_controller" # Name under which the DMaaP bus controller is registered
+
+
+def test_get_config_service(mockconsul):
+ err_msg = "Error getting ConsulHandle when configuring dmaap plugin: {0}"
+ _ch = ConsulHandle("http://{0}:{1}".format(CONSUL_HOST, CONSUL_PORT), None, None, None)
+
+ config = _ch.get_config(DBCL_KEY_NAME)
+
+ DMAAP_USER = config['dmaap']['username']
+ DMAAP_PASS = config['dmaap']['password']
+ DMAAP_OWNER = config['dmaap']['owner']
+
+ if 'protocol' in config['dmaap']:
+ DMAAP_PROTOCOL = config['dmaap']['protocol']
+ else:
+        DMAAP_PROTOCOL = 'https'    # Default to https (service discovery should give us this but doesn't)
+
+ if 'path' in config['dmaap']:
+ DMAAP_PATH = config['dmaap']['path']
+ else:
+ DMAAP_PATH = 'webapi' # Should come from service discovery but Consul doesn't support it
+
+ service_address, service_port = _ch.get_service(DBC_SERVICE_NAME)
+
+ DMAAP_API_URL = '{0}://{1}:{2}/{3}'.format(DMAAP_PROTOCOL, service_address, service_port, DMAAP_PATH)
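+
+    # With the fake data from the mockconsul fixture (service "myAddress",
+    # port "8443", default protocol and path), the URL is fully determined
+    assert DMAAP_API_URL == 'https://myAddress:8443/webapi'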
+
+
+def test_add_entry(mockconsul):
+ _ch = ConsulHandle("http://{0}:{1}".format(CONSUL_HOST, CONSUL_PORT), None, None, None)
+
+ key = 'DMAAP_TEST'
+ name = 'dmaap_test_name'
+ value = 'dmaap_test_value'
+ _ch.add_to_entry(key, name, value)
+
+ name = "dmaap_test_name_2"
+ value = 'dmaap_test_value_2'
+ _ch.add_to_entry(key, name, value)
+
+ _ch.delete_entry(key)
diff --git a/dmaap/tests/test_dmaapcontrollerif.py b/dmaap/tests/test_dmaapcontrollerif.py
new file mode 100644
index 0000000..25ddb88
--- /dev/null
+++ b/dmaap/tests/test_dmaapcontrollerif.py
@@ -0,0 +1,113 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+import pytest
+import requests
+from cloudify.mocks import MockCloudifyContext
+from cloudify.state import current_ctx
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+
+
+import test_consulif
+from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
+
+import logging
+logger = logging.getLogger("test_mr_lifecycle")
+
+_goodosv2 = {
+ 'auth_url': 'https://example.com/identity/v2.0',
+ 'password': 'pw',
+ 'region': 'r',
+ 'tenant_name': 'tn',
+ 'username': 'un'
+}
+
+
+def test_dmaapc(monkeypatch, mockconsul, mockdmaapbc):
+ from dmaapplugin.dmaaputils import random_string
+
+ config = mockconsul().get_config('mockkey')['dmaap']
+ DMAAP_API_URL = config['url']
+ DMAAP_USER = config['username']
+ DMAAP_PASS = config['password']
+ DMAAP_OWNER = config['owner']
+
+ properties = {'fqdn': 'a.x.example.com', 'openstack': _goodosv2 }
+ mock_ctx = MockCloudifyContext(
+ node_id='test_node_id',
+ node_name='test_node_name',
+ properties=properties,
+ runtime_properties = {
+ "admin": { "user": "admin_user" },
+ "user": { "user": "user_user" },
+ "viewer": { "user": "viewer_user" }
+ })
+
+ current_ctx.set(mock_ctx)
+
+ kwargs = { "topic_name": "ONAP_test",
+ "topic_description": "onap dmaap plugin unit test topic"}
+
+ # Make sure there's a topic_name
+ if "topic_name" in ctx.node.properties:
+ topic_name = ctx.node.properties["topic_name"]
+ if topic_name == '' or topic_name.isspace():
+ topic_name = random_string(12)
+ else:
+ topic_name = random_string(12)
+
+ # Make sure there's a topic description
+ if "topic_description" in ctx.node.properties:
+ topic_description = ctx.node.properties["topic_description"]
+ else:
+ topic_description = "No description provided"
+
+ # ..and the truly optional setting
+ if "txenable" in ctx.node.properties:
+ txenable = ctx.node.properties["txenable"]
+ else:
+        txenable = False
+
+ if "replication_case" in ctx.node.properties:
+ replication_case = ctx.node.properties["replication_case"]
+ else:
+ replication_case = None
+
+ if "global_mr_url" in ctx.node.properties:
+ global_mr_url = ctx.node.properties["global_mr_url"]
+ else:
+ global_mr_url = None
+
+ dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
+ ctx.logger.info("Attempting to create topic name {0}".format(topic_name))
+ t = dmc.create_topic(topic_name, topic_description, txenable, DMAAP_OWNER, replication_case, global_mr_url)
+
+ # Capture important properties from the result
+ topic = t.json()
+ ctx.instance.runtime_properties["fqtn"] = topic["fqtn"]
+
+ # test DMaaPControllerHandle functions
+ path = "myPath"
+ url = dmc._make_url(path)
+ rc = dmc._get_resource(path)
+ rc = dmc._create_resource(path, None)
+ rc = dmc._delete_resource(path)
diff --git a/dmaap/tests/test_dr_lifecycle.py b/dmaap/tests/test_dr_lifecycle.py
new file mode 100644
index 0000000..2aa65e8
--- /dev/null
+++ b/dmaap/tests/test_dr_lifecycle.py
@@ -0,0 +1,65 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+import pytest
+import requests
+from cloudify.mocks import MockCloudifyContext
+from cloudify.state import current_ctx
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from cloudify.exceptions import RecoverableError
+
+_goodosv2 = {
+ 'auth_url': 'https://example.com/identity/v2.0',
+ 'password': 'pw',
+ 'region': 'r',
+ 'tenant_name': 'tn',
+ 'username': 'un'
+}
+
+
+def test_create_feed(monkeypatch, mockconsul, mockdmaapbc):
+ import dmaapplugin
+ from dmaapplugin import dr_lifecycle
+
+ properties = {'fqdn': 'a.x.example.com', 'openstack': _goodosv2, 'feed_id': 'test_feed_id' }
+ mock_ctx = MockCloudifyContext(
+ node_id='test_node_id',
+ node_name='test_node_name',
+ properties=properties,
+ runtime_properties = {
+ "admin": { "user": "admin_user" },
+ "user": { "user": "user_user" },
+ "viewer": { "user": "viewer_user" }
+ })
+
+ current_ctx.set(mock_ctx)
+
+ kwargs = { "feed_name": "ONAP_test",
+ "feed_description": "onap dmaap plugin unit test feed"}
+
+ def fake_feed(self):
+ return {"feedId":"test_feedId", "publishURL":"test_publishURL", "logURL":"test_logURL" }
+ monkeypatch.setattr(requests.Response, "json", fake_feed)
+
+ dr_lifecycle.create_feed(**kwargs)
+ dr_lifecycle.get_existing_feed(**kwargs)
+ dr_lifecycle.delete_feed(**kwargs)
diff --git a/dmaap/tests/test_mr_lifecycle.py b/dmaap/tests/test_mr_lifecycle.py
new file mode 100644
index 0000000..4a6a583
--- /dev/null
+++ b/dmaap/tests/test_mr_lifecycle.py
@@ -0,0 +1,59 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+import pytest
+import requests
+from cloudify.mocks import MockCloudifyContext
+from cloudify.state import current_ctx
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from cloudify.exceptions import RecoverableError
+
+_goodosv2 = {
+ 'auth_url': 'https://example.com/identity/v2.0',
+ 'password': 'pw',
+ 'region': 'r',
+ 'tenant_name': 'tn',
+ 'username': 'un'
+}
+
+
+def test_create_topic(monkeypatch, mockconsul, mockdmaapbc):
+ import dmaapplugin
+ from dmaapplugin import mr_lifecycle
+ properties = {'fqdn': 'a.x.example.com', 'openstack': _goodosv2, 'fqtn': 'test_fqtn' }
+ mock_ctx = MockCloudifyContext(
+ node_id='test_node_id',
+ node_name='test_node_name',
+ properties=properties,
+ runtime_properties = {
+ "admin": { "user": "admin_user" },
+ "user": { "user": "user_user" },
+ "viewer": { "user": "viewer_user" }
+ })
+
+ current_ctx.set(mock_ctx)
+
+ kwargs = { "topic_name": "ONAP_test",
+ "topic_description": "onap dmaap plugin unit test topic"}
+
+ mr_lifecycle.create_topic(**kwargs)
+ mr_lifecycle.get_existing_topic(**kwargs)
+ mr_lifecycle.delete_topic(**kwargs)
diff --git a/dmaap/tests/test_plugin.py b/dmaap/tests/test_plugin.py
new file mode 100644
index 0000000..e2a8586
--- /dev/null
+++ b/dmaap/tests/test_plugin.py
@@ -0,0 +1,26 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+import pytest
+import requests
+from cloudify.mocks import MockCloudifyContext
+from cloudify.state import current_ctx
+from cloudify.exceptions import NonRecoverableError
+
+def test_noop():
+ pass
diff --git a/dmaap/tests/test_utils.py b/dmaap/tests/test_utils.py
new file mode 100644
index 0000000..362948d
--- /dev/null
+++ b/dmaap/tests/test_utils.py
@@ -0,0 +1,26 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+
+import pytest
+
+
+def test_random_string(monkeypatch):
+ from dmaapplugin import dmaaputils
+ target_length = 10
+ assert len(dmaaputils.random_string(target_length)) == target_length
diff --git a/dmaap/tox.ini b/dmaap/tox.ini
new file mode 100644
index 0000000..5bcede7
--- /dev/null
+++ b/dmaap/tox.ini
@@ -0,0 +1,36 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+[tox]
+envlist = py27,py36,py37,py38
+skip_missing_interpreters = true
+
+[testenv]
+deps=
+ pytest
+ coverage
+ pytest-cov
+ -rrequirements.txt
+setenv =
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov dmaapcontrollerif --cov consulif --cov dmaapplugin --cov-report xml
+ coverage xml
+ coverage report
+ coverage html
diff --git a/helm/LICENSE b/helm/LICENSE
new file mode 100644
index 0000000..0bb3eef
--- /dev/null
+++ b/helm/LICENSE
@@ -0,0 +1,29 @@
+Software license:
+================================================================================
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+Documentation license:
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+===================================================================
+Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+you may not use this documentation except in compliance with the License.
+You may obtain a copy of the License at
+ https://creativecommons.org/licenses/by/4.0/
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/helm/README.md b/helm/README.md
new file mode 100644
index 0000000..4128fba
--- /dev/null
+++ b/helm/README.md
@@ -0,0 +1,12 @@
+ONAP Helm Plugin
+================
+
+This plugin uses the ONAP Helm charts to install, uninstall, upgrade, and roll back ONAP components.
+
+## Documents
+
+See https://wiki.onap.org/display/DW/Introduction+of+Helm+Plugin
+
diff --git a/helm/helm-type.yaml b/helm/helm-type.yaml
new file mode 100644
index 0000000..25b66d5
--- /dev/null
+++ b/helm/helm-type.yaml
@@ -0,0 +1,147 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2017-2020 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+plugins:
+ helm-plugin:
+ executor: central_deployment_agent
+ package_name: helm
+ package_version: 4.1.0
+
+node_types:
+
+ onap.nodes.component:
+ derived_from: cloudify.nodes.Root
+ properties:
+ tiller_ip:
+ description: IP of tiller server
+ type: string
+ tiller_port:
+ default: local
+ description: Port of tiller server
+ type: string
+ chart_repo_url:
+ default: local
+ description: helm chart repo url
+ type: string
+ component_name:
+        description: name of the ONAP component (also used as the Helm chart name)
+ type: string
+ chart_version:
+ description: helm chart version
+ type: string
+ config_dir:
+ description: config file dir
+ default: '/opt/manager/resources/'
+ type: string
+ namespace:
+ description: k8s namespace
+ default: onap
+ config:
+ description: json object string
+ type: string
+ default: ''
+ config_url:
+        description: config file url, supports multiple urls separated by commas
+ type: string
+ default: ''
+ config_format:
+ description: config file format - json or yaml
+ type: string
+ default: 'yaml'
+ runtime_config:
+ default: ''
+        description: json object string; runtime config generated by other nodes.
+ config_set:
+ default: ''
+ description: json object string for supporting helm --set option.
+ tls_enable:
+        description: enable helm TLS
+ type: boolean
+ default: false
+ ca:
+ description: value of ca.pem
+ type: string
+ default: ''
+ cert:
+ description: value of cert.pem
+ type: string
+ default: ''
+ key:
+ description: value of key.pem
+ type: string
+ default: ''
+ stable_repo_url:
+ description: URL for stable repository
+ type: string
+ default: 'https://kubernetes-charts.storage.googleapis.com'
+ repo_user:
+ description: chart repo user name
+ type: string
+ default: ''
+ repo_user_password:
+ description: chart repo user password
+ type: string
+ default: ''
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ configure: helm-plugin.plugin.tasks.config
+ start: helm-plugin.plugin.tasks.start
+ stop: helm-plugin.plugin.tasks.stop
+ upgrade: helm-plugin.plugin.tasks.upgrade
+ rollback: helm-plugin.plugin.tasks.rollback
+ status: helm-plugin.plugin.tasks.status
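+
+# Illustrative node template using this type (all names and values below are
+# assumptions, not part of this repo):
+#
+#   so_component:
+#     type: onap.nodes.component
+#     properties:
+#       tiller_ip: 10.12.5.2
+#       tiller_port: "44134"
+#       chart_repo_url: http://chart-museum.example.org/charts
+#       component_name: so
+#       chart_version: "5.0.0"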
+
+
+workflows:
+ upgrade:
+ mapping: helm-plugin.plugin.workflows.upgrade
+ parameters:
+ node_instance_id:
+ description: The id of the node-instance that you want to modify.
+ config_set:
+ description: set option string
+ default: ''
+ config:
+ description: json object string
+ default: ''
+ config_url:
+        description: config input url, supports multiple urls separated by commas
+ default: ''
+ config_format:
+ description: config input file format
+ default: 'yaml'
+ chart_version:
+ description: chart version
+ chart_repo_url:
+ description: chart repo url
+ repo_user:
+ description: chart repo user name
+ default: ''
+ repo_user_password:
+ description: chart repo user password
+ default: ''
+ rollback:
+ mapping: helm-plugin.plugin.workflows.rollback
+ parameters:
+ node_instance_id:
+ description: The id of the node-instance that you want to modify.
+ revision:
+        description: Check the node runtime property "helm-history" to find the revision number you want to roll back to
+ status:
+ mapping: helm-plugin.plugin.workflows.status
+ parameters:
diff --git a/helm/plugin/__init__.py b/helm/plugin/__init__.py
new file mode 100644
index 0000000..7c9762b
--- /dev/null
+++ b/helm/plugin/__init__.py
@@ -0,0 +1,15 @@
+########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/helm/plugin/tasks.py b/helm/plugin/tasks.py
new file mode 100644
index 0000000..9c0e2fe
--- /dev/null
+++ b/helm/plugin/tasks.py
@@ -0,0 +1,504 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2018-2020 AT&T
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+
+import shutil
+import errno
+import sys
+import pwd
+import grp
+import os
+import re
+import getpass
+import subprocess
+import json
+import base64
+import yaml
+try:
+ from urllib.request import Request, urlopen
+except ImportError:
+ from urllib2 import Request, urlopen
+
+from cloudify import ctx
+from cloudify import exceptions
+from cloudify.decorators import operation
+from cloudify.exceptions import OperationRetry
+from cloudify.exceptions import NonRecoverableError
+from cloudify_rest_client.exceptions import CloudifyClientError
+
+
+def debug_log_mask_credentials(_command_str):
+ debug_str = _command_str
+ if _command_str.find("@") != -1:
+ head, end = _command_str.rsplit('@', 1)
+ proto, auth = head.rsplit('//', 1)
+ uname, passwd = auth.rsplit(':', 1)
+ debug_str = _command_str.replace(passwd, "************")
+ ctx.logger.debug('command {0}.'.format(debug_str))
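+    # Illustrative (values assumed): a command containing
+    # "https://user:secret@nexus.example.org" is logged as
+    # "https://user:************@nexus.example.org"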
+
+def execute_command(_command):
+ debug_log_mask_credentials(_command)
+
+ subprocess_args = {
+ 'args': _command.split(),
+ 'stdout': subprocess.PIPE,
+ 'stderr': subprocess.PIPE
+ }
+
+ debug_log_mask_credentials(str(subprocess_args))
+ try:
+ process = subprocess.Popen(**subprocess_args)
+ output, error = process.communicate()
+ except Exception as e:
+ ctx.logger.debug(str(e))
+ return False
+
+ debug_log_mask_credentials(_command)
+ ctx.logger.debug('output: {0} '.format(output))
+ ctx.logger.debug('error: {0} '.format(error))
+ ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+ if process.returncode:
+ ctx.logger.error('Error was returned while running helm command')
+ return False
+
+ return output
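+
+# Note: execute_command returns the captured stdout on success and False on
+# failure; callers below therefore check "is False" explicitly, since a
+# successful command can legitimately produce empty output.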
+
+
+def configure_admin_conf():
+ # Add the kubeadmin config to environment
+ agent_user = getpass.getuser()
+ uid = pwd.getpwnam(agent_user).pw_uid
+ gid = grp.getgrnam('docker').gr_gid
+ admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+
+ execute_command(
+ 'sudo cp {0} {1}'.format('/etc/kubernetes/admin.conf',
+ admin_file_dest))
+ execute_command('sudo chown {0}:{1} {2}'.format(uid, gid, admin_file_dest))
+
+ with open(os.path.join(os.path.expanduser('~'), '.bashrc'),
+ 'a') as outfile:
+ outfile.write('export KUBECONFIG=$HOME/admin.conf')
+ os.environ['KUBECONFIG'] = admin_file_dest
+
+
+def get_current_helm_value(chart_name):
+ tiller_host = str(ctx.node.properties['tiller_ip']) + ':' + str(
+ ctx.node.properties['tiller_port'])
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ config_dir = config_dir_root + str(ctx.deployment.id) + '/'
+ if str_to_bool(ctx.node.properties['tls_enable']):
+ getValueCommand = subprocess.Popen(
+ ["helm", "get", "values", "-a", chart_name, '--host', tiller_host,
+ '--tls', '--tls-ca-cert', config_dir + 'ca.cert.pem',
+ '--tls-cert',
+ config_dir + 'helm.cert.pem', '--tls-key',
+ config_dir + 'helm.key.pem'], stdout=subprocess.PIPE)
+ else:
+ getValueCommand = subprocess.Popen(
+ ["helm", "get", "values", "-a", chart_name, '--host', tiller_host],
+ stdout=subprocess.PIPE)
+ value = getValueCommand.communicate()[0]
+    valueMap = yaml.safe_load(value)
+ ctx.instance.runtime_properties['current-helm-value'] = valueMap
+
+
+def get_helm_history(chart_name):
+ tiller_host = str(ctx.node.properties['tiller_ip']) + ':' + str(
+ ctx.node.properties['tiller_port'])
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ config_dir = config_dir_root + str(ctx.deployment.id) + '/'
+ if str_to_bool(ctx.node.properties['tls_enable']):
+ getHistoryCommand = subprocess.Popen(
+ ["helm", "history", chart_name, '--host', tiller_host, '--tls',
+ '--tls-ca-cert', config_dir + 'ca.cert.pem', '--tls-cert',
+ config_dir + 'helm.cert.pem', '--tls-key',
+ config_dir + 'helm.key.pem'], stdout=subprocess.PIPE)
+ else:
+ getHistoryCommand = subprocess.Popen(
+ ["helm", "history", chart_name, '--host', tiller_host],
+ stdout=subprocess.PIPE)
+ history = getHistoryCommand.communicate()[0]
+ history_start_output = [line.strip() for line in history.split('\n') if
+ line.strip()]
+ for index in range(len(history_start_output)):
+ history_start_output[index] = history_start_output[index].replace('\t',
+ ' ')
+ ctx.instance.runtime_properties['helm-history'] = history_start_output
+
+
+def tls():
+ if str_to_bool(ctx.node.properties['tls_enable']):
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ config_dir = config_dir_root + str(ctx.deployment.id) + '/'
+ tls_command = ' --tls --tls-ca-cert ' + config_dir + 'ca.cert.pem ' \
+ '--tls-cert ' + \
+ config_dir + 'helm.cert.pem --tls-key ' + config_dir + \
+ 'helm.key.pem '
+ ctx.logger.debug(tls_command)
+ return tls_command
+ else:
+ return ''
+
+
+def tiller_host():
+ tiller_host = ' --host ' + str(
+ ctx.node.properties['tiller_ip']) + ':' + str(
+ ctx.node.properties['tiller_port']) + ' '
+ ctx.logger.debug(tiller_host)
+ return tiller_host
+
+
+def str_to_bool(s):
+ s = str(s)
+ if s == 'True' or s == 'true':
+ return True
+ elif s == 'False' or s == 'false':
+ return False
+ else:
+ raise ValueError('Require [Tt]rue or [Ff]alse; got: {0}'.format(s))
+
+
+def get_config_json(config_json, config_path, config_opt_f, config_file_nm):
+    config_obj = json.loads(config_json)
+ config_file = config_path + config_file_nm + ".yaml"
+ gen_config_file(config_file, config_obj)
+ config_opt_f = config_opt_f + " -f " + config_file
+ return config_opt_f
+
+
+def pop_config_info(url, config_file, f_format, repo_user, repo_user_passwd):
+ if url.find("@") != -1:
+ head, end = url.rsplit('@', 1)
+ head, auth = head.rsplit('//', 1)
+ url = head + '//' + end
+ username, password = auth.rsplit(':', 1)
+ request = Request(url)
+        base64string = base64.b64encode(
+            ('%s:%s' % (username, password)).encode()).decode()
+ request.add_header("Authorization", "Basic %s" % base64string)
+ response = urlopen(request)
+ elif repo_user != '' and repo_user_passwd != '':
+ request = Request(url)
+        base64string = base64.b64encode(
+            ('%s:%s' % (repo_user, repo_user_passwd)).encode()).decode()
+ request.add_header("Authorization", "Basic %s" % base64string)
+ response = urlopen(request)
+ else:
+ response = urlopen(url)
+
+    if f_format == 'json':
+        config_obj = json.load(response)
+    elif f_format == 'yaml':
+        config_obj = yaml.safe_load(response)
+ else:
+ raise NonRecoverableError("Unable to get config input format.")
+
+ gen_config_file(config_file, config_obj)
+
+
+def gen_config_file(config_file, config_obj):
+ try:
+ with open(config_file, 'w') as outfile:
+ yaml.safe_dump(config_obj, outfile, default_flow_style=False)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def gen_config_str(config_file, config_opt_f):
+ try:
+ with open(config_file, 'w') as outfile:
+ yaml.safe_dump(config_opt_f, outfile, default_flow_style=False)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def get_rem_config(config_url, config_input_format, config_path, config_opt_f, config_file_nm, repo_user, repo_user_passwd):
+ ctx.logger.debug("config_url=" + config_url)
+ f_cnt = 0
+ urls = [x.strip() for x in config_url.split(',')]
+ if len(urls) > 1:
+ for url in urls:
+ f_cnt = f_cnt + 1
+ config_file = config_path + config_file_nm + str(f_cnt) + ".yaml"
+ pop_config_info(url, config_file, config_input_format, repo_user, repo_user_passwd)
+ config_opt_f = config_opt_f + " -f " + config_file
+ else:
+ config_file = config_path + config_file_nm + ".yaml"
+ pop_config_info(config_url, config_file, config_input_format, repo_user, repo_user_passwd)
+ config_opt_f = config_opt_f + " -f " + config_file
+
+ return config_opt_f
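+
+# Illustrative (URLs assumed): a config_url of
+# "http://a.example.org/c1.json,http://b.example.org/c2.json" with format
+# "json" writes rc1.yaml and rc2.yaml under config_path and returns
+# " -f <config_path>rc1.yaml -f <config_path>rc2.yaml"; a single url is
+# written to rc.yaml instead ("rc" being the file name stem passed in).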
+
+
+def get_config_str(config_file):
+ if os.path.isfile(config_file):
+ with open(config_file, 'r') as config_f:
+ return config_f.read().replace('\n', '')
+ return ''
+
+
+def opt(config_file):
+ opt_str = get_config_str(config_file)
+ if opt_str != '':
+ return opt_str.replace("'", "")
+ return opt_str
+
+def repo(repo_url, repo_user, repo_user_passwd):
+ if repo_user != '' and repo_user_passwd != '' and repo_url.find("@") == -1:
+ proto, ip = repo_url.rsplit('//', 1)
+ return proto + '//' + repo_user + ':' + repo_user_passwd + '@' + ip
+ else:
+ return repo_url
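+
+# Illustrative (values assumed): repo('https://nexus.example.org/repo', 'u', 'pw')
+# returns 'https://u:pw@nexus.example.org/repo'; URLs that already embed
+# credentials, or an empty user/password, pass through unchanged.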
+
+
+@operation
+def config(**kwargs):
+ # create helm value file on K8s master
+ configJson = str(ctx.node.properties['config'])
+ configUrl = str(ctx.node.properties['config_url'])
+ configUrlInputFormat = str(ctx.node.properties['config_format'])
+ runtime_config = str(ctx.node.properties['runtime_config']) # json
+ componentName = ctx.node.properties['component_name']
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ stable_repo_url = str(ctx.node.properties['stable_repo_url'])
+ config_opt_set = str(ctx.node.properties['config_set'])
+ repo_user = str(ctx.node.properties['repo_user'])
+ repo_user_passwd = str(ctx.node.properties['repo_user_password'])
+ ctx.logger.debug("debug " + configJson + runtime_config)
+ # load input config
+ config_dir = config_dir_root + str(ctx.deployment.id)
+
+ if not os.path.exists(config_dir):
+ try:
+ os.makedirs(config_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ ctx.logger.debug('tls-enable type ' + str(
+ type(str_to_bool(ctx.node.properties['tls_enable']))))
+
+ # create TLS cert files
+ if str_to_bool(ctx.node.properties['tls_enable']):
+ ctx.logger.debug('tls enable')
+ ca_value = ctx.node.properties['ca']
+ cert_value = ctx.node.properties['cert']
+ key_value = ctx.node.properties['key']
+        with open(config_dir + '/ca.cert.pem', "w+") as ca:
+            ca.write(ca_value)
+        with open(config_dir + '/helm.cert.pem', "w+") as cert:
+            cert.write(cert_value)
+        with open(config_dir + '/helm.key.pem', "w+") as key:
+            key.write(key_value)
+ else:
+ ctx.logger.debug('tls disable')
+
+ config_path = config_dir + '/' + componentName + '/'
+ ctx.logger.debug(config_path)
+
+ if os.path.exists(config_path):
+ shutil.rmtree(config_path)
+
+ try:
+ os.makedirs(config_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ config_opt_f = ""
+ if configJson == '' and configUrl == '':
+ ctx.logger.debug("Will use default HELM value")
+ elif configJson == '' and configUrl != '':
+ config_opt_f = get_rem_config(configUrl, configUrlInputFormat, config_path, config_opt_f, "rc", repo_user, repo_user_passwd)
+ elif configJson != '' and configUrl == '':
+ config_opt_f = get_config_json(configJson, config_path, config_opt_f, "lc")
+ else:
+ raise NonRecoverableError("Unable to get config input")
+
+ ctx.logger.debug("debug check runtime config")
+ if runtime_config == '':
+ ctx.logger.debug("there is no runtime config value")
+ else:
+ config_opt_f = get_config_json(runtime_config, config_path, config_opt_f, "rt")
+
+ if configUrl != '' or configJson != '' or runtime_config != '':
+ config_file = config_path + ".config_file"
+ gen_config_str(config_file, config_opt_f)
+
+ if config_opt_set != '':
+ config_file = config_path + ".config_set"
+ config_opt_set = " --set " + config_opt_set
+ gen_config_str(config_file, config_opt_set)
+
+ output = execute_command(
+ 'helm init --client-only --stable-repo-url ' + repo(stable_repo_url, repo_user, repo_user_passwd))
+    if output is False:
+ raise NonRecoverableError("helm init failed")
+
+
+@operation
+def start(**kwargs):
+ # install the ONAP Helm chart
+ # get properties from node
+ repo_user = str(ctx.node.properties['repo_user'])
+ repo_user_passwd = str(ctx.node.properties['repo_user_password'])
+ chartRepo = ctx.node.properties['chart_repo_url']
+ componentName = ctx.node.properties['component_name']
+ chartVersion = str(ctx.node.properties['chart_version'])
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ namespace = ctx.node.properties['namespace']
+
+ config_path = config_dir_root + str(
+ ctx.deployment.id) + '/' + componentName + '/'
+ chart = chartRepo + "/" + componentName + "-" + str(chartVersion) + ".tgz"
+ chartName = namespace + "-" + componentName
+ config_file = config_path + ".config_file"
+ config_set = config_path + ".config_set"
+ installCommand = 'helm install ' + repo(chart, repo_user, repo_user_passwd) + ' --name ' + chartName + \
+ ' --namespace ' + namespace + opt(config_file) + \
+ opt(config_set) + tiller_host() + tls()
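+
+    # Illustrative resulting command (all values assumed):
+    #   helm install https://u:pw@repo.example.org/so-5.0.0.tgz --name onap-so
+    #   --namespace onap -f <config_file> --set <overrides> --host 1.2.3.4:44134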
+
+ output = execute_command(installCommand)
+    if output is False:
+        return ctx.operation.retry(
+            message='helm install failed, retrying after 5 seconds',
+            retry_after=5)
+
+ get_current_helm_value(chartName)
+ get_helm_history(chartName)
+
+
+@operation
+def stop(**kwargs):
+ # delete the ONAP helm chart
+ # configure_admin_conf()
+ # get properties from node
+ namespace = ctx.node.properties['namespace']
+ component = ctx.node.properties['component_name']
+ chartName = namespace + "-" + component
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ # Delete helm chart
+ command = 'helm delete --purge ' + chartName + tiller_host() + tls()
+ output = execute_command(command)
+    if output is False:
+ raise NonRecoverableError("helm delete failed")
+ config_path = config_dir_root + str(
+ ctx.deployment.id) + '/' + component
+
+ if os.path.exists(config_path):
+ shutil.rmtree(config_path)
+
+
+@operation
+def upgrade(**kwargs):
+ config_dir_root = str(ctx.node.properties['config_dir'])
+ componentName = ctx.node.properties['component_name']
+ namespace = ctx.node.properties['namespace']
+ repo_user = kwargs['repo_user']
+ repo_user_passwd = kwargs['repo_user_passwd']
+ configJson = kwargs['config']
+ chartRepo = kwargs['chart_repo']
+ chartVersion = kwargs['chart_version']
+ config_set = kwargs['config_set']
+ config_json = kwargs['config_json']
+ config_url = kwargs['config_url']
+ config_format = kwargs['config_format']
+ config_path = config_dir_root + str(
+ ctx.deployment.id) + '/' + componentName + '/'
+
+ # ctx.logger.debug('debug ' + str(configJson))
+ chartName = namespace + "-" + componentName
+ chart = chartRepo + "/" + componentName + "-" + chartVersion + ".tgz"
+
+ config_opt_f = ""
+ if config_json == '' and config_url == '':
+ ctx.logger.debug("Will use default HELM values")
+ elif config_json == '' and config_url != '':
+ config_opt_f = get_rem_config(config_url, config_format, config_path, config_opt_f, "ru", repo_user, repo_user_passwd)
+ elif config_json != '' and config_url == '':
+ config_opt_f = get_config_json(config_json, config_path, config_opt_f, "lu")
+ else:
+ raise NonRecoverableError("Unable to get upgrade config input")
+
+ config_upd = ""
+ if config_url != '' or config_json != '':
+ config_upd = config_path + ".config_upd"
+ gen_config_str(config_upd, config_opt_f)
+
+ config_upd_set = ""
+ if config_set != '':
+ config_upd_set = config_path + ".config_upd_set"
+ config_opt_set = " --set " + config_set
+ gen_config_str(config_upd_set, config_opt_set)
+
+ upgradeCommand = 'helm upgrade ' + chartName + ' ' + repo(chart, repo_user, repo_user_passwd) + opt(config_upd) + \
+ opt(config_upd_set) + tiller_host() + tls()
+
+ output = execute_command(upgradeCommand)
+    if output is False:
+        return ctx.operation.retry(
+            message='helm upgrade failed, retrying after 5 seconds',
+            retry_after=5)
+ get_current_helm_value(chartName)
+ get_helm_history(chartName)
+
+
+@operation
+def rollback(**kwargs):
+ # rollback to some revision
+ componentName = ctx.node.properties['component_name']
+ namespace = ctx.node.properties['namespace']
+ revision = kwargs['revision']
+ # configure_admin_conf()
+ chartName = namespace + "-" + componentName
+ rollbackCommand = 'helm rollback ' + chartName + ' ' + revision + tiller_host() + tls()
+ output = execute_command(rollbackCommand)
+    if output is False:
+        return ctx.operation.retry(
+            message='helm rollback failed, retrying after 5 seconds',
+            retry_after=5)
+ get_current_helm_value(chartName)
+ get_helm_history(chartName)
+
+
+@operation
+def status(**kwargs):
+ componentName = ctx.node.properties['component_name']
+ namespace = ctx.node.properties['namespace']
+
+ chartName = namespace + "-" + componentName
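+    # build the helm status command; tiller_host() and tls() contribute extra connection flags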
+ statusCommand = 'helm status ' + chartName + tiller_host() + tls()
+ output = execute_command(statusCommand)
+    if output is False:
+        return ctx.operation.retry(
+            message='helm status failed, retry after 5 seconds',
+            retry_after=5)
+
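+    # keep non-empty lines and replace tabs so the stored status is clean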
+    status_output = [line.strip().replace('\t', ' ')
+                     for line in output.split('\n') if line.strip()]
+    ctx.instance.runtime_properties['install-status'] = status_output
diff --git a/helm/plugin/tests/__init__.py b/helm/plugin/tests/__init__.py
new file mode 100644
index 0000000..230e4c9
--- /dev/null
+++ b/helm/plugin/tests/__init__.py
@@ -0,0 +1,30 @@
+# ================================================================================
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+#
+#
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+# you may not use this documentation except in compliance with the License.
+# You may obtain a copy of the License at
+# https://creativecommons.org/licenses/by/4.0/
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/helm/plugin/tests/blueprint/blueprint.yaml b/helm/plugin/tests/blueprint/blueprint.yaml
new file mode 100644
index 0000000..2a7198d
--- /dev/null
+++ b/helm/plugin/tests/blueprint/blueprint.yaml
@@ -0,0 +1,85 @@
+# DSL version; it should appear in the main blueprint.yaml
+# and may appear in other imports. In that case, the versions must match.
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+  # importing cloudify related types, plugins, workflows, etc.
+  # To speed things up, you can download this file,
+  # place it in the blueprint directory, and import it
+  # from there instead.
+ - http://www.getcloudify.org/spec/cloudify/4.1.1/types.yaml
+ # relative import of plugin.yaml that resides in the blueprint directory
+ - plugin/test_plugin.yaml
+
+inputs:
+ # example input that could be injected by test
+ test_input:
+ description: an input for the test
+ default: default_test_input
+ tiller-server-ip:
+ default: 1.1.1.1
+ tiller-server-port:
+ default: 8888
+ namespace:
+ default: onap
+ chart-repo-url:
+ default: local
+  chart-version:
+ default: 2.0.0
+ jsonConfig:
+ default: ''
+ config-url:
+ default: ''
+ config-set:
+ default: ''
+ config-format:
+ default: 'json'
+ tls-enable:
+ type: boolean
+ default: false
+ config-dir:
+ type: string
+ default: './'
+ repo-user:
+ type: string
+ default: ''
+ repo-user-password:
+ type: string
+ default: ''
+ stable-repo-url:
+ type: string
+ default: 'http://0.0.0.0/stable'
+
+
+node_templates:
+ # defining a single node template that will serve as our test node
+ test_node:
+ # using base cloudify type
+ type: onap.nodes.component
+ properties:
+ tiller_ip: { get_input: tiller-server-ip }
+ tiller_port: { get_input: tiller-server-port }
+ component_name: test_node
+ chart_repo_url: { get_input: chart-repo-url }
+ chart_version: { get_input: chart-version }
+ namespace: { get_input: namespace }
+ config: { get_input: jsonConfig}
+ config_set: { get_input: config-set}
+ config_url: { get_input: config-url}
+ repo_user: { get_input: repo-user}
+ repo_user_password: { get_input: repo-user-password}
+ config_format: { get_input: config-format}
+ tls_enable: { get_input: tls-enable}
+ ca: "result of get_secret ca_value"
+ cert: "result of get_secret cert_value"
+ key: "result of get_secret key_value"
+ config_dir: { get_input: config-dir}
+ stable_repo_url: { get_input: stable-repo-url}
+
+outputs:
+  # example output that could be used to simplify assertions in tests
+ test_output:
+ description: an output for the test
+ value:
+ helm-value: { get_attribute: [test_node, current-helm-value] }
+ helm-history: { get_attribute: [test_node, helm-history] }
diff --git a/helm/plugin/tests/blueprint/plugin/test_plugin.yaml b/helm/plugin/tests/blueprint/plugin/test_plugin.yaml
new file mode 100644
index 0000000..e9f874f
--- /dev/null
+++ b/helm/plugin/tests/blueprint/plugin/test_plugin.yaml
@@ -0,0 +1,139 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2017-2020 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+plugins:
+ helm-plugin:
+ executor: central_deployment_agent
+ package_name: helm
+ install: false
+
+node_types:
+
+ onap.nodes.component:
+ derived_from: cloudify.nodes.Root
+ properties:
+ tiller_ip:
+ description: IP of tiller server
+ type: string
+ tiller_port:
+ default: local
+ description: Port of tiller server
+ type: string
+ chart_repo_url:
+ default: local
+ description: helm chart repo url
+ type: string
+ component_name:
+ description: onap component string
+ type: string
+ chart_version:
+ description: helm chart version
+ type: string
+ config_dir:
+ description: config file dir
+ default: '/opt/manager/resources/'
+ type: string
+ namespace:
+ description: k8s namespace
+ default: onap
+ config:
+ description: String format config file
+ type: string
+ default: ''
+ config_set:
+        description: String format values for the helm --set option
+ type: string
+ default: ''
+ config_url:
+ description: String format config file url
+ type: string
+ default: ''
+ config_format:
+ description: String format config file format
+ type: string
+ default: 'json'
+ runtime_config:
+ default: ''
+        description: String format json object used to save the runtime config generated by other nodes.
+ tls_enable:
+        description: enable helm TLS
+ type: boolean
+ default: false
+ ca:
+ description: value of ca.pem
+ type: string
+ default: ''
+ cert:
+ description: value of cert.pem
+ type: string
+ default: ''
+ key:
+ description: value of key.pem
+ type: string
+ default: ''
+ stable_repo_url:
+ description: URL for stable repository
+ type: string
+ default: 'https://kubernetes-charts.storage.googleapis.com'
+ repo_user:
+ type: string
+ default: ''
+ repo_user_password:
+ type: string
+ default: ''
+
+
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ configure: helm-plugin.plugin.tasks.config
+ start: helm-plugin.plugin.tasks.start
+ stop: helm-plugin.plugin.tasks.stop
+ upgrade: helm-plugin.plugin.tasks.upgrade
+ rollback: helm-plugin.plugin.tasks.rollback
+
+
+workflows:
+ upgrade:
+ mapping: helm-plugin.plugin.workflows.upgrade
+ parameters:
+ node_instance_id:
+ description: The id of the node-instance that you want to modify.
+ config_set:
+ description: The set option string
+ default: ''
+ config:
+ description: The changes to the new config json
+ default: ''
+ config_url:
+ description: The config input url
+ default: ''
+ config_format:
+ description: The config url input format
+ default: 'json'
+ chart_version:
+ description: chart version
+ chart_repo_url:
+ description: chart repo url
+ rollback:
+ mapping: helm-plugin.plugin.workflows.rollback
+ parameters:
+ node_instance_id:
+ description: The id of the node-instance that you want to modify.
+ default: 'node_instance_id'
+ revision:
+        description: Check the node runtime property history to find the revision number you want to roll back to
+ default: 1
diff --git a/helm/plugin/tests/test_plugin.py b/helm/plugin/tests/test_plugin.py
new file mode 100644
index 0000000..e18475a
--- /dev/null
+++ b/helm/plugin/tests/test_plugin.py
@@ -0,0 +1,192 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2018-2020 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+
+
+from os import path
+import unittest
+import mock
+import plugin.tasks
+
+from cloudify.test_utils import workflow_test
+from cloudify.mocks import MockNodeInstanceContext
+from cloudify.mocks import MockCloudifyContext
+from cloudify.state import current_ctx
+from cloudify import ctx
+
+
+class TestPlugin(unittest.TestCase):
+
+ @workflow_test(path.join('blueprint', 'blueprint.yaml'),
+ resources_to_copy=[(path.join('blueprint', 'plugin',
+ 'test_plugin.yaml'),
+ 'plugin')])
+ @mock.patch('plugin.tasks.os.remove')
+ @mock.patch('plugin.tasks.execute_command')
+ def test_stop(self, cfy_local, mock_execute_command, mock_os_remove):
+        # execute uninstall workflow
+ """
+
+ :param cfy_local:
+ """
+ with mock.patch('plugin.tasks.shutil.rmtree'):
+ cfy_local.execute('uninstall', task_retries=0)
+
+ # extract single node instance
+ instance = cfy_local.storage.get_node_instances()[0]
+
+ mock_execute_command.assert_called_with('helm delete --purge onap-test_node --host 1.1.1.1:8888 ')
+
+ @workflow_test(path.join('blueprint', 'blueprint.yaml'),
+ resources_to_copy=[(path.join('blueprint', 'plugin',
+ 'test_plugin.yaml'),
+ 'plugin')])
+ @mock.patch('plugin.tasks.execute_command')
+ def test_start(self, cfy_local, mock_execute_command):
+ # execute install workflow
+ """
+
+ :param cfy_local:
+ """
+ with mock.patch('plugin.tasks.config'):
+ with mock.patch('plugin.tasks.get_current_helm_value'):
+ with mock.patch('plugin.tasks.get_helm_history'):
+ cfy_local.execute('install', task_retries=0)
+
+ # extract single node instance
+ instance = cfy_local.storage.get_node_instances()[0]
+
+ mock_execute_command.assert_called_with('helm install local/test_node-2.0.0.tgz --name onap-test_node --namespace onap --host 1.1.1.1:8888 ')
+
+ @workflow_test(path.join('blueprint', 'blueprint.yaml'),
+ resources_to_copy=[(path.join('blueprint', 'plugin',
+ 'test_plugin.yaml'),
+ 'plugin')])
+ @mock.patch('plugin.tasks.execute_command')
+ def test_config(self, cfy_local, mock_execute_command):
+ # execute install workflow
+ """
+
+ :param cfy_local:
+ """
+ with mock.patch('plugin.tasks.start'):
+ cfy_local.execute('install', task_retries=0)
+
+ # extract single node instance
+ instance = cfy_local.storage.get_node_instances()[0]
+
+ mock_execute_command.assert_called_with('helm init --client-only --stable-repo-url http://0.0.0.0/stable')
+
+ @workflow_test(path.join('blueprint', 'blueprint.yaml'),
+ resources_to_copy=[(path.join('blueprint', 'plugin',
+ 'test_plugin.yaml'),
+ 'plugin')])
+ def test_rollback(self, cfy_local):
+ # execute install workflow
+ """
+
+ :param cfy_local:
+ """
+ node_instance_id = 'node_instance_id'
+ revision = 1
+ try:
+ cfy_local.execute('rollback', task_retries=0,
+ parameters={'node_instance_id': node_instance_id, 'revision': revision})
+            self.fail('Expected exception because the operation does not exist')
+        except Exception as e:
+            self.assertIn('operation not available', str(e))
+
+ @workflow_test(path.join('blueprint', 'blueprint.yaml'),
+ resources_to_copy=[(path.join('blueprint', 'plugin',
+ 'test_plugin.yaml'),
+ 'plugin')])
+ def test_upgrade(self, cfy_local):
+ # execute install workflow
+ """
+
+ :param cfy_local:
+ """
+ node_instance_id = 'node_instance_id'
+ config_json = ''
+ config_url = 'http://test:test@11.22.33.44:80/stable'
+ config_format = 'json'
+ chartVersion = '2.0.0'
+ chartRepo = 'repo'
+ repo_user = ''
+ repo_user_passwd = ''
+ try:
+ cfy_local.execute('upgrade', task_retries=0,
+ parameters={'node_instance_id': node_instance_id, 'config': config_json,
+ 'config_url': config_url, 'config_format': config_format,
+ 'chart_version': chartVersion, 'chart_repo_url': chartRepo,
+ 'repo_user': repo_user, 'repo_user_password': repo_user_passwd})
+            self.fail('Expected exception because the operation does not exist')
+        except Exception as e:
+            self.assertIn('operation not available', str(e))
+
+ @mock.patch('plugin.tasks.execute_command')
+ def test_op_rollback(self, mock_execute_command):
+ # test operation rollback
+ """
+
+ :rollback operation test:
+ """
+ props = {
+ 'component_name': 'test_node',
+ 'namespace': 'onap',
+ 'tiller_port': '8888',
+ 'tiller_ip': '1.1.1.1',
+ 'tls_enable': 'false'
+ }
+ args = {'revision': '1'}
+ mock_ctx = MockCloudifyContext(node_id='test_node_id', node_name='test_node_name',
+ properties=props)
+ try:
+ current_ctx.set(mock_ctx)
+ with mock.patch('plugin.tasks.get_current_helm_value'):
+ with mock.patch('plugin.tasks.get_helm_history'):
+ plugin.tasks.rollback(**args)
+ finally:
+ current_ctx.clear()
+
+ @mock.patch('plugin.tasks.execute_command')
+ def test_op_upgrade(self, mock_execute_command):
+ # test operation upgrade
+ """
+
+ :upgrade operation test:
+ """
+ props = {
+ 'component_name': 'test_node',
+ 'namespace': 'onap',
+ 'tiller_port': '8888',
+ 'tiller_ip': '1.1.1.1',
+ 'tls_enable': 'false',
+ 'config_dir': '/tmp'
+ }
+ args = {'revision': '1', 'config': '', 'chart_repo': 'repo', 'chart_version': '2',
+ 'config_set': 'config_set', 'config_json': '', 'config_url': '',
+ 'config_format': 'format', 'repo_user': '', 'repo_user_passwd': ''}
+ mock_ctx = MockCloudifyContext(node_id='test_node_id', node_name='test_node_name',
+ properties=props)
+ try:
+ current_ctx.set(mock_ctx)
+ with mock.patch('plugin.tasks.get_current_helm_value'):
+ with mock.patch('plugin.tasks.get_helm_history'):
+ with mock.patch('plugin.tasks.gen_config_str'):
+ plugin.tasks.upgrade(**args)
+ finally:
+ current_ctx.clear()
diff --git a/helm/plugin/workflows.py b/helm/plugin/workflows.py
new file mode 100644
index 0000000..f916eac
--- /dev/null
+++ b/helm/plugin/workflows.py
@@ -0,0 +1,75 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2018-2020 AT&T
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+
+from cloudify.decorators import workflow
+from cloudify.workflows import ctx
+from cloudify.exceptions import NonRecoverableError
+import json
+import yaml
+import base64
+
+
+@workflow
+def upgrade(node_instance_id, config_set, config, config_url, config_format,
+ chart_version, chart_repo_url, repo_user, repo_user_password, **kwargs):
+ node_instance = ctx.get_node_instance(node_instance_id)
+
+    if not node_instance:
+ raise NonRecoverableError(
+ 'No such node_instance_id in deployment: {0}.'.format(
+ node_instance_id))
+
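+    # assemble the kwargs passed to the node's upgrade operation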
+ kwargs = {}
+ kwargs['config'] = ''
+ kwargs['chart_version'] = str(chart_version)
+ kwargs['chart_repo'] = str(chart_repo_url)
+ kwargs['config_set'] = str(config_set)
+ kwargs['config_json'] = str(config)
+ kwargs['config_url'] = str(config_url)
+ kwargs['config_format'] = str(config_format)
+ kwargs['repo_user'] = str(repo_user)
+ kwargs['repo_user_passwd'] = str(repo_user_password)
+ operation_args = {'operation': 'upgrade', }
+ operation_args['kwargs'] = kwargs
+ node_instance.execute_operation(**operation_args)
+
+
+@workflow
+def rollback(node_instance_id, revision, **kwargs):
+ node_instance = ctx.get_node_instance(node_instance_id)
+
+    if not node_instance:
+ raise NonRecoverableError(
+ 'No such node_instance_id in deployment: {0}.'.format(
+ node_instance_id))
+
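+    # only the target revision is passed to the rollback operation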
+ kwargs = {}
+ kwargs['revision'] = str(revision)
+ operation_args = {'operation': 'rollback', }
+ operation_args['kwargs'] = kwargs
+ node_instance.execute_operation(**operation_args)
+
+
+@workflow
+def status(**kwargs):
+
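+    # run the status operation on every node instance in the deployment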
+ for node in ctx.nodes:
+ for node_instance in node.instances:
+ kwargs = {}
+ operation_args = {'operation': 'status', }
+ operation_args['kwargs'] = kwargs
+ node_instance.execute_operation(**operation_args)
diff --git a/helm/pom.xml b/helm/pom.xml
new file mode 100644
index 0000000..caf1331
--- /dev/null
+++ b/helm/pom.xml
@@ -0,0 +1,336 @@
+<?xml version="1.0"?>
+<!--
+============LICENSE_START=======================================================
+org.onap.dcaegen2
+================================================================================
+Copyright (c) 2017,2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+  <!-- CHANGE THE FOLLOWING 3 ELEMENTS for your own repo -->
+ <groupId>org.onap.dcaegen2.platform.plugins</groupId>
+ <artifactId>helm</artifactId>
+ <name>helm</name>
+
+ <version>4.2.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <!-- name from the setup.py file -->
+ <plugin.name>helm</plugin.name>
+ <!-- path to directory containing the setup.py relative to this file -->
+ <plugin.subdir>.</plugin.subdir>
+ <!-- path of types file itself relative to this file -->
+ <typefile.source>helm-type.yaml</typefile.source>
+ <!-- path, in repo, to store type file -->
+ <typefile.dest>type_files/helm/4.0.2/helm-type.yaml</typefile.dest>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>nosetests.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPaths>coverage.xml</sonar.python.coverage.reportPaths>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginName>Python</sonar.pluginName>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>plugin/tests/*,setup.py</sonar.exclusions>
+ </properties>
+
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>sonar-maven-plugin</artifactId>
+ <version>2.7.1</version>
+ </plugin>
+
+ <!-- nexus-staging-maven-plugin is called during deploy phase by default behavior.
+ we do not need it -->
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <version>1.6.7</version>
+ <configuration>
+ <skipNexusStagingDeployMojo>true</skipNexusStagingDeployMojo>
+ </configuration>
+ </plugin>
+
+ <!-- maven-deploy-plugin is called during deploy but we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <version>2.8</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
+
+ <!-- first disable the default Java plugins at various stages -->
+ <!-- maven-resources-plugin is called during "*resource" phases by default behavior. it prepares the resources
+ dir. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.6</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-compiler-plugin is called during "compile" phases by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-jar-plugin is called during "compile" phase by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <id>default-jar</id>
+ <phase/>
+ </execution>
+ </executions>
+ </plugin>
+
+ <!-- maven-install-plugin is called during "install" phase by default behavior. it tries to copy stuff under
+ target dir to ~/.m2. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-surefire-plugin is called during "test" phase by default behavior. it triggers junit test.
+ we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.12.4</version>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <MVN_SERVER_ID>${project.distributionManagement.snapshotRepository.id}</MVN_SERVER_ID>
+ <TYPE_FILE_SOURCE>${typefile.source}</TYPE_FILE_SOURCE>
+ <TYPE_FILE_DEST>${typefile.dest}</TYPE_FILE_DEST>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/helm/requirements.txt b/helm/requirements.txt
new file mode 100644
index 0000000..038951f
--- /dev/null
+++ b/helm/requirements.txt
@@ -0,0 +1,17 @@
+pyyaml>=3.12
+#cloudify-common>=5.0.5
+# The released version of cloudify-common has limited python3 support.
+#
+# This build linked from github is more complete in this regard, and
+# has at least the tests written for pgaas passing. The other plugins
+# do not seem to have tests that exercise the unconverted parts.
+#
+# The build was created from a WIP branch, but only parts of the branch
+# were merged for release 5.0.5.
+#
+# This means that while this plugin is ready for python3, all the plugins
+# will need to wait for a python3-supporting release of cloudify.
+# When such a version is released, the single requirement should suffice.
+# The install_requires in setup.py may also be uncommented then.
+cloudify-common>=5.0.5; python_version<"3"
+cloudify-common @ git+https://github.com/cloudify-cosmo/cloudify-common@cy-1374-python3#egg=cloudify-common==5.0.5; python_version>="3"
diff --git a/helm/setup.py b/helm/setup.py
new file mode 100644
index 0000000..06e1b7a
--- /dev/null
+++ b/helm/setup.py
@@ -0,0 +1,48 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2018-2020 AT&T
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+
+
+from setuptools import setup
+
+# Replace the placeholders with values for your project
+
+setup(
+
+ # Do not use underscores in the plugin name.
+ name='helm',
+ version='4.2.0',
+ author='Nicolas Hu(AT&T)',
+ author_email='jh245g@att.com',
+ description='This plugin will install/uninstall/upgrade/rollback helm '
+ 'charts of ONAP components. ',
+
+ # This must correspond to the actual packages in the plugin.
+ packages=['plugin'],
+
+ license='LICENSE',
+ zip_safe=False,
+ install_requires=[
+ 'pyyaml>=3.12',
+ # The package specified by requirements would be replaced with 5.0.5.1+
+ # when this package is installed. That currently breaks on python3.
+ #'cloudify-common>=5.0.5',
+ ],
+    tests_require=[
+ 'nose',
+ ],
+)
diff --git a/helm/tox.ini b/helm/tox.ini
new file mode 100644
index 0000000..ca42099
--- /dev/null
+++ b/helm/tox.ini
@@ -0,0 +1,76 @@
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+[tox]
+envlist = flake8,py27,py36,py37,py38
+skip_missing_interpreters = true
+
+[testenv:py27]
+deps =
+ # this fixes issue with tox installing coverage --pre
+ coverage==3.7.1
+ nose-cov
+ mock
+ testfixtures
+ nose
+ -rrequirements.txt
+
+[testenv:py36]
+deps =
+ coverage
+ nose-cov
+ mock
+ testfixtures
+ nose
+ -rrequirements.txt
+
+[testenv:py37]
+deps =
+ coverage
+ nose-cov
+ mock
+ testfixtures
+ nose
+ -rrequirements.txt
+
+[testenv:py38]
+deps =
+ coverage
+ nose-cov
+ mock
+ testfixtures
+ nose
+ -rrequirements.txt
+
+[testenv]
+passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+
+# commands=nosetests --with-cov --cov-report term-missing --cov plugin plugin/tests
+# commands=nosetests --with-xunit --with-cov --cov-report=xml --cov plugin plugin/tests
+commands=nosetests --with-xunit --with-cov --cov-report=xml --cov plugin
+ coverage report
+ coverage html
+
+[flake8]
+ignore=E302,F401,E501,E712,F811,F841,E127,E128,W291
+
+[testenv:flake8]
+deps =
+ flake8
+ -rrequirements.txt
+commands=flake8 plugin
diff --git a/k8s/.gitignore b/k8s/.gitignore
index be63b67..6996455 100644
--- a/k8s/.gitignore
+++ b/k8s/.gitignore
@@ -1,71 +1,2 @@
+# local additions to plugins .gitignore
cfyhelper
-.cloudify
-*.swp
-*.swn
-*.swo
-.DS_Store
-.project
-.pydevproject
-venv
-
-
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coveragerc
-.coverage.*
-.cache
-.pytest_cache/
-xunit-results.xml
-nosetests.xml
-coverage.xml
-*,cover
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
diff --git a/k8s/LICENSE.txt b/k8s/LICENSE.txt
index 43098d1..a4ece14 100644
--- a/k8s/LICENSE.txt
+++ b/k8s/LICENSE.txt
@@ -1,7 +1,7 @@
============LICENSE_START=======================================================
org.onap.dcae
================================================================================
-Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,10 +16,9 @@ See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END=========================================================
-ECOMP is a trademark and service mark of AT&T Intellectual Property.
-Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
===================================================================
Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
you may not use this documentation except in compliance with the License.
diff --git a/k8s/configure/__init__.py b/k8s/configure/__init__.py
index b986659..8099c73 100644
--- a/k8s/configure/__init__.py
+++ b/k8s/configure/__init__.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +16,3 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property. \ No newline at end of file
diff --git a/k8s/k8s-node-type.yaml b/k8s/k8s-node-type.yaml
index 3220bba..af50e70 100644
--- a/k8s/k8s-node-type.yaml
+++ b/k8s/k8s-node-type.yaml
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2020 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
diff --git a/k8s/k8sclient/__init__.py b/k8s/k8sclient/__init__.py
index 6a53440..9974199 100644
--- a/k8s/k8sclient/__init__.py
+++ b/k8s/k8sclient/__init__.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,5 +16,4 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-from .k8sclient import deploy, undeploy, is_available, scale, upgrade, rollback, execute_command_in_deployment, parse_ports \ No newline at end of file
+from .k8sclient import deploy, undeploy, is_available, scale, upgrade, rollback, execute_command_in_deployment, parse_ports
diff --git a/k8s/k8sclient/k8sclient.py b/k8s/k8sclient/k8sclient.py
index e73d96a..0096182 100644
--- a/k8s/k8sclient/k8sclient.py
+++ b/k8s/k8sclient/k8sclient.py
@@ -17,7 +17,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import os
import re
import uuid
diff --git a/k8s/k8splugin/__init__.py b/k8s/k8splugin/__init__.py
index 7ca69b7..85e4411 100644
--- a/k8s/k8splugin/__init__.py
+++ b/k8s/k8splugin/__init__.py
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
# REVIEW: Tried to source the version from here but you run into import issues
# because "tasks" module is loaded. This method seems to be the PEP 396
@@ -25,4 +24,4 @@
from .tasks import create_for_components, create_for_components_with_streams, \
create_and_start_container, create_and_start_container_for_components, \
- stop_and_remove_container, cleanup_discovery, policy_update, scale, update_image \ No newline at end of file
+ stop_and_remove_container, cleanup_discovery, policy_update, scale, update_image
diff --git a/k8s/k8splugin/cloudify_importer.py b/k8s/k8splugin/cloudify_importer.py
index 10a6cf5..b2e7bad 100644
--- a/k8s/k8splugin/cloudify_importer.py
+++ b/k8s/k8splugin/cloudify_importer.py
@@ -1,6 +1,7 @@
# #######
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
# Copyright (c) 2019 Pantheon.tech. All rights reserved
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/k8s/k8splugin/decorators.py b/k8s/k8splugin/decorators.py
index b9b32bf..4137601 100644
--- a/k8s/k8splugin/decorators.py
+++ b/k8s/k8splugin/decorators.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import copy
diff --git a/k8s/k8splugin/discovery.py b/k8s/k8splugin/discovery.py
index 76c160a..99f5023 100644
--- a/k8s/k8splugin/discovery.py
+++ b/k8s/k8splugin/discovery.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import json
import logging
diff --git a/k8s/k8splugin/exceptions.py b/k8s/k8splugin/exceptions.py
index 0d8a341..1c398c6 100644
--- a/k8s/k8splugin/exceptions.py
+++ b/k8s/k8splugin/exceptions.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
class DockerPluginDeploymentError(RuntimeError):
pass
diff --git a/k8s/k8splugin/utils.py b/k8s/k8splugin/utils.py
index 6475aaa..73eef61 100644
--- a/k8s/k8splugin/utils.py
+++ b/k8s/k8splugin/utils.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import string
import random
diff --git a/k8s/tests/common.py b/k8s/tests/common.py
index 02b57e6..1801433 100644
--- a/k8s/tests/common.py
+++ b/k8s/tests/common.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2019-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2020 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/k8s/tests/conftest.py b/k8s/tests/conftest.py
index 4716b5a..ae09da1 100644
--- a/k8s/tests/conftest.py
+++ b/k8s/tests/conftest.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2018-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import pytest
diff --git a/k8s/tests/test_decorators.py b/k8s/tests/test_decorators.py
index 552fa4b..2cb8288 100644
--- a/k8s/tests/test_decorators.py
+++ b/k8s/tests/test_decorators.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
def test_wrapper_merge_inputs(mockconfig):
from k8splugin import decorators as dec
@@ -31,4 +30,4 @@ def test_wrapper_merge_inputs(mockconfig):
expected = { "app_config": {"nested": { "a": 789, "b": 456, "c": "zyx" },
"foo": "duh"}, "image": "some-docker-image" }
- assert expected == dec._wrapper_merge_inputs(task_func, properties, **kwargs) \ No newline at end of file
+ assert expected == dec._wrapper_merge_inputs(task_func, properties, **kwargs)
diff --git a/k8s/tests/test_discovery.py b/k8s/tests/test_discovery.py
index 24e45ee..660a820 100644
--- a/k8s/tests/test_discovery.py
+++ b/k8s/tests/test_discovery.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import pytest
from functools import partial
@@ -24,7 +23,7 @@ import requests
def test_wrap_consul_call(mockconfig):
from k8splugin import discovery as dis
-
+
def foo(a, b, c="default"):
return " ".join([a, b, c])
@@ -41,11 +40,11 @@ def test_wrap_consul_call(mockconfig):
def test_generate_service_component_name(mockconfig):
from k8splugin import discovery as dis
-
+
component_type = "some-component-type"
name = dis.generate_service_component_name(component_type)
assert name.split("-", 1)[1] == component_type
-
+
def test_find_matching_services(mockconfig):
from k8splugin import discovery as dis
@@ -68,4 +67,4 @@ def test_is_healthy_pure(mockconfig):
def fake_is_healthy(name):
return 0, [{ "Checks": [{"Status": "passing"}] }]
- assert True == dis._is_healthy_pure(fake_is_healthy, "some-component") \ No newline at end of file
+ assert True == dis._is_healthy_pure(fake_is_healthy, "some-component")
diff --git a/k8s/tests/test_k8sclient.py b/k8s/tests/test_k8sclient.py
index fcc6b95..4f669d8 100644
--- a/k8s/tests/test_k8sclient.py
+++ b/k8s/tests/test_k8sclient.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2018-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/k8s/tests/test_k8sclient_deploy.py b/k8s/tests/test_k8sclient_deploy.py
index 4e8a11d..3755855 100644
--- a/k8s/tests/test_k8sclient_deploy.py
+++ b/k8s/tests/test_k8sclient_deploy.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2018-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/k8s/tests/test_tasks.py b/k8s/tests/test_tasks.py
index 7f5d753..b82a4ae 100644
--- a/k8s/tests/test_tasks.py
+++ b/k8s/tests/test_tasks.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import copy
import pytest
diff --git a/k8s/tests/test_utils.py b/k8s/tests/test_utils.py
index 0b7cba4..7e01a91 100644
--- a/k8s/tests/test_utils.py
+++ b/k8s/tests/test_utils.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
def test_random_string(mockconfig):
from k8splugin import utils
diff --git a/k8s/tox.ini b/k8s/tox.ini
index 5b750e3..a8722d6 100644
--- a/k8s/tox.ini
+++ b/k8s/tox.ini
@@ -1,5 +1,6 @@
[tox]
-envlist = py27,py36,cov
+envlist = py27,py36,py37,py38,cov
+skip_missing_interpreters = true
[testenv]
# coverage can only find modules if pythonpath is set
@@ -24,6 +25,8 @@ setenv=
commands=
coverage combine
coverage xml
+ coverage report
+ coverage html
[pytest]
junit_family = xunit2
diff --git a/makefile b/makefile
new file mode 100644
index 0000000..7071d5e
--- /dev/null
+++ b/makefile
@@ -0,0 +1,27 @@
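+# verify that version numbers in each pom.xml and setup.py stay in sync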
+none:
+ @echo make verify-versions
+
+verify-versions:
+ @for i in */pom.xml; do \
+ p=$$(dirname $$i); \
+ v=$$(grep "<version>" $$i | sed 2q | tail -n 1 | sed -e 's!</\?version>!!g' -e 's/-SNAPSHOT//' -e 's/[[:space:]]//g'); \
+ if grep 'version[[:space:]]*=[[:space:]]*["'"']$$v['"'"]' $$p/setup.py > /dev/null; then \
+ echo "$$i version $$v verified in $$p/setup.py"; \
+ else \
+ grep -n "<version>" $$i /dev/null | sed 2q | tail -n 1; \
+ grep -n "version[[:space:]]*=" $$p/setup.py /dev/null; \
+ echo "$$i version $$v not found in $$p/setup.py. Instead found the above version."; \
+ exit 1 ; \
+ fi; \
+ done
+ @pomv=$$(grep "<version>" pom.xml | sed 2q | tail -n 1 | sed -e 's!</\?version>!!g' -e 's/[[:space:]]//g'); \
+ for i in */pom.xml; do \
+ v=$$(grep "<version>" $$i | sed 1q | sed -e 's!</\?version>!!g' -e 's/[[:space:]]//g'); \
+ if [ "$$pomv" = "$$v" ]; then \
+ echo "pom.xml version $$pomv verified in $$i"; \
+ else \
+ grep -n "<version>" $$i /dev/null | sed 1q; \
+ echo "pom.xml version $$pomv not found in $$i. Instead found $$v"; \
+ exit 1 ; \
+ fi; \
+ done
diff --git a/mvn-phase-lib.sh b/mvn-phase-lib.sh
index 7b947e7..9dc4e9a 100644
--- a/mvn-phase-lib.sh
+++ b/mvn-phase-lib.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# ================================================================================
-# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -101,13 +101,6 @@ clean_tox_files()
expand_templates()
{
set +x
- # set up env variables, get ready for template resolution
- # NOTE: CCSDK artifacts do not distinguish REALESE vs SNAPSHOTs
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.plugins"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.plugins"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_blueprints_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.blueprints"
- export ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_blueprints_snapshots="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.ccsdk.platform.blueprints"
-
if [ -z "$RELEASE_TAG" ]; then
export ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_releases="$MVN_RAWREPO_BASEURL_DOWNLOAD/org.onap.dcaegen2/releases"
@@ -233,10 +226,13 @@ run_tox_test()
rm -rf ./venv-tox ./.tox
virtualenv ./venv-tox
source ./venv-tox/bin/activate
- pip install pip==9.0.3
+
+ pip install --upgrade pip
+ pip install --upgrade setuptools
pip install --upgrade argparse
- pip install tox==2.9.1
+ pip install tox
pip freeze
+ pwd
tox
deactivate
rm -rf ./venv-tox ./.tox
diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh
index 7f8e58d..8f053d6 100755
--- a/mvn-phase-script.sh
+++ b/mvn-phase-script.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pgaas/LICENSE.txt b/pgaas/LICENSE.txt
new file mode 100644
index 0000000..df9e931
--- /dev/null
+++ b/pgaas/LICENSE.txt
@@ -0,0 +1,17 @@
+org.onap.dcaegen2
+============LICENSE_START=======================================================
+================================================================================
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
diff --git a/pgaas/MANIFEST.in b/pgaas/MANIFEST.in
new file mode 100644
index 0000000..eb3cd9c
--- /dev/null
+++ b/pgaas/MANIFEST.in
@@ -0,0 +1 @@
+exclude *~
diff --git a/pgaas/README.md b/pgaas/README.md
new file mode 100644
index 0000000..61f1b90
--- /dev/null
+++ b/pgaas/README.md
@@ -0,0 +1,79 @@
+# PGaaS Plugin
+Cloudify PGaaS plugin description and configuration
+# Description
+The PGaaS plugin allows users to deploy PostgreSQL application databases, and retrieve access credentials for such databases, as part of a Cloudify blueprint.
+# Plugin Requirements
+* Python versions
+ * 2.7.x
+* System dependencies
+ * psycopg2
+
+Note: These requirements apply to the VM where Cloudify Manager itself runs.
+
+Note: The psycopg2 requirement is met by running `yum install python-psycopg2` on the Cloudify Manager VM.
+
+Note: Cloudify Manager itself requires Python 2.7.x (and CentOS 7).
+
+# Types
+## dcae.nodes.pgaas.cluster
+**Derived From:** cloudify.nodes.Root
+
+**Properties:**
+
+* `writerfqdn` (required string) The FQDN used for read-write access to the
+cluster containing the postgres database instance. This is used to identify
+and access a particular database instance and to record information about
+that instance on Cloudify Manager.
+* `use_existing` (optional boolean default=false) This is used to reference
+a database instance, in one blueprint, that was deployed in a different one.
+If it is `true`, then the `readerfqdn` property must not be set and this node
+must not have any `dcae.relationships.pgaas_cluster_uses_sshkeypair`
+relationships. If it is `false`, then this node must have exactly one
+`dcae.relationships.pgaas_cluster_uses_sshkeypair` relationship.
+* `readerfqdn` (optional string default=value of `writerfqdn`) The FQDN used for read-only access to the cluster containing the postgres database instance, if different from the FQDN used for read-write access. This will be used by viewer roles.
+
+**Mapped Operations:**
+
+* `cloudify.interfaces.lifecycle.create` validates and records information about the cluster on the Cloudify Manager server in /opt/manager/resources/pgaas/`writerfqdn`.
+* `cloudify.interfaces.lifecycle.delete` deletes previously recorded information from the Cloudify Manager server.
+
+Note: When `use_existing` is `true`, the create operation validates but does not record, and delete does nothing. Delete also does nothing when validation has failed.
+
+**Attributes:**
+
+This type has no runtime attributes.
+
+## dcae.nodes.pgaas.database
+**Derived From:** cloudify.nodes.Root
+
+**Properties:**
+* `name` (required string) The name of the application database, in postgres. This name is also used to create the names of the roles used to access the database, and the schema made available to users of the database.
+* `use_existing` (optional boolean default=false) This is used to reference an application database, in one blueprint, that was deployed in a different one. If true, and this node has a `dcae.relationships.database_runson_pgaas_cluster` relationship, the `dcae.nodes.pgaas.cluster` node that is the target of that relationship must also have its `use_existing` property set to true.
+* `writerfqdn` (optional string) This can be used as an alternative to specifying the cluster for the application database with a `dcae.relationships.database_runson_pgaas_cluster` relationship to a `dcae.nodes.pgaas.cluster` node. Exactly one of the two options must be used. The relationship method must be used if this blueprint deploys both the cluster and the application database on that cluster.
+
+**Mapped Operations:**
+
+* `cloudify.interfaces.lifecycle.create` creates the application database, and various roles for admin/user/viewer access to it.
+* `cloudify.interfaces.lifecycle.delete` deletes the application database and roles
+
+Note: When `use_existing` is true, create and delete do not create or delete the application database or associated roles. Create still sets runtime attributes (see below).
+
+**Attributes:**
+
+* `admin` a dict containing access information for administrative access to the application database.
+* `user` a dict containing access information for user access to the application database.
+* `viewer` a dict containing access information for read-only access to the application database.
+
+The keys in the access information dicts are as follows:
+
+* `database` the name of the application database.
+* `host` the appropriate FQDN for accessing the application database (`writerfqdn` or `readerfqdn`, depending on the type of access).
+* `user` the user role for accessing the database.
+* `password` the password corresponding to the user role.
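+
+For illustration, a node with a `dcae.relationships.application_uses_pgaas_database` relationship could open a connection from one of these dicts (a minimal sketch; `connect_admin` is a hypothetical helper, not part of the plugin):
+
+```python
+import psycopg2
+
+def connect_admin(admin):
+    # admin is the access-information dict described above
+    return psycopg2.connect(database=admin['database'], host=admin['host'],
+                            user=admin['user'], password=admin['password'])
+```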
+
+# Relationships
+## dcae.relationships.pgaas_cluster_uses_sshkeypair
+**Description:** A relationship for binding a `dcae.nodes.pgaas.cluster` node to the `dcae.nodes.ssh.keypair` used by the cluster to initialize the database access password for the postgres role. The password for the postgres role is expected to be the hex representation of the MD5 hash of 'postgres' and the contents of the id_rsa (private key) file for the ssh keypair. A `dcae.nodes.pgaas.cluster` node must have such a relationship if and only if its `use_existing` property is false.
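+
+For illustration, a minimal sketch of that password derivation (`postgres_password` is a hypothetical helper; the concatenation order shown is an assumption, not taken from the plugin code):
+
+```python
+import hashlib
+
+def postgres_password(id_rsa_text):
+    # hex md5 of 'postgres' plus the private key file contents, per the convention above
+    return hashlib.md5(('postgres' + id_rsa_text).encode()).hexdigest()
+```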
+## dcae.relationships.database_runson_pgaas_cluster
+**Description:** A relationship for binding a `dcae.nodes.pgaas.database` node to the `dcae.nodes.pgaas.cluster` node that contains the application database. A `dcae.nodes.pgaas.database` node must have either such a relationship or a `writerfqdn` property. The `writerfqdn` property cannot be used if the cluster is created in the same blueprint as the application database.
+## dcae.relationships.application_uses_pgaas_database
+**Description:** A relationship for binding a node that needs application database access information to the `dcae.nodes.pgaas.database` node for that application database.
diff --git a/pgaas/pgaas/__init__.py b/pgaas/pgaas/__init__.py
new file mode 100644
index 0000000..4f8c969
--- /dev/null
+++ b/pgaas/pgaas/__init__.py
@@ -0,0 +1,13 @@
+"""
+PostgreSQL plugin to manage passwords
+"""
+import logging
+
+def get_module_logger(mod_name):
+ logger = logging.getLogger(mod_name)
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter('%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+ logger.setLevel(logging.DEBUG)
+ return logger
diff --git a/pgaas/pgaas/logginginterface.py b/pgaas/pgaas/logginginterface.py
new file mode 100644
index 0000000..44ddce9
--- /dev/null
+++ b/pgaas/pgaas/logginginterface.py
@@ -0,0 +1,53 @@
+# org.onap.dcaegen2
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+"""
+PostgreSQL plugin to manage passwords
+"""
+
+from cloudify import ctx
+
+# pragma pylint: disable=bad-indentation
+
+def debug(msg):
+ """
+ Print a debugging message.
+ This is a handy endpoint to add other extended debugging calls.
+ """
+ ctx.logger.debug(msg)
+
+def warn(msg):
+ """
+ Print a warning message.
+ This is a handy endpoint to add other extended warning calls.
+ """
+ ctx.logger.warn(msg)
+
+def error(msg):
+ """
+ Print an error message.
+ This is a handy endpoint to add other extended error calls.
+ """
+ ctx.logger.error(msg)
+
+def info(msg):
+ """
+ Print an info message.
+ This is a handy endpoint to add other extended info calls.
+ """
+ ctx.logger.info(msg)
diff --git a/pgaas/pgaas/pgaas_plugin.py b/pgaas/pgaas/pgaas_plugin.py
new file mode 100644
index 0000000..f437bd9
--- /dev/null
+++ b/pgaas/pgaas/pgaas_plugin.py
@@ -0,0 +1,779 @@
+# org.onap.dcaegen2
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+"""
+PostgreSQL plugin to manage passwords
+"""
+
+from __future__ import print_function
+import sys
+import os
+import re
+import json
+import hashlib
+import socket
+import traceback
+import base64
+import binascii
+import collections
+try:
+ from urllib.parse import quote
+except ImportError:
+ from urllib import quote
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify.exceptions import NonRecoverableError
+from cloudify.exceptions import RecoverableError
+
+try:
+ import psycopg2
+except ImportError:
+ # FIXME: any users of this plugin installing its dependencies in nonstandard
+ # directories should set up PYTHONPATH accordingly, outside the program code
+ SYSPATH = sys.path
+ sys.path = list(SYSPATH)
+ sys.path.append('/usr/lib64/python2.7/site-packages')
+ import psycopg2
+ sys.path = SYSPATH
+
+from pgaas.logginginterface import debug, info, warn, error
+
+
+"""
+ To set up a cluster:
+ - https://$NEXUS/repository/raw/type_files/sshkeyshare/sshkey_types.yaml
+ - https://$NEXUS/repository/raw/type_files/pgaas_types.yaml
+ sharedsshkey_pgrs:
+ type: dcae.nodes.ssh.keypair
+ pgaas_cluster:
+ type: dcae.nodes.pgaas.cluster
+ properties:
+ writerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ readerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ # OR:
+ # writerfqdn: { concat: [ { get_input: location_prefix }, '-', { get_input: pgaas_cluster_name }, '-write.', { get_input: location_domain } ] }
+ # readerfqdn: { concat: [ { get_input: location_prefix }, '-', { get_input: pgaas_cluster_name }, '.', { get_input: location_domain } ] }
+ relationships:
+ - type: dcae.relationships.pgaas_cluster_uses_sshkeypair
+ target: sharedsshkey_pgrs
+
+ To reference an existing cluster:
+ - https://$NEXUS/repository/raw/type_files/pgaas_types.yaml
+ pgaas_cluster:
+ type: dcae.nodes.pgaas.cluster
+ properties:
+ writerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ # OR: writerfqdn: { concat: [ { get_input: location_prefix }, '-',
+ # { get_input: pgaas_cluster_name }, '-write.',
+ # { get_input: location_domain } ] }
+ # OR: writerfqdn: { get_property: [ dns_pgrs_rw, fqdn ] }
+ use_existing: true
+
+ To initialize an existing server to be managed by pgaas_plugin:
+ - https://$NEXUS/repository/raw/type_files/sshkeyshare/sshkey_types.yaml
+ - https://$NEXUS/repository/raw/type_files/pgaas_types.yaml
+ pgaas_cluster:
+ type: dcae.nodes.pgaas.cluster
+ properties:
+ writerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ readerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ # OR:
+ # writerfqdn: { concat: [ { get_input: location_prefix }, '-',
+ # { get_input: pgaas_cluster_name }, '-write.',
+ # { get_input: location_domain } ] }
+ # readerfqdn: { concat: [ { get_input: location_prefix }, '-',
+ # { get_input: pgaas_cluster_name }, '.',
+ # { get_input: location_domain } ] }
+ initialpassword: { get_input: currentpassword }
+ relationships:
+ - type: dcae.relationships.pgaas_cluster_uses_sshkeypair
+ target: sharedsshkey_pgrs
+
+ - { get_attribute: [ pgaas_cluster, public ] }
+ - { get_attribute: [ pgaas_cluster, base64private ] }
+ # - { get_attribute: [ pgaas_cluster, postgrespswd ] }
+
+
+ To set up a database:
+ - http://$NEXUS/raw/type_files/pgaas_types.yaml
+ pgaasdbtest:
+ type: dcae.nodes.pgaas.database
+ properties:
+ writerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ # OR: writerfqdn: { concat: [ { get_input: location_prefix }, '-',
+ # { get_input: pgaas_cluster_name }, '-write.',
+ # { get_input: location_domain } ] }
+ # OR: writerfqdn: { get_property: [ dns_pgrs_rw, fqdn ] }
+ name: { get_input: database_name }
+
+ To reference an existing database:
+ - http://$NEXUS/raw/type_files/pgaas_types.yaml
+ $CLUSTER_$DBNAME:
+ type: dcae.nodes.pgaas.database
+ properties:
+ writerfqdn: { get_input: k8s_pgaas_instance_fqdn }
+ # OR: writerfqdn: { concat: [ { get_input: location_prefix }, '-',
+ # { get_input: pgaas_cluster_name }, '-write.',
+ # { get_input: location_domain } ] }
+ # OR: writerfqdn: { get_property: [ dns_pgrs_rw, fqdn ] }
+ name: { get_input: database_name }
+ use_existing: true
+
+ $CLUSTER_$DBNAME_admin_host:
+ description: Hostname for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, admin, host ] }
+ $CLUSTER_$DBNAME_admin_user:
+ description: Admin Username for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, admin, user ] }
+ $CLUSTER_$DBNAME_admin_password:
+ description: Admin Password for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, admin, password ] }
+ $CLUSTER_$DBNAME_user_host:
+ description: Hostname for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, user, host ] }
+ $CLUSTER_$DBNAME_user_user:
+ description: User Username for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, user, user ] }
+ $CLUSTER_$DBNAME_user_password:
+ description: User Password for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, user, password ] }
+ $CLUSTER_$DBNAME_viewer_host:
+ description: Hostname for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, viewer, host ] }
+ $CLUSTER_$DBNAME_viewer_user:
+ description: Viewer Username for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, viewer, user ] }
+ $CLUSTER_$DBNAME_viewer_password:
+ description: Viewer Password for $CLUSTER $DBNAME database
+ value: { get_attribute: [ $CLUSTER_$DBNAME, viewer, password ] }
+
+"""
+
+OPT_MANAGER_RESOURCES_PGAAS = "/opt/manager/resources/pgaas"
+
+# pylint: disable=invalid-name
+def setOptManagerResources(o):
+ """
+ Overrides the default location of /opt/manager/resources
+ """
+ # pylint: disable=global-statement
+ global OPT_MANAGER_RESOURCES_PGAAS
+ OPT_MANAGER_RESOURCES_PGAAS = "{}/pgaas".format(o)
+
+def safestr(s):
+ """
+ returns a safely printable version of the string
+ """
+ return quote(str(s), '')
+
+def raiseRecoverableError(msg):
+ """
+ Print a warning message and raise a RecoverableError exception.
+ This is a handy endpoint to add other extended debugging calls.
+ """
+ warn(msg)
+ raise RecoverableError(msg)
+
+def raiseNonRecoverableError(msg):
+ """
+ Print an error message and raise a NonRecoverableError exception.
+ This is a handy endpoint to add other extended debugging calls.
+ """
+ error(msg)
+ raise NonRecoverableError(msg)
+
+def dbexecute(crx, cmd, args=None):
+ """
+ executes the SQL statement
+ Prints the entire command for debugging purposes
+ """
+ debug("executing {}".format(cmd))
+ crx.execute(cmd, args)
+
+
+def dbexecute_trunc_print(crx, cmd, args=None):
+ """
+ executes the SQL statement.
+ Prints only the first 30 characters of the command.
+ Use this function if you are executing an SQL command containing a password.
+ """
+ debug("executing {}".format(cmd[:30]))
+ crx.execute(cmd, args)
+
+
+def waithp(host, port):
+ """
+ do a test connection to a host and port
+ """
+ debug("waithp({0},{1})".format(safestr(host), safestr(port)))
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect((host, int(port)))
+ except: # pylint: disable=bare-except
+ a, b, c = sys.exc_info()
+ traceback.print_exception(a, b, c)
+ sock.close()
+ raiseRecoverableError('Server at {0}:{1} is not ready'.format(safestr(host), safestr(port)))
+ sock.close()
+
+def doconn(desc):
+ """
+ open an SQL connection to the PG server
+ """
+ debug("doconn({},{},{})".format(desc['host'], desc['user'], desc['database']))
+ # debug("doconn({},{},{},{})".format(desc['host'], desc['user'], desc['database'], desc['password']))
+ ret = psycopg2.connect(**desc)
+ ret.autocommit = True
+ return ret
+
+def hostportion(hostport):
+ """
+ return the host portion of a fqdn:port or IPv4:port or [IPv6]:port
+ """
+ ipv6re = re.match(r"^[[]([^]]+)[]](:(\d+))?", hostport)
+ ipv4re = re.match(r"^([^:]+)(:(\d+))?", hostport)
+ # check the bracketed IPv6 form first; the IPv4/FQDN pattern would
+ # otherwise match the leading "[" of an IPv6 literal
+ if ipv6re:
+ return ipv6re.group(1)
+ if ipv4re:
+ return ipv4re.group(1)
+ raiseNonRecoverableError("invalid hostport: {}".format(hostport))
+
+def portportion(hostport):
+ """
+ Return the port portion of a fqdn:port or IPv4:port or [IPv6]:port.
+ If port is not present, return 5432.
+ """
+ ipv6re = re.match(r"^[[]([^]]+)[]](:(\d+))?", hostport)
+ ipv4re = re.match(r"^([^:]+)(:(\d+))?", hostport)
+ # as in hostportion(), check the bracketed IPv6 form first
+ if ipv6re:
+ return ipv6re.group(3) if ipv6re.group(3) else '5432'
+ if ipv4re:
+ return ipv4re.group(3) if ipv4re.group(3) else '5432'
+ raiseNonRecoverableError("invalid hostport: {}".format(hostport))
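+
+# Examples (illustrative):
+#   hostportion("db.example.org:5433") -> "db.example.org"; portportion of the same -> '5433'
+#   hostportion("[2001:db8::1]:6432") -> "2001:db8::1"; portportion("db.example.org") -> '5432'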
+
+def rootdesc(data, dbname, initialpassword=None):
+ """
+ return the postgres connection information
+ """
+ debug("rootdesc(..data..,{0})".format(safestr(dbname)))
+ # pylint: disable=bad-continuation
+ return {
+ 'database': dbname,
+ 'host': hostportion(data['rw']),
+ 'port': portportion(data['rw']),
+ 'user': 'postgres',
+ 'password': initialpassword if initialpassword else getpass(data, 'postgres', data['rw'], 'postgres')
+ }
+
+def rootconn(data, dbname='postgres', initialpassword=None):
+ """
+ connect to a given server as postgres,
+ connecting to the specified database
+ """
+ debug("rootconn(..data..,{0})".format(safestr(dbname)))
+ return doconn(rootdesc(data, dbname, initialpassword))
+
+def onedesc(data, dbname, role, access):
+ """
+ return the connection information for a given user and dbname on a cluster
+ """
+ user = '{0}_{1}'.format(dbname, role)
+ # pylint: disable=bad-continuation
+ return {
+ 'database': dbname,
+ 'host': hostportion(data[access]),
+ 'port': portportion(data[access]),
+ 'user': user,
+ 'password': getpass(data, user, data['rw'], dbname)
+ }
+
+def dbdescs(data, dbname):
+ """
+ return the entire set of information for a specific server/database
+ """
+ # pylint: disable=bad-continuation
+ return {
+ 'admin': onedesc(data, dbname, 'admin', 'rw'),
+ 'user': onedesc(data, dbname, 'user', 'rw'),
+ 'viewer': onedesc(data, dbname, 'viewer', 'ro')
+ }
+
+def getpass(data, ident, hostport, dbname):
+ """
+ generate the password for a given user on a specific server
+ """
+ m = hashlib.sha256()
+ m.update(ident.encode())
+
+ # mix in the seed (the last line) for that database, if one exists
+ hostport = hostport.lower()
+ dbname = dbname.lower()
+ hostPortDbname = '{0}/{1}:{2}'.format(OPT_MANAGER_RESOURCES_PGAAS, hostport, dbname)
+ try:
+ lastLine = ''
+ with open(hostPortDbname, "r") as fp:
+ for line in fp:
+ lastLine = line
+ m.update(lastLine.encode())
+ except IOError:
+ pass
+
+ m.update(base64.b64decode(data['data']))
+ return m.hexdigest()
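+
+# Example (illustrative): the admin password for database "foo" on cluster
+# "pg.example.org" is the hex sha256 of "foo_admin", the last line (if any) of
+# /opt/manager/resources/pgaas/pg.example.org:foo, and the base64-decoded
+# cluster private key, in that order.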
+
+def find_related_nodes(reltype, inst=None):
+ """
+ extract the related_nodes information from the context
+ for a specific relationship
+ """
+ if inst is None:
+ inst = ctx.instance
+ ret = []
+ for rel in inst.relationships:
+ if reltype in rel.type_hierarchy:
+ ret.append(rel.target)
+ return ret
+
+def chkfqdn(fqdn):
+ """
+ verify that a FQDN is valid
+ """
+ if fqdn is None:
+ return False
+ hp = hostportion(fqdn)
+ # not needed right now: pp = portportion(fqdn)
+ # TODO need to augment this for IPv6 addresses
+ return re.match('^[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$', hp) is not None
+
+def chkdbname(dbname):
+ """
+ verify that a database name is valid
+ """
+ ret = re.match('[a-zA-Z][a-zA-Z0-9]{0,43}$', dbname) is not None and dbname != 'postgres'
+ if not ret:
+ warn("Invalid dbname: {0}".format(safestr(dbname)))
+ return ret
+
+def get_valid_domains():
+ """
+ Return a list of the valid names, suitable for inclusion in an error message.
+ """
+ msg = ''
+ import glob
+ validDomains = []
+ for f in glob.glob('{}/*'.format(OPT_MANAGER_RESOURCES_PGAAS)):
+ try:
+ with open(f, "r") as fp:
+ try:
+ tmpdata = json.load(fp)
+ if 'pubkey' in tmpdata:
+ validDomains.append(os.path.basename(f))
+ except: # pylint: disable=bare-except
+ pass
+ except: # pylint: disable=bare-except
+ pass
+ if len(validDomains) == 0:
+ msg += '\nNo valid PostgreSQL cluster information was found'
+ else:
+ msg += '\nThese are the valid PostgreSQL cluster domains found on this manager:'
+ for v in validDomains:
+ msg += '\n\t"{}"'.format(v)
+ return msg
+
+def get_existing_clusterinfo(wfqdn, rfqdn, related):
+ """
+ Retrieve all of the information specific to an existing cluster.
+ """
+ if rfqdn != '':
+ raiseNonRecoverableError('Read-only FQDN must not be specified when using an existing cluster, fqdn={0}'.format(safestr(rfqdn)))
+ if len(related) != 0:
+ raiseNonRecoverableError('Cluster SSH keypair must not be specified when using an existing cluster')
+ try:
+ fn = '{0}/{1}'.format(OPT_MANAGER_RESOURCES_PGAAS, wfqdn.lower())
+ with open(fn, 'r') as f:
+ data = json.load(f)
+ data['rw'] = wfqdn
+ return data
+ except Exception as e: # pylint: disable=broad-except
+ warn("Error: {0}".format(e))
+ msg = 'Cluster must be deployed when using an existing cluster.\nCheck your domain name: fqdn={0}\nerr={1}'.format(safestr(wfqdn), e)
+ if not os.path.isdir(OPT_MANAGER_RESOURCES_PGAAS):
+ msg += '\nThe directory {} does not exist. No PostgreSQL clusters have been deployed on this manager.'.format(OPT_MANAGER_RESOURCES_PGAAS)
+ else:
+ msg += get_valid_domains()
+ # warn("Stack: {0}".format(traceback.format_exc()))
+ raiseNonRecoverableError(msg)
+
+def getclusterinfo(wfqdn, reuse, rfqdn, initialpassword, related):
+ """
+ Retrieve all of the information specific to a cluster.
+ if reuse, retrieve it
+ else create and store it
+ """
+ # debug("getclusterinfo({}, {}, {}, {}, ..related..)".format(safestr(wfqdn), safestr(reuse), safestr(rfqdn), safestr(initialpassword)))
+ debug("getclusterinfo({}, {}, {}, ..related..)".format(safestr(wfqdn), safestr(reuse), safestr(rfqdn)))
+ if not chkfqdn(wfqdn):
+ raiseNonRecoverableError('Invalid FQDN specified for admin/read-write access, fqdn={0}'.format(safestr(wfqdn)))
+ if reuse:
+ return get_existing_clusterinfo(wfqdn, rfqdn, related)
+
+ if rfqdn == '':
+ rfqdn = wfqdn
+ elif not chkfqdn(rfqdn):
+ raiseNonRecoverableError('Invalid FQDN specified for read-only access, fqdn={0}'.format(safestr(rfqdn)))
+ if len(related) != 1:
+ raiseNonRecoverableError('Cluster SSH keypair must be specified using a dcae.relationships.pgaas_cluster_uses_sshkeypair ' +
+ 'relationship to a dcae.nodes.ssh.keypair node')
+ data = {'ro': rfqdn, 'pubkey': related[0].instance.runtime_properties['public'],
+ 'data': related[0].instance.runtime_properties['base64private'], 'hash': 'sha256'}
+ os.umask(0o77)
+ try:
+ os.makedirs('{0}'.format(OPT_MANAGER_RESOURCES_PGAAS))
+ except: # pylint: disable=bare-except
+ pass
+ try:
+ with open('{0}/{1}'.format(OPT_MANAGER_RESOURCES_PGAAS, wfqdn.lower()), 'w') as f:
+ f.write(json.dumps(data))
+ except Exception as e: # pylint: disable=broad-except
+ warn("Error: {0}".format(e))
+ warn("Stack: {0}".format(traceback.format_exc()))
+ raiseNonRecoverableError('Cannot write cluster information to {0}: fqdn={1}, err={2}'.format(OPT_MANAGER_RESOURCES_PGAAS, safestr(wfqdn), e))
+ data['rw'] = wfqdn
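+ # When taking control of an existing instance, reset the postgres role password
+ # from the supplied initial password to the value derived by getpass().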
+ if initialpassword:
+ with rootconn(data, initialpassword=initialpassword) as conn:
+ crr = conn.cursor()
+ dbexecute_trunc_print(crr, "ALTER USER postgres WITH PASSWORD %s", (getpass(data, 'postgres', wfqdn, 'postgres'),))
+ crr.close()
+ return data
+
+@operation
+def add_pgaas_cluster(**kwargs): # pylint: disable=unused-argument
+ """
+ dcae.nodes.pgaas.cluster:
+ Record key generation data for cluster
+ """
+ try:
+ warn("add_pgaas_cluster() invoked")
+ data = getclusterinfo(ctx.node.properties['writerfqdn'],
+ ctx.node.properties['use_existing'],
+ ctx.node.properties['readerfqdn'],
+ ctx.node.properties['initialpassword'],
+ find_related_nodes('dcae.relationships.pgaas_cluster_uses_sshkeypair'))
+ ctx.instance.runtime_properties['public'] = data['pubkey']
+ ctx.instance.runtime_properties['base64private'] = data['data']
+ ctx.instance.runtime_properties['postgrespswd'] = getpass(data, 'postgres', ctx.node.properties['writerfqdn'], 'postgres')
+ warn('All done')
+ except Exception as e: # pylint: disable=broad-except
+ ctx.logger.warn("Error: {0}".format(e))
+ ctx.logger.warn("Stack: {0}".format(traceback.format_exc()))
+ raise e
+
+@operation
+def rm_pgaas_cluster(**kwargs): # pylint: disable=unused-argument
+ """
+ dcae.nodes.pgaas.cluster:
+ Remove key generation data for cluster
+ """
+ try:
+ warn("rm_pgaas_cluster()")
+ wfqdn = ctx.node.properties['writerfqdn']
+ if chkfqdn(wfqdn) and not ctx.node.properties['use_existing']:
+ os.remove('{0}/{1}'.format(OPT_MANAGER_RESOURCES_PGAAS, wfqdn.lower()))
+ warn('All done')
+ except Exception as e: # pylint: disable=broad-except
+ ctx.logger.warn("Error: {0}".format(e))
+ ctx.logger.warn("Stack: {0}".format(traceback.format_exc()))
+ raise e
+
+def dbgetinfo(refctx):
+ """
+ Get the data associated with a database.
+ Make sure the connection exists.
+ """
+ wfqdn = refctx.node.properties['writerfqdn']
+ related = find_related_nodes('dcae.relationships.database_runson_pgaas_cluster', refctx.instance)
+ if wfqdn == '':
+ if len(related) != 1:
+ raiseNonRecoverableError('Database Cluster must be specified using exactly one dcae.relationships.database_runson_pgaas_cluster relationship ' +
+ 'to a dcae.nodes.pgaas.cluster node when writerfqdn is not specified')
+ wfqdn = related[0].node.properties['writerfqdn']
+ return dbgetinfo_for_update(wfqdn)
+
+def dbgetinfo_for_update(wfqdn):
+ """
+ Get the data associated with a database.
+ Make sure the connection exists.
+ """
+
+ if not chkfqdn(wfqdn):
+ raiseNonRecoverableError('Invalid FQDN specified for admin/read-write access, fqdn={0}'.format(safestr(wfqdn)))
+ ret = getclusterinfo(wfqdn, True, '', '', [])
+ waithp(hostportion(wfqdn), portportion(wfqdn))
+ return ret
+
+@operation
+def create_database(**kwargs):
+ """
+ dcae.nodes.pgaas.database:
+ Create a database on a cluster
+ """
+ try:
+ debug("create_database() invoked")
+ dbname = ctx.node.properties['name']
+ warn("create_database({0})".format(safestr(dbname)))
+ if not chkdbname(dbname):
+ raiseNonRecoverableError('Unacceptable or missing database name: {0}'.format(safestr(dbname)))
+ debug('create_database(): dbname checked out')
+ dbinfo = dbgetinfo(ctx)
+ debug('Got db server info')
+ descs = dbdescs(dbinfo, dbname)
+ ctx.instance.runtime_properties['admin'] = descs['admin']
+ ctx.instance.runtime_properties['user'] = descs['user']
+ ctx.instance.runtime_properties['viewer'] = descs['viewer']
+ with rootconn(dbinfo) as conn:
+ crx = conn.cursor()
+ dbexecute(crx, 'SELECT datname FROM pg_database WHERE datistemplate = false')
+ existingdbs = [x[0] for x in crx]
+ if ctx.node.properties['use_existing']:
+ if dbname not in existingdbs:
+ raiseNonRecoverableError('use_existing specified but database does not exist, dbname={0}'.format(safestr(dbname)))
+ return
+ dbexecute(crx, 'SELECT rolname FROM pg_roles')
+ existingroles = [x[0] for x in crx]
+ admu = descs['admin']['user']
+ usru = descs['user']['user']
+ vwru = descs['viewer']['user']
+ cusr = '{0}_common_user_role'.format(dbname)
+ cvwr = '{0}_common_viewer_role'.format(dbname)
+ schm = '{0}_db_common'.format(dbname)
+ if admu not in existingroles:
+ dbexecute_trunc_print(crx, 'CREATE USER {0} WITH PASSWORD %s'.format(admu), (descs['admin']['password'],))
+ if usru not in existingroles:
+ dbexecute_trunc_print(crx, 'CREATE USER {0} WITH PASSWORD %s'.format(usru), (descs['user']['password'],))
+ if vwru not in existingroles:
+ dbexecute_trunc_print(crx, 'CREATE USER {0} WITH PASSWORD %s'.format(vwru), (descs['viewer']['password'],))
+ if cusr not in existingroles:
+ dbexecute(crx, 'CREATE ROLE {0}'.format(cusr))
+ if cvwr not in existingroles:
+ dbexecute(crx, 'CREATE ROLE {0}'.format(cvwr))
+ if dbname not in existingdbs:
+ dbexecute(crx, 'CREATE DATABASE {0} WITH OWNER {1}'.format(dbname, admu))
+ crx.close()
+ with rootconn(dbinfo, dbname) as dbconn:
+ crz = dbconn.cursor()
+ for r in [cusr, cvwr, usru, vwru]:
+ dbexecute(crz, 'REVOKE ALL ON DATABASE {0} FROM {1}'.format(dbname, r))
+ dbexecute(crz, 'GRANT {0} TO {1}'.format(cvwr, cusr))
+ dbexecute(crz, 'GRANT {0} TO {1}'.format(cusr, admu))
+ dbexecute(crz, 'GRANT CONNECT ON DATABASE {0} TO {1}'.format(dbname, cvwr))
+ dbexecute(crz, 'CREATE SCHEMA IF NOT EXISTS {0} AUTHORIZATION {1}'.format(schm, admu))
+ for r in [admu, cusr, cvwr, usru, vwru]:
+ dbexecute(crz, 'ALTER ROLE {0} IN DATABASE {1} SET search_path = public, {2}'.format(r, dbname, schm))
+ dbexecute(crz, 'GRANT USAGE ON SCHEMA {0} to {1}'.format(schm, cvwr))
+ dbexecute(crz, 'GRANT CREATE ON SCHEMA {0} to {1}'.format(schm, admu))
+ dbexecute(crz, 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT SELECT ON TABLES TO {1}'.format(admu, cvwr))
+ dbexecute(crz, 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT INSERT, UPDATE, DELETE, TRUNCATE ON TABLES TO {1}'.format(admu, cusr))
+ dbexecute(crz, 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO {1}'.format(admu, cusr))
+ dbexecute(crz, 'GRANT TEMP ON DATABASE {0} TO {1}'.format(dbname, cusr))
+ dbexecute(crz, 'GRANT {0} to {1}'.format(cusr, usru))
+ dbexecute(crz, 'GRANT {0} to {1}'.format(cvwr, vwru))
+ crz.close()
+ warn('All done')
+ except Exception as e: # pylint: disable=broad-except
+ ctx.logger.warn("Error: {0}".format(e))
+ ctx.logger.warn("Stack: {0}".format(traceback.format_exc()))
+ raise e
+
+@operation
+def delete_database(**kwargs): # pylint: disable=unused-argument
+ """
+ dcae.nodes.pgaas.database:
+ Delete a database from a cluster
+ """
+ try:
+ debug("delete_database() invoked")
+ dbname = ctx.node.properties['name']
+ warn("delete_database({0})".format(safestr(dbname)))
+ if not chkdbname(dbname):
+ return
+ debug('delete_database(): dbname checked out')
+ if ctx.node.properties['use_existing']:
+ return
+ debug('delete_database(): !use_existing')
+ dbinfo = dbgetinfo(ctx)
+ debug('Got db server info')
+ with rootconn(dbinfo) as conn:
+ crx = conn.cursor()
+ admu = ctx.instance.runtime_properties['admin']['user']
+ usru = ctx.instance.runtime_properties['user']['user']
+ vwru = ctx.instance.runtime_properties['viewer']['user']
+ cusr = '{0}_common_user_role'.format(dbname)
+ cvwr = '{0}_common_viewer_role'.format(dbname)
+ dbexecute(crx, 'DROP DATABASE IF EXISTS {0}'.format(dbname))
+ for r in [usru, vwru, admu, cusr, cvwr]:
+ dbexecute(crx, 'DROP ROLE IF EXISTS {0}'.format(r))
+ warn('All gone')
+ except Exception as e: # pylint: disable=broad-except
+ ctx.logger.warn("Error: {0}".format(e))
+ ctx.logger.warn("Stack: {0}".format(traceback.format_exc()))
+ raise e
+
+#############################################################
+# function: update_database #
+# Purpose: Called as a workflow to change the database #
+# passwords for all the users #
+# #
+# Invoked via: #
+# cfy executions start -d <deployment-id> update_db_passwd #
+# #
+# Assumptions: #
+# 1) pgaas_types.yaml must define a work flow e.g. #
+# workflows: #
+# update_db_passwd : #
+# mapping : pgaas.pgaas.pgaas_plugin.update_database #
+# 2) DB Blueprint: node_template must have properties: #
+# writerfqdn & name (of DB) #
+#############################################################
+# pylint: disable=unused-argument
+@operation
+def update_database(refctx, **kwargs):
+ """
+ dcae.nodes.pgaas.database:
+ Update the password for a database from a cluster
+ refctx is auto injected into the function when called as a workflow
+ """
+ try:
+ debug("update_database() invoked")
+
+ ################################################
+ # Verify refctx contains the <nodes> attribute. #
+ # The workflow context might not be consistent #
+ # across different cloudify versions #
+ ################################################
+ if not hasattr(refctx, 'nodes'):
+ raiseNonRecoverableError('workflow context does not contain attribute=<nodes>. dir(refctx)={}'.format(dir(refctx)))
+
+ ############################################
+ # Verify that refctx.nodes is iterable #
+ ############################################
+ if not isinstance(refctx.nodes, collections.Iterable):
+ raiseNonRecoverableError("refctx.nodes is not an iterable. Type={}".format(type(refctx.nodes)))
+
+ ctx_node = None
+ ##############################################
+ # Iterate through the nodes until we find #
+ # one with the properties we are looking for #
+ ##############################################
+ for i in refctx.nodes:
+
+ ############################################
+ # Safeguard: If a given node doesn't have #
+ # properties then skip it. #
+ # Don't cause an exception since the nodes #
+ # entry we are searching might still exist #
+ ############################################
+ if not hasattr(i, 'properties'):
+ warn('Encountered a ctx node that does not have attr=<properties>. dir={}'.format(dir(i)))
+ continue
+
+ debug("ctx node has the following Properties: {}".format(list(i.properties.keys())))
+
+ if ('name' in i.properties) and ('writerfqdn' in i.properties):
+ ctx_node = i
+ break
+
+
+ ###############################################
+ # If none of the nodes have properties: #
+ # <name> and <writerfqdn> then fatal error #
+ ###############################################
+ if not ctx_node:
+ raiseNonRecoverableError('No node with both <name> and <writerfqdn> properties was found in refctx.nodes.')
+
+ debug("name is {}".format(ctx_node.properties['name']))
+ debug("host is {}".format(ctx_node.properties['writerfqdn']))
+
+ dbname = ctx_node.properties['name']
+ debug("update_database({0})".format(safestr(dbname)))
+
+ ###########################
+ # dbname must be valid #
+ ###########################
+ if not chkdbname(dbname):
+ raiseNonRecoverableError('Unacceptable or missing database name: {0}'.format(safestr(dbname)))
+
+
+ hostport = ctx_node.properties['writerfqdn']
+ debug('update_database(): wfqdn={}'.format(hostport))
+ dbinfo = dbgetinfo_for_update(hostport)
+
+ #debug('Got db server info={}'.format(dbinfo))
+
+ hostPortDbname = '{0}/{1}:{2}'.format(OPT_MANAGER_RESOURCES_PGAAS, hostport.lower(), dbname.lower())
+
+ debug('update_database(): hostPortDbname={}'.format(hostPortDbname))
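+ # Appending a fresh random line to the per-database seed file changes the
+ # passwords that getpass() subsequently derives for this database's roles.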
+ try:
+ appended = False
+ with open(hostPortDbname, "a") as fp:
+ with open("/dev/urandom", "rb") as rp:
+ b = rp.read(16)
+ print(binascii.hexlify(b).decode('utf-8'), file=fp)
+ appended = True
+ if not appended:
+ ctx.logger.warn("Error: the password for {} {} was not successfully changed".format(hostport, dbname))
+ except Exception as e: # pylint: disable=broad-except
+ ctx.logger.warn("Error: {0}".format(e))
+ ctx.logger.warn("Stack: {0}".format(traceback.format_exc()))
+ raise e
+
+ descs = dbdescs(dbinfo, dbname)
+
+ ##########################################
+ # Verify we have expected keys #
+ # <admin>, <user>, and <viewer> as well #
+ # as "sub-key" <user> #
+ ##########################################
+
+ if not isinstance(descs, dict):
+ raiseNonRecoverableError('db descs has unexpected type=<{}> was expected type dict'.format(type(descs)))
+
+ for key in ("admin", "user", "viewer"):
+ if key not in descs:
+ raiseNonRecoverableError('db descs does not contain key=<{}>. Keys found for descs are: {}'.format(key, list(descs.keys())))
+ if 'user' not in descs[key]:
+ raiseNonRecoverableError('db descs[{}] does not contain key=<user>. Keys found for descs[{}] are: {}'.format(key, key, list(descs[key].keys())))
+
+
+ with rootconn(dbinfo) as conn:
+ crx = conn.cursor()
+
+ admu = descs['admin']['user']
+ usru = descs['user']['user']
+ vwru = descs['viewer']['user']
+
+ for r in [usru, vwru, admu]:
+ dbexecute_trunc_print(crx, "ALTER USER {} WITH PASSWORD '{}'".format(r, getpass(dbinfo, r, hostport, dbname)))
+ #debug("user={} password={}".format(r, getpass(dbinfo, r, hostport, dbname)))
+
+ warn('All users updated for database {}'.format(dbname))
+ except Exception as e: # pylint: disable=broad-except
+ ctx.logger.warn("Error: {0}".format(e))
+ ctx.logger.warn("Stack: {0}".format(traceback.format_exc()))
+ raise e
diff --git a/pgaas/pgaas_types.yaml b/pgaas/pgaas_types.yaml
new file mode 100644
index 0000000..951fbd5
--- /dev/null
+++ b/pgaas/pgaas_types.yaml
@@ -0,0 +1,67 @@
+# -*- indent-tabs-mode: nil -*- # vi: set expandtab:
+tosca_definitions_version: cloudify_dsl_1_3
+
+plugins:
+ pgaas:
+ executor: central_deployment_agent
+ package_name: pgaas
+ package_version: 1.2.0
+
+node_types:
+ dcae.nodes.pgaas.cluster:
+ derived_from: cloudify.nodes.Root
+ properties:
+ writerfqdn:
+ description: 'FQDN used for admin/read-write access to the cluster'
+ type: string
+ use_existing:
+ type: boolean
+ default: false
+ description: 'If set to true, the cluster exists and is being referenced'
+ readerfqdn:
+ description: 'FQDN used for read-only access to the cluster (default - same as writerfqdn)'
+ type: string
+ default: ''
+ port:
+ description: 'Port used for access to the cluster'
+ type: string
+ default: '5432'
+ initialpassword:
+ description: 'Password of existing PG instance to take control of'
+ type: string
+ default: ''
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create: pgaas.pgaas.pgaas_plugin.add_pgaas_cluster
+ delete: pgaas.pgaas.pgaas_plugin.rm_pgaas_cluster
+
+ dcae.nodes.pgaas.database:
+ derived_from: cloudify.nodes.Root
+ properties:
+ name:
+ type: string
+ description: 'Name of database (max 44 alphanumeric)'
+ use_existing:
+ type: boolean
+ default: false
+ description: 'If set to true, the database exists and is being referenced'
+ writerfqdn:
+ type: string
+ default: ''
+ description: 'Shortcut for connecting to a pgaas.cluster node (with use_existing=true) with a runson_pgaas_cluster relationship'
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create: pgaas.pgaas.pgaas_plugin.create_database
+ delete: pgaas.pgaas.pgaas_plugin.delete_database
+
+relationships:
+ dcae.relationships.pgaas_cluster_uses_sshkeypair:
+ derived_from: cloudify.relationships.connected_to
+ dcae.relationships.database_runson_pgaas_cluster:
+ derived_from: cloudify.relationships.contained_in
+ dcae.relationships.application_uses_pgaas_database:
+ derived_from: cloudify.relationships.connected_to
+
+workflows:
+ update_db_passwd:
+ mapping: pgaas.pgaas.pgaas_plugin.update_database
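+# Invoke with: cfy executions start -d <deployment-id> update_db_passwd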
diff --git a/pgaas/pom.xml b/pgaas/pom.xml
new file mode 100644
index 0000000..7e7e0ed
--- /dev/null
+++ b/pgaas/pom.xml
@@ -0,0 +1,327 @@
+<?xml version="1.0"?>
+<!--
+============LICENSE_START=======================================================
+================================================================================
+Copyright (c) 2017,2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+ <!-- CHANGE THE FOLLOWING 3 ELEMENTS (groupId, artifactId, name) for your own repo -->
+ <groupId>org.onap.dcaegen2.platform.plugins</groupId>
+ <artifactId>pgaas</artifactId>
+ <name>pgaas</name>
+
+ <version>1.3.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <!-- vvvvvvvvvvvvvvvv not in relationships -->
+ <!-- name from the setup.py file -->
+ <plugin.name>pgaas</plugin.name>
+ <!-- path to directory containing the setup.py relative to this file -->
+ <plugin.subdir>.</plugin.subdir>
+ <!-- path of types file itself relative to this file -->
+ <typefile.source>pgaas_types.yaml</typefile.source>
+ <!-- path, in repo, to store type file -->
+ <typefile.dest>type_files/pgaas/1.1.0/pgaas_types.yaml</typefile.dest>
+ <!-- ^^^^^^^^^^^^^^^^ -->
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPaths>coverage.xml</sonar.python.coverage.reportPaths>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginName>Python</sonar.pluginName>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
+ </properties>
+
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>sonar-maven-plugin</artifactId>
+ <version>2.7.1</version>
+ </plugin>
+
+ <!-- nexus-staging-maven-plugin is called during deploy phase by default behavior.
+ we do not need it -->
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <version>1.6.7</version>
+ <configuration>
+ <skipNexusStagingDeployMojo>true</skipNexusStagingDeployMojo>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <version>2.8</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
+
+ <!-- first disable the default Java plugins at various stages -->
+ <!-- maven-resources-plugin is called during "*resource" phases by default behavior. it prepares the resources
+ dir. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.6</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-compiler-plugin is called during "compile" phases by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-jar-plugin is called during "compile" phase by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <id>default-jar</id>
+ <phase/>
+ </execution>
+ </executions>
+ </plugin>
+
+ <!-- maven-install-plugin is called during "install" phase by default behavior. it tries to copy stuff under
+ target dir to ~/.m2. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-surefire-plugin is called during "test" phase by default behavior. it triggers junit test.
+ we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.12.4</version>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.2.1</version>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <MVN_SERVER_ID>${project.distributionManagement.snapshotRepository.id}</MVN_SERVER_ID>
+ <TYPE_FILE_SOURCE>${typefile.source}</TYPE_FILE_SOURCE>
+ <TYPE_FILE_DEST>${typefile.dest}</TYPE_FILE_DEST>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/pgaas/requirements.txt b/pgaas/requirements.txt
new file mode 100644
index 0000000..83a931a
--- /dev/null
+++ b/pgaas/requirements.txt
@@ -0,0 +1,2 @@
+psycopg2-binary
+cloudify-common>=5.0.5
diff --git a/pgaas/setup.py b/pgaas/setup.py
new file mode 100644
index 0000000..8e6ace7
--- /dev/null
+++ b/pgaas/setup.py
@@ -0,0 +1,36 @@
+# org.onap.dcaegen2
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from setuptools import setup, find_packages
+
+setup(
+ name="pgaas",
+ version="1.3.0",
+ packages=find_packages(),
+ author="AT&T",
+ description=("Cloudify plugin for pgaas/pgaas."),
+ license="http://www.apache.org/licenses/LICENSE-2.0",
+ keywords="",
+ url="https://onap.org",
+ zip_safe=False,
+ install_requires=[
+ 'psycopg2-binary',
+ 'cloudify-common>=5.0.5',
+ ],
+)
diff --git a/pgaas/tests/psycopg2.py b/pgaas/tests/psycopg2.py
new file mode 100644
index 0000000..ba8aadd
--- /dev/null
+++ b/pgaas/tests/psycopg2.py
@@ -0,0 +1,70 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+"""
+
+This is a mock psycopg2 module.
+
+"""
+
+class MockCursor(object):
+ """
+ mocked cursor
+ """
+ def __init__(self, **kwargs):
+ pass
+
+ def execute(self, cmd, exc=None):
+ """
+ mock SQL execution
+ """
+ pass
+
+ def close(self):
+ """
+ mock SQL close
+ """
+ pass
+
+ def __iter__(self):
+ return iter([])
+
+class MockConn(object): # pylint: disable=too-few-public-methods
+ """
+ mock SQL connection
+ """
+ def __init__(self, **kwargs):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+ def cursor(self): # pylint: disable=no-self-use
+ """
+ mock return a cursor
+ """
+ return MockCursor()
+
+def connect(**kwargs): # pylint: disable=unused-argument
+ """
+ mock get-a-connection
+ """
+ return MockConn()
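+
+# ---------------------------------------------------------------------------
+# Illustrative sketch only (not exercised by the test suite): when this
+# tests/ directory precedes site-packages on sys.path (as pytest arranges
+# for test runs), a plain "import psycopg2" resolves to this mock, so the
+# plugin's SQL calls become harmless no-ops. The connection parameters
+# below are hypothetical.
+# ---------------------------------------------------------------------------
+if __name__ == "__main__":
+    with connect(host="pgaas.example.com", user="admin") as conn:
+        cur = conn.cursor()
+        cur.execute("SELECT 1")    # mocked: no SQL is actually executed
+        assert list(cur) == []     # MockCursor iterates over an empty list
+        cur.close()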
diff --git a/pgaas/tests/test_plugin.py b/pgaas/tests/test_plugin.py
new file mode 100644
index 0000000..70ce6e9
--- /dev/null
+++ b/pgaas/tests/test_plugin.py
@@ -0,0 +1,291 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+"""
+unit tests for PostgreSQL password plugin
+"""
+
+from __future__ import print_function
+# pylint: disable=import-error,unused-import,wrong-import-order
+import pytest
+import socket
+import psycopg2
+import pgaas.pgaas_plugin
+from cloudify.mocks import MockCloudifyContext
+from cloudify.mocks import MockNodeContext
+from cloudify.mocks import MockNodeInstanceContext
+from cloudify.mocks import MockRelationshipSubjectContext
+from cloudify.state import current_ctx
+from cloudify.exceptions import NonRecoverableError
+from cloudify import ctx
+
+import sys
+import os
+sys.path.append(os.path.realpath(os.path.dirname(__file__)))
+import traceback
+
+TMPNAME = "/tmp/pgaas_plugin_tests_{}".format(os.environ["USER"] if "USER" in os.environ else
+ os.environ["LOGNAME"] if "LOGNAME" in os.environ else
+ str(os.getuid()))
+
+class MockKeyPair(object):
+ """
+ mock keypair for cloudify contexts
+ """
+ def __init__(self, type_hierarchy=None, target=None):
+ self._type_hierarchy = type_hierarchy
+ self._target = target
+
+ @property
+ def type_hierarchy(self):
+ """
+ return the type hierarchy
+ """
+ return self._type_hierarchy
+
+ @property
+ def target(self):
+ """
+ return the target
+ """
+ return self._target
+
+class MockInstance(object): # pylint: disable=too-few-public-methods
+ """
+ mock instance for cloudify contexts
+ """
+ def __init__(self, instance=None):
+ self._instance = instance
+
+ @property
+ def instance(self):
+ """
+ return the instance
+ """
+ return self._instance
+
+class MockRuntimeProperties(object): # pylint: disable=too-few-public-methods
+ """
+ mock runtime properties for cloudify contexts
+ """
+ def __init__(self, runtime_properties=None):
+ self._runtime_properties = runtime_properties
+
+ @property
+ def runtime_properties(self):
+ """
+ return the properties
+ """
+ return self._runtime_properties
+
+class MockSocket(object):
+ """
+ mock socket interface
+ """
+ def __init__(self):
+ pass
+ def connect(self, host=None, port=None):
+ """
+ mock socket connection
+ """
+ pass
+ def close(self):
+ """
+ mock socket close
+ """
+ pass
+
+
+def _connect(host, port): # pylint: disable=unused-argument
+ """
+ mock connection
+ """
+ return {}
+
+def set_mock_context(msg, monkeypatch, writerfqdn='test.bar.example.com'):
+ """
+ establish the mock context for our testing
+ """
+ print("================ %s ================" % msg)
+ # pylint: disable=bad-continuation
+ props = {
+ 'writerfqdn': writerfqdn,
+ 'use_existing': False,
+ 'readerfqdn': 'test-ro.bar.example.com',
+ 'name': 'testdb',
+ 'port': '5432',
+ 'initialpassword': 'test'
+ }
+
+ sshkeyprops = {
+ 'public': "testpub",
+ 'base64private': "testpriv"
+ }
+
+ mock_ctx = MockCloudifyContext(node_id='test_node_id', node_name='test_node_name',
+ # pylint: disable=bad-whitespace
+ properties=props,
+ relationships = [
+ MockKeyPair(type_hierarchy =
+ [ "dcae.relationships.pgaas_cluster_uses_sshkeypair" ],
+ target= MockInstance(
+ MockRuntimeProperties(sshkeyprops)) )
+ ],
+ runtime_properties = {
+ "admin": { "user": "admin_user" },
+ "user": { "user": "user_user" },
+ "viewer": { "user": "viewer_user" }
+ }
+ )
+ current_ctx.set(mock_ctx)
+ monkeypatch.setattr(socket.socket, 'connect', _connect)
+ # monkeypatch.setattr(psycopg2, 'connect', _connect)
+ pgaas.pgaas_plugin.setOptManagerResources(TMPNAME)
+ return mock_ctx
+
+
+@pytest.mark.dependency()
+def test_start(monkeypatch): # pylint: disable=unused-argument
+ """
+ put anything in here that needs to be done
+ PRIOR to the tests
+ """
+ pass
+
+@pytest.mark.dependency(depends=['test_start'])
+def test_add_pgaas_cluster(monkeypatch):
+ """
+ test add_pgaas_cluster()
+ """
+ try:
+ set_mock_context('test_add_pgaas_cluster', monkeypatch)
+ pgaas.pgaas_plugin.add_pgaas_cluster(args={})
+ except Exception as e:
+ print("Error: {0}".format(e))
+ print("Stack: {0}".format(traceback.format_exc()))
+ raise
+ finally:
+ current_ctx.clear()
+
+@pytest.mark.dependency(depends=['test_add_pgaas_cluster'])
+def test_add_database(monkeypatch):
+ """
+ test add_database()
+ """
+ try:
+ set_mock_context('test_add_database', monkeypatch)
+ pgaas.pgaas_plugin.create_database(args={})
+ except Exception as e:
+ print("Error: {0}".format(e))
+ print("Stack: {0}".format(traceback.format_exc()))
+ raise
+ finally:
+ current_ctx.clear()
+
+@pytest.mark.dependency(depends=['test_add_pgaas_cluster'])
+def test_bad_add_database(monkeypatch):
+ """
+ test bad_add_database()
+ """
+ try:
+ set_mock_context('test_add_database', monkeypatch, writerfqdn="bad.bar.example.com")
+ with pytest.raises(NonRecoverableError):
+ pgaas.pgaas_plugin.create_database(args={})
+ except Exception as e:
+ print("Error: {0}".format(e))
+ print("Stack: {0}".format(traceback.format_exc()))
+ raise
+ finally:
+ current_ctx.clear()
+
+@pytest.mark.dependency(depends=['test_add_database'])
+def test_update_database(monkeypatch):
+ """
+ test update_database()
+ """
+ try:
+        ########################################################
+        # Subtle test implications regarding update_database:
+        #
+        # 1) update_database is a workflow, and the context
+        #    passed to it has a 'nodes' attribute that is not
+        #    included in MockCloudifyContext.
+        # 2) The 'nodes' attribute is a list of contexts, so
+        #    we have to create a sub-context.
+        # 3) update_database iterates through each of the node
+        #    contexts looking for the correct one.
+        # 4) To identify the correct sub-context, it first
+        #    checks each sub-context for the existence of a
+        #    properties attribute.
+        # 5) MockCloudifyContext internally saves properties in
+        #    the variable _properties, and 'properties' is
+        #    defined as @property, so it is not recognized as
+        #    an attribute; that would cause update_database to
+        #    fail, so we explicitly create a properties
+        #    attribute in the sub-context.
+        ########################################################
+
+ ####################
+ # Main context #
+ ####################
+ myctx = set_mock_context('test_update_database', monkeypatch)
+ ###########################################################
+ # Create subcontext and assign it to attribute properties #
+ # in main context #
+ ###########################################################
+ mynode = set_mock_context('test_update_database_node', monkeypatch)
+ # pylint: disable=protected-access
+ mynode.properties = mynode._properties
+ myctx.nodes = [mynode]
+ pgaas.pgaas_plugin.update_database(refctx=myctx)
+ except Exception as e:
+ print("Error: {0}".format(e))
+ print("Stack: {0}".format(traceback.format_exc()))
+ raise
+ finally:
+ current_ctx.clear()
+
+@pytest.mark.dependency(depends=['test_update_database'])
+def test_delete_database(monkeypatch):
+ """
+ test delete_database()
+ """
+ try:
+ set_mock_context('test_delete_database', monkeypatch)
+ pgaas.pgaas_plugin.delete_database(args={})
+ except Exception as e:
+ print("Error: {0}".format(e))
+ print("Stack: {0}".format(traceback.format_exc()))
+ raise
+ finally:
+ current_ctx.clear()
+
+@pytest.mark.dependency(depends=['test_delete_database'])
+def test_rm_pgaas_cluster(monkeypatch):
+ """
+ test rm_pgaas_cluster()
+ """
+ try:
+ set_mock_context('test_rm_pgaas_cluster', monkeypatch)
+ pgaas.pgaas_plugin.rm_pgaas_cluster(args={})
+ except Exception as e:
+ print("Error: {0}".format(e))
+ print("Stack: {0}".format(traceback.format_exc()))
+ raise
+ finally:
+ current_ctx.clear()
diff --git a/pgaas/tox.ini b/pgaas/tox.ini
new file mode 100644
index 0000000..967f664
--- /dev/null
+++ b/pgaas/tox.ini
@@ -0,0 +1,54 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+[tox]
+envlist = py27,py36,py37,py38,cov
+skip_missing_interpreters = true
+
+[testenv]
+# coverage can only find modules if pythonpath is set
+setenv=
+ PYTHONPATH={toxinidir}
+ COVERAGE_FILE=.coverage.{envname}
+deps=
+ -rrequirements.txt
+ pytest
+ coverage
+ pytest-cov
+whitelist_externals=
+ /bin/mkdir
+commands=
+ mkdir -p logs
+ coverage erase
+ pytest --junitxml xunit-results.{envname}.xml --cov pgaas
+
+[testenv:cov]
+skip_install = true
+deps=
+ coverage
+setenv=
+ COVERAGE_FILE=.coverage
+commands=
+ coverage combine
+ coverage xml
+ coverage report
+ coverage html
+
+[pytest]
+junit_family = xunit2
diff --git a/pom.xml b/pom.xml
index 26a98a0..91dbd4f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -16,7 +16,6 @@ See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END=========================================================
-ECOMP is a trademark and service mark of AT&T Intellectual Property.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
@@ -32,10 +31,14 @@ ECOMP is a trademark and service mark of AT&T Intellectual Property.
<url>http://maven.apache.org</url>
<packaging>pom</packaging>
<modules>
+ <module>pgaas</module>
+ <module>clamp-policy</module>
<module>dcae-policy</module>
+ <module>dmaap</module>
+ <module>helm</module>
<module>k8s</module>
- <module>clamp-policy</module>
<module>relationships</module>
+ <module>sshkeyshare</module>
</modules>
<properties>
<onap.nexus.url>https://nexus.onap.org</onap.nexus.url>
diff --git a/relationships/.gitignore b/relationships/.gitignore
index 5c75135..c0b2113 100644
--- a/relationships/.gitignore
+++ b/relationships/.gitignore
@@ -1,59 +1,5 @@
+# local additions to plugins .gitignore
cfyhelper.sh
-.cloudify/
-*.wgn
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*,cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
local_settings.py
# Flask stuff:
@@ -63,12 +9,6 @@ instance/
# Scrapy stuff:
.scrapy
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
# Jupyter Notebook
.ipynb_checkpoints
@@ -81,11 +21,6 @@ celerybeat-schedule
# dotenv
.env
-# virtualenv
-.venv/
-venv/
-ENV/
-
# Spyder project settings
.spyderproject
diff --git a/relationships/LICENSE.txt b/relationships/LICENSE.txt
index cb8008a..a4ece14 100644
--- a/relationships/LICENSE.txt
+++ b/relationships/LICENSE.txt
@@ -1,7 +1,7 @@
============LICENSE_START=======================================================
org.onap.dcae
================================================================================
-Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,10 +16,9 @@ See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END=========================================================
-ECOMP is a trademark and service mark of AT&T Intellectual Property.
-Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
===================================================================
Licensed under the Creative Commons License, Attribution 4.0 Intl. (the "License");
you may not use this documentation except in compliance with the License.
diff --git a/relationships/example_register_to_blueprint.yaml b/relationships/example_register_to_blueprint.yaml
index 6d01f3e..59a9fe4 100644
--- a/relationships/example_register_to_blueprint.yaml
+++ b/relationships/example_register_to_blueprint.yaml
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/relationships/pom.xml b/relationships/pom.xml
index 6b2de69..713f957 100644
--- a/relationships/pom.xml
+++ b/relationships/pom.xml
@@ -16,7 +16,6 @@ See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END=========================================================
-ECOMP is a trademark and service mark of AT&T Intellectual Property.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
diff --git a/relationships/relationship-types.yaml b/relationships/relationship-types.yaml
index 25e86e7..9919cd8 100644
--- a/relationships/relationship-types.yaml
+++ b/relationships/relationship-types.yaml
@@ -1,5 +1,5 @@
# ================================================================================
-# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
tosca_definitions_version: cloudify_dsl_1_3
imports:
diff --git a/relationships/relationshipplugin/__init__.py b/relationships/relationshipplugin/__init__.py
index 259e52c..e342af3 100644
--- a/relationships/relationshipplugin/__init__.py
+++ b/relationships/relationshipplugin/__init__.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
from .tasks import add_relationship, remove_relationship, \
diff --git a/relationships/relationshipplugin/discovery.py b/relationships/relationshipplugin/discovery.py
index 1b87a65..ec1b576 100644
--- a/relationships/relationshipplugin/discovery.py
+++ b/relationships/relationshipplugin/discovery.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
try:
from urllib.parse import urlparse
diff --git a/relationships/relationshipplugin/tasks.py b/relationships/relationshipplugin/tasks.py
index b17403c..094c8c7 100644
--- a/relationships/relationshipplugin/tasks.py
+++ b/relationships/relationshipplugin/tasks.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import json
from cloudify import ctx
@@ -107,7 +106,7 @@ def registered_to(**kwargs):
#Storing in source because that's who is getting registered
ctx.source.instance.runtime_properties[CONSUL_HOST] = "http://{0}:{1}".format(consul_host, consul_port)
ctx.source.instance.runtime_properties["name_to_register"] = name #careful! delete does not have access to inputs
-
+
try:
response = requests.put(url = "{0}/v1/agent/service/register".format(ctx.source.instance.runtime_properties[CONSUL_HOST]),
json = {
diff --git a/relationships/setup.py b/relationships/setup.py
index c4eb941..4687138 100644
--- a/relationships/setup.py
+++ b/relationships/setup.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2019 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import os
from setuptools import setup
diff --git a/relationships/tests/test_discovery.py b/relationships/tests/test_discovery.py
index 808b413..2dbc919 100644
--- a/relationships/tests/test_discovery.py
+++ b/relationships/tests/test_discovery.py
@@ -1,7 +1,7 @@
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@
# limitations under the License.
# ============LICENSE_END=========================================================
#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import pytest
from relationshipplugin import discovery as dis
diff --git a/relationships/tox.ini b/relationships/tox.ini
index c734632..a9e10a0 100644
--- a/relationships/tox.ini
+++ b/relationships/tox.ini
@@ -1,5 +1,6 @@
[tox]
-envlist = py27,py36,cov
+envlist = py27,py36,py37,py38,cov
+skip_missing_interpreters = true
[testenv]
# coverage can only find modules if pythonpath is set
@@ -24,6 +25,8 @@ setenv=
commands=
coverage combine
coverage xml
+ coverage report
+ coverage html
[pytest]
junit_family = xunit2
diff --git a/sshkeyshare/LICENSE.txt b/sshkeyshare/LICENSE.txt
new file mode 100644
index 0000000..86c0033
--- /dev/null
+++ b/sshkeyshare/LICENSE.txt
@@ -0,0 +1,17 @@
+============LICENSE_START=======================================================
+org.onap.dcaegen2
+================================================================================
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
diff --git a/sshkeyshare/README.md b/sshkeyshare/README.md
new file mode 100644
index 0000000..ac0b1fe
--- /dev/null
+++ b/sshkeyshare/README.md
@@ -0,0 +1,56 @@
+<!--
+============LICENSE_START=======================================================
+org.onap.dcaegen2
+================================================================================
+Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+-->
+
+# sshkeyshare plugin
+Cloudify plugin for creating ssh key pairs on the fly
+# Description
+The sshkeyshare Cloudify plugin creates an ssh key pair that VMs or other
+containers spun up by a Cloudify blueprint can use to establish connections
+among themselves. The blueprint can, for example, provide the private key to
+one VM and the public key to another as part of their initial configuration,
+allowing the VM with the private key to connect automatically to the other
+and run commands.
+# Plugin Requirements
+* Python versions
+ * 2.7.x
+
+Note: These requirements apply to the VM where Cloudify Manager itself runs.
+
+Note: Cloudify Manager itself requires Python 2.7.x (and CentOS 7).
+
+# Types
+## dcaegen2.nodes.ssh.keypair
+**Derived From:** cloudify.nodes.Root
+
+**Properties:**
+This type has no properties.
+
+**Mapped Operations:**
+* `cloudify.interfaces.lifecycle.create` Creates a new ssh keypair
+using ssh-keygen
+
+**Attributes:**
+* `public` A string containing the public key of the newly created
+keypair.
+* `base64private` A single line base-64 encoded representation of
+the content of the private key file for the newly created keypair.
+
+# Relationships
+This plugin does not define or use any relationships.
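+
+# Example
+A minimal blueprint sketch (illustrative only: the node and output names
+are made up, and the local import assumes the type file shipped with this
+plugin):
+
+```yaml
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+  - sshkey_types.yaml
+
+node_templates:
+  shared_keypair:
+    type: dcaegen2.nodes.ssh.keypair
+
+outputs:
+  public_key:
+    description: public half of the generated keypair
+    value: { get_attribute: [shared_keypair, public] }
+  private_key_b64:
+    description: base64-encoded private key file content
+    value: { get_attribute: [shared_keypair, base64private] }
+```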
diff --git a/sshkeyshare/pom.xml b/sshkeyshare/pom.xml
new file mode 100644
index 0000000..c2062f4
--- /dev/null
+++ b/sshkeyshare/pom.xml
@@ -0,0 +1,327 @@
+<?xml version="1.0"?>
+<!--
+============LICENSE_START=======================================================
+org.onap.dcaegen2
+================================================================================
+Copyright (c) 2017,2020 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dcaegen2.platform</groupId>
+ <artifactId>plugins</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+ <!-- CHANGE THE FOLLOWING 3 OBJECTS for your own repo -->
+ <groupId>org.onap.dcaegen2.platform.plugins</groupId>
+ <artifactId>sshkeyshare</artifactId>
+ <name>sshkeyshare</name>
+
+ <version>1.2.0-SNAPSHOT</version>
+ <url>http://maven.apache.org</url>
+ <properties>
+ <!-- name from the setup.py file -->
+ <plugin.name>sshkeyshare</plugin.name>
+ <!-- path to directory containing the setup.py relative to this file -->
+ <plugin.subdir>.</plugin.subdir>
+ <!-- path of types file itself relative to this file -->
+ <typefile.source>sshkey_types.yaml</typefile.source>
+ <!-- path, in repo, to store type file -->
+ <typefile.dest>type_files/sshkeyshare/sshkey_types.yaml</typefile.dest>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <sonar.sources>.</sonar.sources>
+ <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
+ <sonar.python.coverage.reportPaths>coverage.xml</sonar.python.coverage.reportPaths>
+ <sonar.language>py</sonar.language>
+ <sonar.pluginName>Python</sonar.pluginName>
+ <sonar.inclusions>**/*.py</sonar.inclusions>
+ <sonar.exclusions>tests/*,setup.py</sonar.exclusions>
+ </properties>
+
+ <build>
+ <finalName>${project.artifactId}-${project.version}</finalName>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>sonar-maven-plugin</artifactId>
+ <version>2.7.1</version>
+ </plugin>
+
+ <!-- nexus-staging-maven-plugin is called during deploy phase by default behavior.
+ we do not need it -->
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <version>1.6.7</version>
+ <configuration>
+ <skipNexusStagingDeployMojo>true</skipNexusStagingDeployMojo>
+ </configuration>
+ </plugin>
+
+ <!-- maven-deploy-plugin is called during deploy but we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <version>2.8</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
+
+ <!-- first disable the default Java plugins at various stages -->
+ <!-- maven-resources-plugin is called during "*resource" phases by default behavior. it prepares the resources
+ dir. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.6</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-compiler-plugin is called during "compile" phases by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-jar-plugin is called during "compile" phase by default behavior. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <version>2.4</version>
+ <executions>
+ <execution>
+ <id>default-jar</id>
+ <phase/>
+ </execution>
+ </executions>
+ </plugin>
+
+ <!-- maven-install-plugin is called during "install" phase by default behavior. it tries to copy stuff under
+ target dir to ~/.m2. we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+
+ <!-- maven-surefire-plugin is called during "test" phase by default behavior. it triggers junit test.
+ we do not need it -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>2.12.4</version>
+ <configuration>
+ <skipTests>true</skipTests>
+ </configuration>
+ </plugin>
+
+ <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>clean phase script</id>
+ <phase>clean</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>clean</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>generate-sources script</id>
+ <phase>generate-sources</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>generate-sources</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>compile script</id>
+ <phase>compile</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>compile</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>package script</id>
+ <phase>package</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>package</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>test script</id>
+ <phase>test</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>test</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>install script</id>
+ <phase>install</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>install</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ </environmentVariables>
+ </configuration>
+ </execution>
+
+ <execution>
+ <id>deploy script</id>
+ <phase>deploy</phase>
+ <goals><goal>exec</goal></goals>
+ <configuration>
+ <executable>${session.executionRootDirectory}/mvn-phase-script.sh</executable>
+ <arguments>
+ <argument>${project.artifactId}</argument>
+ <argument>deploy</argument>
+ </arguments>
+ <environmentVariables>
+ <!-- make mvn properties as env for our script -->
+ <MVN_PROJECT_GROUPID>${project.groupId}</MVN_PROJECT_GROUPID>
+ <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
+ <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
+ <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
+ <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
+ <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
+ <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+ <MVN_SERVER_ID>${project.distributionManagement.snapshotRepository.id}</MVN_SERVER_ID>
+ <TYPE_FILE_SOURCE>${typefile.source}</TYPE_FILE_SOURCE>
+ <TYPE_FILE_DEST>${typefile.dest}</TYPE_FILE_DEST>
+ <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+ <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
+ </environmentVariables>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/sshkeyshare/requirements.txt b/sshkeyshare/requirements.txt
new file mode 100644
index 0000000..833d57a
--- /dev/null
+++ b/sshkeyshare/requirements.txt
@@ -0,0 +1 @@
+cloudify-common>=5.0.5
diff --git a/sshkeyshare/setup.py b/sshkeyshare/setup.py
new file mode 100644
index 0000000..3b66c88
--- /dev/null
+++ b/sshkeyshare/setup.py
@@ -0,0 +1,37 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+import os
+from setuptools import setup, find_packages
+
+setup(
+ name='sshkeyshare',
+ version='1.2.0',
+ packages=find_packages(),
+ author='AT&T',
+ description=('Cloudify plugin for creating ssh keypairs on the fly.'),
+ license='Apache 2.0',
+ keywords='',
+ url='https://wiki.onap.org',
+ zip_safe=False,
+ package_data={'':['LICENSE.txt']},
+ install_requires=[
+ 'cloudify-common>=5.0.5',
+ ],
+)
diff --git a/sshkeyshare/sshkey_types.yaml b/sshkeyshare/sshkey_types.yaml
new file mode 100644
index 0000000..565ab1e
--- /dev/null
+++ b/sshkeyshare/sshkey_types.yaml
@@ -0,0 +1,17 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+plugins:
+ ssh_keyshare:
+ executor: central_deployment_agent
+ package_name: sshkeyshare
+ package_version: 1.2.0
+node_types:
+ dcaegen2.nodes.ssh.keypair:
+ derived_from: cloudify.nodes.Root
+ properties:
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: ssh_keyshare.sshkeyshare.keyshare_plugin.generate
diff --git a/sshkeyshare/sshkeyshare/__init__.py b/sshkeyshare/sshkeyshare/__init__.py
new file mode 100644
index 0000000..0d79837
--- /dev/null
+++ b/sshkeyshare/sshkeyshare/__init__.py
@@ -0,0 +1,28 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+import logging
+
+def get_module_logger(mod_name):
+ logger = logging.getLogger(mod_name)
+ handler=logging.StreamHandler()
+ formatter=logging.Formatter('%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+ logger.setLevel(logging.DEBUG)
+ return logger
diff --git a/sshkeyshare/sshkeyshare/keyshare_plugin.py b/sshkeyshare/sshkeyshare/keyshare_plugin.py
new file mode 100644
index 0000000..41e56e0
--- /dev/null
+++ b/sshkeyshare/sshkeyshare/keyshare_plugin.py
@@ -0,0 +1,39 @@
+# ============LICENSE_START====================================================
+# org.onap.dcaegen2
+# =============================================================================
+# Copyright (c) 2017,2020 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+import uuid
+import os
+from cloudify import ctx
+from cloudify.decorators import operation
+
+@operation(resumable=True)
+def generate(**kwargs):
+ """
+ Create SSH key pair
+ """
+ tmpdir = '/tmp/{0}'.format(uuid.uuid4().hex)
+ os.mkdir(tmpdir, 0o700)
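+    # generate a passphrase-less 2048-bit RSA keypair in the scratch
+    # directory, then base64-encode the private key onto a single line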
+ os.system('ssh-keygen -t rsa -b 2048 -C "hadoop@cdapcluster" -N "" -f {0}/id_rsa'.format(tmpdir))
+ os.system('base64 -w 0 <{0}/id_rsa >{0}/id64'.format(tmpdir))
+ with open('{0}/id64'.format(tmpdir), 'r') as f:
+ k64 = f.read()
+ with open('{0}/id_rsa.pub'.format(tmpdir), 'r') as f:
+ pub = f.read()
+ os.system('rm -rf {0}'.format(tmpdir))
+ ctx.instance.runtime_properties['public'] = pub.strip()
+ ctx.instance.runtime_properties['base64private'] = k64.strip()
diff --git a/sshkeyshare/tests/test_plugin.py b/sshkeyshare/tests/test_plugin.py
new file mode 100644
index 0000000..6c5fe93
--- /dev/null
+++ b/sshkeyshare/tests/test_plugin.py
@@ -0,0 +1,14 @@
+import sshkeyshare.keyshare_plugin
+from cloudify.mocks import MockCloudifyContext
+from cloudify.state import current_ctx
+from cloudify import ctx
+
+def test_generate():
+ mock_ctx = MockCloudifyContext(node_id='test_node_id', node_name='test_node_name', properties={})
+ try:
+ current_ctx.set(mock_ctx)
+ sshkeyshare.keyshare_plugin.generate()
+        pub = ctx.instance.runtime_properties['public']
+        pvt64 = ctx.instance.runtime_properties['base64private']
+        # the create operation must populate both runtime properties
+        assert pub.startswith('ssh-rsa ')
+        assert pvt64 != ''
+ finally:
+ current_ctx.clear()
diff --git a/sshkeyshare/tox.ini b/sshkeyshare/tox.ini
new file mode 100644
index 0000000..af05cdc
--- /dev/null
+++ b/sshkeyshare/tox.ini
@@ -0,0 +1,35 @@
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2020 Pantheon.tech. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+[tox]
+envlist = py27,py36,py37,py38
+skip_missing_interpreters = true
+
+[testenv]
+deps=
+ pytest
+ coverage
+ pytest-cov
+ -rrequirements.txt
+setenv=
+ PYTHONPATH={toxinidir}
+commands=
+ pytest --junitxml xunit-results.xml --cov sshkeyshare --cov-report xml
+ coverage xml
+ coverage report
+ coverage html