summaryrefslogtreecommitdiffstats
path: root/mod/onboardingapi/dcae_cli/util
diff options
context:
space:
mode:
authorMichael Hwang <mhwang@research.att.com>2019-11-12 16:04:20 -0500
committerMichael Hwang <mhwang@research.att.com>2019-12-13 16:46:11 -0500
commitc698e66797bad69b4c77b26b487bf8322989beb0 (patch)
treee40a8449728768107e4ab4c1ac506af13230a580 /mod/onboardingapi/dcae_cli/util
parent9cb529e42f5625f2fa802e21919b10f814a89ca7 (diff)
Copy dcae-cli->onboardingapi, copy component specs
Issue-ID: DCAEGEN2-1860 Change-Id: I4805398c76479fad51cbdb74470ccc8f706ce9dc Signed-off-by: Michael Hwang <mhwang@research.att.com>
Diffstat (limited to 'mod/onboardingapi/dcae_cli/util')
-rw-r--r--mod/onboardingapi/dcae_cli/util/__init__.py120
-rw-r--r--mod/onboardingapi/dcae_cli/util/cdap_util.py206
-rw-r--r--mod/onboardingapi/dcae_cli/util/config.py156
-rw-r--r--mod/onboardingapi/dcae_cli/util/discovery.py777
-rw-r--r--mod/onboardingapi/dcae_cli/util/dmaap.py358
-rw-r--r--mod/onboardingapi/dcae_cli/util/docker_util.py226
-rw-r--r--mod/onboardingapi/dcae_cli/util/exc.py35
-rw-r--r--mod/onboardingapi/dcae_cli/util/inputs.py40
-rw-r--r--mod/onboardingapi/dcae_cli/util/logger.py56
-rw-r--r--mod/onboardingapi/dcae_cli/util/policy.py64
-rw-r--r--mod/onboardingapi/dcae_cli/util/profiles.py238
-rw-r--r--mod/onboardingapi/dcae_cli/util/run.py293
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_cdap_util.py93
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_config.py137
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_discovery.py447
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_dmaap.py259
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_docker_util.py62
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_inputs.py37
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_profiles.py162
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_remove.py24
-rw-r--r--mod/onboardingapi/dcae_cli/util/tests/test_undeploy.py62
-rw-r--r--mod/onboardingapi/dcae_cli/util/undeploy.py111
22 files changed, 3963 insertions, 0 deletions
diff --git a/mod/onboardingapi/dcae_cli/util/__init__.py b/mod/onboardingapi/dcae_cli/util/__init__.py
new file mode 100644
index 0000000..b39de74
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/__init__.py
@@ -0,0 +1,120 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides reusable utilities
+"""
+import os
+import json
+import sys
+import errno
+import contextlib
+import requests
+
+import six
+import click
+
+from dcae_cli.util.exc import DcaeException, FileNotFoundError
+
+
+APP_NAME = 'dcae-cli'
+
+
def get_app_dir():
    '''Returns the absolute directory path for dcae cli aux files (the per-user app dir chosen by click)'''
    return click.get_app_dir(APP_NAME)
+
+
+def makedirs(path, exist_ok=True):
+ '''Emulates Python 3.2+ os.makedirs functionality'''
+ try:
+ os.makedirs(path, exist_ok=exist_ok)
+ except TypeError:
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST and not exist_ok:
+ raise
+
+
def get_pref(path, init_func=None):
    '''Returns a general preference dict. Uses `init_func` to create a new one if the file does not exist.

    When the file is missing, the newly created dict (empty if no `init_func`)
    is also persisted to disk via write_pref.
    NOTE: FileNotFoundError here is the custom class imported from
    dcae_cli.util.exc, not the builtin.
    '''
    try:
        with open(path) as file:
            pref = json.load(file)
    except FileNotFoundError:
        pref = init_func() if init_func is not None else dict()
        write_pref(pref, path)
    return pref
+
+
def pref_exists(path):
    '''Returns True if a preference file exists at `path` (must be a regular file)'''
    return os.path.isfile(path)
+
+
def update_pref(path, init_func=None, **kwargs):
    '''Sets specified key-value pairs in a preference file and returns an updated dict

    Loads (or initializes via `init_func`) the preference file, overlays
    `kwargs` on top, persists the result, and returns it.
    '''
    pref = get_pref(path, init_func)
    pref.update(kwargs)
    write_pref(pref, path)

    return pref
+
+
def write_pref(pref, path):
    '''Writes a preference json file to disk, creating parent directories as needed'''
    makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as file:
        json.dump(pref, file)
+
+
def reraise_with_msg(e, msg=None, cls=None, as_dcae=False):
    '''Reraises exception e with an additional message prepended

    Args:
        e: the exception currently being handled
        msg: optional text prepended to str(e) in the new exception
        cls: optional class for the new exception; defaults to type(e)
        as_dcae: when True, overrides cls with DcaeException

    Must be called from inside an `except` block: the original traceback is
    taken from sys.exc_info() and preserved via six.reraise.
    '''
    if as_dcae:
        cls = DcaeException
    traceback = sys.exc_info()[2]
    cls = e.__class__ if cls is None else cls
    new_msg = "{:}: {:}".format(msg, e) if msg else str(e)
    new_e = cls(new_msg)
    six.reraise(cls, new_e, traceback)
+
+
def load_json(path):
    '''Helper function which loads a JSON file and returns a dict

    Raises DcaeException when the file contents are not valid JSON.
    '''
    with open(path) as fp:
        try:
            return json.load(fp)
        except ValueError:
            raise DcaeException("File '{}' appears to be a malformed JSON.".format(path))
+
+
def fetch_file_from_web(server_url, path, transform_func=json.loads):
    """Fetch file from a web server

    The default behavior is to transform the response to a json.

    Args:
        server_url: base url, e.g. "http://host:port"
        path: path of the artifact underneath the base url
        transform_func: applied to the response body text; pass a falsy value
            to get the raw text back instead

    Raises:
        requests.HTTPError: when the server responds with a non-2xx status
    """
    artifact_url = "{0}/{1}".format(server_url, path)
    r = requests.get(artifact_url)
    r.raise_for_status()
    if transform_func:
        return transform_func(r.text)
    else:
        return r.text
diff --git a/mod/onboardingapi/dcae_cli/util/cdap_util.py b/mod/onboardingapi/dcae_cli/util/cdap_util.py
new file mode 100644
index 0000000..a38f530
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/cdap_util.py
@@ -0,0 +1,206 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides utilities for cdap components
+"""
+import logging
+import json
+import requests
+import six
+
+from dcae_cli.util.logger import get_logger
+from dcae_cli.util.exc import DcaeException
+from dcae_cli.util import discovery
+
+_logger = get_logger('cdap-utils')
+_logger.setLevel(logging.DEBUG)
+
+#HELPER FUNCTIONS
+def _merge_spec_config_into_broker_put(jar, config, spec, params, templated_conf):
+ """
+ The purpose of this function is to form the CDAP Broker PUT from the CDAP compponent jar, spec, config, and params, where:
+ - jar is a URL
+ - config is the CDAP "auxillary file"
+ - spec is the CDAP component specification
+ - params contains the subkeys "app_config", "app_preferences", "program_preferences" from the parameters config specification
+ - (this last one isn't REALLY needed because it is a subset of "spec", but some preprocessing has already been done, specifically "normalize_cdap_params"
+
+ The CDAP Broker API existed well before the component spec, so there is overlap with different naming.
+ In the future, if this component spec becomes production and everyone follows it,
+ I will change the broker API to use the same keys so that this mapping becomes unneccessary.
+ However, while this is still a moving project, I am simply going to do a horrible mapping here.
+
+ The CDAP broker PUT looks as follows:
+ {
+ "service_component_type" : ...,
+ "jar_url" : ...,
+ "artifact_name" : ...,
+ "artifact_version" : ...,
+ "app_config" : ...,
+ "app_preferences" : ...,
+ "program_preferences": ...,
+ "programs": ...,
+ "streamname" : ...,
+ "namespace" : ...,
+ "service_endpoints" : ...
+ }
+
+ "So you cooked up a story and dropped the six of us into a *meat grinder*" - Arnold Schwarzenegger, Predator.
+
+ #RE: Streams/consumes: this is used in the designer for validation but does not lead to anything in the CDAP developers configuration.
+ """
+
+ #map services/provides into service_endpoints broker JSON
+ services = spec["services"]["provides"] # is [] if empty
+ se = []
+ if services != []:
+ for s in services:
+ se.append({"service_name" : s["service_name"], "service_endpoint" : s["service_endpoint"], "endpoint_method" : s["verb"]})
+
+ BrokerPut = {
+ "cdap_application_type" : "program-flowlet", #TODO! Fix this once Hydrator apps is integrated into this CLI tool.
+ "service_component_type" : spec["self"]["component_type"],
+ "jar_url" : jar,
+ "artifact_version" : config["artifact_version"],
+ "artifact_name" : config["artifact_name"],
+ "artifact_version" : config["artifact_version"],
+ "programs": config["programs"],
+ "streamname" : config["streamname"],
+ "services" : se,
+ }
+
+ Optionals = {v : config[v] for v in [i for i in ["namespace"] if i in config]}
+
+ #not a fan of whatever is going on in update such that I can't return this in single line
+ BrokerPut.update(Optionals)
+ BrokerPut.update(params)
+
+ # NOTE: app_config comes from params
+ BrokerPut["app_config"]["services_calls"] = templated_conf["services_calls"]
+ BrokerPut["app_config"]["streams_publishes"] = templated_conf["streams_publishes"]
+ BrokerPut["app_config"]["streams_subscribes"] = templated_conf["streams_subscribes"]
+
+ return BrokerPut
+
def _get_broker_url_from_profile(profile):
    """
    Gets the broker URL from profile

    Queries the Consul catalog on `profile.consul_host` for the service named
    by `profile.cdap_broker` and returns "http://<ip>:<port>".
    NOTE(review): only the first catalog entry is used — assumes a single
    broker registration; verify against deployment expectations.
    """
    #Functions named so well you don't need docstrings. (C) tombo 2017
    res = requests.get("http://{0}:8500/v1/catalog/service/{1}".format(profile.consul_host, profile.cdap_broker)).json()
    return "http://{ip}:{port}".format(ip=res[0]["ServiceAddress"], port=res[0]["ServicePort"])
+
+#PUBLIC
def run_component(catalog, params, instance_name, profile, jar, config, spec, templated_conf):
    """
    Runs a CDAP Component

    By the time this function is called, the instance_name and instance_name:rel have already been pushed into consul by this parent function
    instance_name will be overwritten by the broker and the rels key will be used by the broker to call the CBS

    Flow: resolve the broker url from the profile, PUT the merged broker
    payload, then (on success) fetch the cluster url and the bound AppConfig
    purely to log them for the component developer.

    Raises:
        DcaeException: when the broker PUT returns a non-2xx status
    """
    broker_url = _get_broker_url_from_profile(profile)

    #register with the broker
    broker_put = _merge_spec_config_into_broker_put(jar, config, spec, params, templated_conf)

    #helps the component developer debug their spec if CDAP throws a 400
    _logger.info("Your (unbound, bound will be shown if deployment completes) app_config is being sent as")
    _logger.info(json.dumps(broker_put["app_config"]))

    _logger.info("Your app_preferences are being sent as")
    _logger.info(json.dumps(broker_put["app_preferences"]))

    _logger.info("Your program_preferences are being sent as")
    _logger.info(json.dumps(broker_put["program_preferences"]))

    response = requests.put("{brokerurl}/application/{appname}".format(brokerurl=broker_url, appname=instance_name),
                             json = broker_put,
                             headers = {'content-type':'application/json'})

    deploy_success = False
    try:
        response.raise_for_status() #bomb if not 2xx
        deploy_success = True
    except:
        #need this to raise a dirty status code for tests to work, so not just logging
        raise DcaeException("A Deployment Error Occured. Broker Response: {0}, Broker Response Text: {1}".format(response.status_code, response.text))

    if deploy_success:
        #TODO: not sure what this error handling looks like, should never happen that a deploy succeeds but this get fails
        #Get the cluster URL to tell the user to go check their application
        response = requests.get(broker_url)
        response.raise_for_status() #bomb if not 2xx
        cdap_cluster = response.json()["managed cdap url"]

        #Fetch the Application's AppConfig to show them what the bound config looks like:
        #TODO: This should be an endpoint in the broker. I filed an issue in the broker. For now, do the horrendous special character mapping here.
        #TODO: This only fetches AppConfig, add AppPreferences
        ns = "default" if "namespace" not in broker_put else broker_put["namespace"]
        # CDAP app names may only contain alphanumerics, so strip everything else
        mapped_appname = ''.join(e for e in instance_name if e.isalnum())
        r = requests.get("{0}/v3/namespaces/{1}/apps/{2}".format(cdap_cluster, ns, mapped_appname)).json()
        config = r["configuration"]

        _logger.info("Deployment Complete!")
        _logger.info("The CDAP cluster API is at {0}. The *GUI* Port is {1}. You may now go check your application there to confirm it is running correctly.".format(cdap_cluster, response.json()["cdap GUI port"]))
        _logger.info("Your instance name is: {0}. In CDAP, this will appear as: {1}".format(instance_name, mapped_appname))
        _logger.info("The bound Configuration for this application is: {0}".format(config))

    #TODO: Should we tell the user about metrics and healthcheck to try those too?
+
def normalize_cdap_params(spec):
    """
    The CDAP component specification includes some optional fields that the broker expects.
    This parses the specification, includes those fields if those are there, and sets the broker defaults otherwise
    """
    p = spec["parameters"]

    def _as_dict(entries):
        # [{"name": ..., "value": ...}, ...] -> {name: value}
        return {entry["name"]: entry["value"] for entry in entries}

    Params = {
        "app_preferences": _as_dict(p.get("app_preferences", [])),
        "app_config": _as_dict(p.get("app_config", [])),
        "program_preferences": [
            {"program_id": tup["program_id"],
             "program_type": tup["program_type"],
             "program_pref": _as_dict(tup["program_pref"])}
            for tup in p.get("program_preferences", [])
        ],
    }
    return Params
+
def undeploy_component(profile, instance_name):
    """
    Undeploys a CDAP Component, which in CDAP terms means stop and delete

    Returns:
        True when the broker DELETE returned 2xx; False otherwise (failures
        are logged, never raised).
    """
    broker_url = _get_broker_url_from_profile(profile)

    #call the delete
    response = requests.delete("{brokerurl}/application/{appname}".format(brokerurl=broker_url, appname=instance_name))
    try:
        response.raise_for_status() #bomb if not 2xx
        _logger.info("Undeploy complete.")
        return True
    except Exception as e:
        _logger.error("An undeploy Error Occured: {2}. Broker Response: {0}, Broker Response Text: {1}".format(response.status_code, response.text, e))
        return False
+
diff --git a/mod/onboardingapi/dcae_cli/util/config.py b/mod/onboardingapi/dcae_cli/util/config.py
new file mode 100644
index 0000000..f9936c3
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/config.py
@@ -0,0 +1,156 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides dcae cli config utilities
+"""
+import os, re
+
+import click
+import six
+
+from dcae_cli import util
+from dcae_cli import _version
+from dcae_cli.util import get_app_dir, get_pref, update_pref, write_pref, pref_exists
+
+
class ConfigurationInitError(RuntimeError):
    """Raised when the dcae-cli configuration could not be set up."""
    pass
+
def get_config_path():
    '''Returns the absolute configuration file path (config.json under the app dir)'''
    return os.path.join(get_app_dir(), 'config.json')
+
+
def _init_config_user():
    '''Prompts until the user supplies a valid user id and returns it

    A valid id consists only of word characters (letters, digits, underscore);
    the empty string is also accepted by the pattern.
    '''
    while True:
        user = click.prompt('Please enter your user id', type=str).strip()

        # There should be no special characters.
        # Raw string: "\w" and "\Z" are invalid escape sequences in a plain
        # string literal (DeprecationWarning, SyntaxWarning in newer Pythons).
        if re.match(r"(?:\w*)\Z", user):
            return user
        else:
            click.echo("Invalid user id. Please try again.")
+
def _init_config_server_url():
    '''Prompts for and returns the remote server url (whitespace-stripped)'''
    return click.prompt("Please enter the remote server url", type=str).strip()
+
def _init_config_db_url():
    '''Prompts for onboarding catalog credentials and returns a postgres connection url

    NOTE(review): the port (5432) and database name (dcae_onboarding_db) are
    hardcoded here.
    '''
    click.echo("Now we need to set up access to the onboarding catalog")
    hostname = click.prompt("Please enter the onboarding catalog hostname").strip()
    user = click.prompt("Please enter the onboarding catalog user").strip()
    password = click.prompt("Please enter the onboarding catalog password").strip()
    return "postgresql://{user}:{password}@{hostname}:5432/dcae_onboarding_db".format(
            hostname=hostname, user=user, password=password)
+
def _init_config():
    '''Returns an initial dict for populating the config

    Seeds the configuration from the remote server when possible; on fetch
    failure the user may opt to finish the setup manually.

    Raises:
        ConfigurationInitError: when the remote fetch fails and the user
            declines manual setup
    '''
    new_config = {}

    # Prompt for the server url up front so it is always bound for the
    # fallback path below (previously, a failure inside the prompt itself
    # could leave `server_url` undefined and trigger a NameError).
    server_url = _init_config_server_url()

    try:
        # Grab the remote seed config and merge it in
        new_config = util.fetch_file_from_web(server_url, "/dcae-cli/config.json")
    except Exception:
        # Failing to pull seed configuration from remote server is not considered
        # a problem. Just continue and give user the option to set it up
        # themselves. (Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        if not click.confirm("Could not download initial configuration from remote server. Attempt manually setting up?"):
            raise ConfigurationInitError("Could not setup dcae-cli configuration")

    # UPDATE: Keeping the server url even though the config was not found there.
    new_config["server_url"] = server_url
    new_config["user"] = _init_config_user()
    new_config["cli_version"] = _version.__version__

    if "db_url" not in new_config or not new_config["db_url"]:
        # The seed configuration was not provided so manually set up the db
        # connection
        new_config["db_url"] = _init_config_db_url()

    if "active_profile" not in new_config:
        # The seed configuration was not provided which means the profiles will
        # be the same. The profile will be hardcoded to a an empty default.
        new_config["active_profile"] = "default"

    return new_config
+
+
def should_force_reinit(config):
    """Configs older than 2.0.0 should be replaced"""
    # Missing version is treated as ancient (0.0.0).
    major = config.get("cli_version", "0.0.0").split(".", 1)[0]
    return int(major) < 2
+
def get_config():
    '''Returns the configuration dictionary, interactively initializing it if the file is missing'''
    return get_pref(get_config_path(), _init_config)
+
def get_server_url():
    """Returns the remote server url (or None if not configured)

    The remote server holds the artifacts that the dcae-cli requires like the
    seed config json and seed profiles json, and json schemas.
    """
    return get_config().get("server_url")
+
def get_docker_logins_key():
    """Returns the Consul key that Docker logins are stored under

    Default is "docker_plugin/docker_logins" which matches up with the docker
    plugin default.
    """
    return get_config().get("docker_logins_key", "docker_plugin/docker_logins")
+
+# These functions are used to fetch the configurable path to the various json
+# schema files used in validation.
+
def get_path_component_spec():
    '''Returns the (configurable) server path to the component spec json schema'''
    return get_config().get("path_component_spec",
            "/schemas/component-specification/dcae-cli-v2/component-spec-schema.json")
+
def get_path_data_format():
    '''Returns the (configurable) server path to the data format json schema'''
    return get_config().get("path_data_format",
            "/schemas/data-format/dcae-cli-v1/data-format-schema.json")
+
def get_active_profile():
    '''Returns the name of the active profile, or None when unset'''
    return get_config().get("active_profile", None)
+
+
def update_config(**kwargs):
    '''Updates and returns the configuration dictionary

    Overlays `kwargs` onto the persisted config and writes it back.
    '''
    return update_pref(path=get_config_path(), init_func=get_config, **kwargs)
+
+
def _reinit_config(init_func):
    """Rebuilds the config via `init_func`, merging it over any existing config on disk.

    New values win over existing ones; the merged result is persisted and
    returned.
    """
    new_config = init_func()
    config_path = get_config_path()

    if pref_exists(config_path):
        # Clobber existing values with the freshly initialized ones,
        # not the other way around.
        merged = get_config()
        merged.update(new_config)
        new_config = merged

    write_pref(new_config, config_path)
    return new_config
+
def reinit_config():
    '''Re-runs interactive configuration initialization and returns the resulting config dict'''
    return _reinit_config(_init_config)
diff --git a/mod/onboardingapi/dcae_cli/util/discovery.py b/mod/onboardingapi/dcae_cli/util/discovery.py
new file mode 100644
index 0000000..e8d2ff8
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/discovery.py
@@ -0,0 +1,777 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides Consul helper functions
+"""
+import re
+import json
+import contextlib
+from collections import defaultdict
+from itertools import chain
+from functools import partial
+from datetime import datetime
+from uuid import uuid4
+
+import six
+from copy import deepcopy
+from consul import Consul
+
+from dcae_cli.util.logger import get_logger
+from dcae_cli.util.exc import DcaeException
+from dcae_cli.util.profiles import get_profile
+from dcae_cli.util.config import get_docker_logins_key
+
+import os
+import click
+
+logger = get_logger('Discovery')
+
+# NOTE: Removed the suffix completely. The useful piece of the suffix was the
+# location but it was implemented in a static fashion (hardcoded). Rather than
+# enhancing the existing approach and making the suffix dynamic (to support
+# "rework-central" and "solutioning"), the thinking is to revisit this name stuff
+# and use Consul's query interface so that location is a tag attribute.
+_inst_re = re.compile(r"^(?P<user>[^.]*).(?P<hash>[^.]*).(?P<ver>\d+-\d+-\d+).(?P<comp>.*)$")
+
+
class DiscoveryError(DcaeException):
    """Base error for discovery (Consul) operations."""
    pass
+
class DiscoveryNoDownstreamComponentError(DiscoveryError):
    """Raised when no compatible downstream component is available."""
    pass
+
+
def default_consul_host():
    """Return default consul host (from the active profile)

    This method was created to purposefully make fetching the default lazier than
    the previous impl. The previous impl had the default as a global variable and
    thus requiring the configuration to be setup before doing anything further.
    The pain point of that impl is in unit testing where now all code that
    imported this module had a strict dependency upon the impure configuration.
    """
    return get_profile().consul_host
+
+
+def _choose_consul_host(consul_host):
+ """Chooses the appropriate consul host
+
+ Chooses between a provided value and a default
+ """
+ return default_consul_host() if consul_host == None else consul_host
+
+
def replace_dots(comp_name, reverse=False):
    '''Converts dots to dashes to prevent downstream users of Consul from exploding

    With reverse=True, converts dashes back to dots.
    '''
    old, new = ('-', '.') if reverse else ('.', '-')
    return comp_name.replace(old, new)
+
+# Utility functions for using Consul
+
+def _is_healthy_pure(get_health_func, instance):
+ """Checks to see if a component instance is running healthy
+
+ Pure function edition
+
+ Args
+ ----
+ get_health_func: func(string) -> complex object
+ Look at unittests in test_discovery to see examples
+ instance: (string) fully qualified name of component instance
+
+ Returns
+ -------
+ True if instance has been found and is healthy else False
+ """
+ index, resp = get_health_func(instance)
+
+ if resp:
+ def is_passing(instance):
+ return all([check["Status"] == "passing" for check in instance["Checks"]])
+ return any([is_passing(instance) for instance in resp])
+ else:
+ return False
+
def is_healthy(consul_host, instance):
    """Checks to see if a component instance is running healthy

    Impure function edition — queries the Consul health API and delegates
    the evaluation to _is_healthy_pure.

    Args
    ----
    consul_host: (string) host string of Consul
    instance: (string) fully qualified name of component instance

    Returns
    -------
    True if instance has been found and is healthy else False
    """
    cons = Consul(consul_host)
    return _is_healthy_pure(cons.health.service, instance)
+
+def _get_instances_from_kv(get_from_kv_func, user):
+ """Get component instances from kv store
+
+ Deployed component instances get entries in a kv store to store configuration
+ information. This is a way to source a list of component instances that were
+ attempted to run. A component could have deployed but failed to register itself.
+ The only trace of that deployment would be checking the kv store.
+
+ Args
+ ----
+ get_from_kv_func: func(string, boolean) -> (don't care, list of dicts)
+ Look at unittests in test_discovery to see examples
+ user: (string) user id
+
+ Returns
+ -------
+ List of unique component instance names
+ """
+ # Keys from KV contain rels key entries and non-rels key entries. Keep the
+ # rels key entries but remove the ":rel" suffix because we are paranoid that
+ # this could exist without the other
+ _, instances_kv = get_from_kv_func(user, recurse=True)
+ return [] if instances_kv is None \
+ else list(set([ dd["Key"].replace(":rel", "") for dd in instances_kv ]))
+
+def _get_instances_from_catalog(get_from_catalog_func, user):
+ """Get component instances from catalog
+
+ Fetching instances from the catalog covers the deployment cases where
+ components registered successfully regardless of their health check status.
+
+ Args
+ ----
+ get_from_catalog_func: func() -> (don't care, dict)
+ Look at unittests in test_discovery to see examples
+ user: (string) user id
+
+ Returns
+ -------
+ List of unique component instance names
+ """
+ # Get all services and filter here by user
+ response = get_from_catalog_func()
+ return list(set([ instance for instance in response[1].keys() if user in instance ]))
+
+def _merge_instances(user, *get_funcs):
+ """Merge the result of an arbitrary list of get instance function calls
+
+ Args
+ ----
+ user: (string) user id
+ get_funcs: func(string) -> list of strings
+ Functions that take in a user parameter to output a list of instance
+ names
+
+ Returns
+ -------
+ List of unique component instance names
+ """
+ return list(set(chain.from_iterable([ get_func(user) for get_func in get_funcs ])))
+
def _get_instances(consul_host, user):
    """Get all deployed component instances for a given user

    Sourced from multiple places (kv store and catalog) to ensure we get a
    complete list of all component instances no matter what state they are in.

    Args
    ----
    consul_host: (string) host string of Consul
    user: (string) user id

    Returns
    -------
    List of unique component instance names
    """
    cons = Consul(consul_host)

    get_instances_from_kv = partial(_get_instances_from_kv, cons.kv.get)
    get_instances_from_catalog = partial(_get_instances_from_catalog, cons.catalog.services)

    return _merge_instances(user, get_instances_from_kv, get_instances_from_catalog)
+
+
+# Custom (sometimes higher order) "discovery" functionality
+
def _make_instances_map(instances):
    """Make an instance map

    Instance map is a dict where the keys are tuples (component type, component version)
    that map to a set of strings that are instance names. Instance names that
    do not match the expected "<user>.<hash>.<ver>.<comp>" pattern are skipped.
    """
    mapping = defaultdict(set)

    for inst in instances:
        parsed = _inst_re.match(inst)
        if parsed is None:
            continue

        _user, _hash, ver, comp = parsed.groups()
        # Names are stored dash-encoded in Consul; keys use the dotted forms
        key = (replace_dots(comp, reverse=True), replace_dots(ver, reverse=True))
        mapping[key].add(inst)

    return mapping
+
+
def get_user_instances(user, consul_host=None, filter_instances_func=is_healthy):
    '''Get a user's instance map

    Args:
    -----
    user: (string) user id
    consul_host: optional Consul host; defaults to the active profile's host
    filter_instances_func: fn(consul_host, instance) -> boolean
        Function used to filter instances. Default is is_healthy

    Returns:
    --------
    Dict whose keys are component (name,version) tuples and values are list of component instance names
    '''
    consul_host = _choose_consul_host(consul_host)
    filter_func = partial(filter_instances_func, consul_host)
    instances = list(filter(filter_func, _get_instances(consul_host, user)))

    return _make_instances_map(instances)
+
+
def _get_component_instances(filter_instances_func, user, cname, cver, consul_host):
    """Get component instances that are filtered

    Args:
    -----
    filter_instances_func: fn(consul_host, instance) -> boolean
        Function used to filter instances
    user: (string) user id
    cname: (string) component name (may be dotted or dashed, see below)
    cver: (string) component version
    consul_host: (string) host string of Consul

    Returns
    -------
    List of strings where the strings are fully qualified instance names
    """
    instance_map = get_user_instances(user, consul_host=consul_host,
            filter_instances_func=filter_instances_func)

    # REVIEW: We don't restrict component names from using dashes. We do
    # transform names with dots to use dashes for domain segmenting reasons.
    # Instance map creation always reverses that making dashes to dots even though
    # the component name may have dashes. Thus always search for instances by
    # a dotted component name. We are open to a collision but that is low chance
    # - someone has to use the same name in dotted and dashed form which is weird.
    cname_dashless = replace_dots(cname, reverse=True)

    # WATCH: instances_map.get returns set. Force to be list to have consistent
    # return
    return list(instance_map.get((cname_dashless, cver), []))
+
def get_healthy_instances(user, cname, cver, consul_host=None):
    """Lists healthy instances of a particular component for a given user

    Returns
    -------
    List of strings where the strings are fully qualified instance names
    """
    consul_host = _choose_consul_host(consul_host)
    return _get_component_instances(is_healthy, user, cname, cver, consul_host)
+
def get_defective_instances(user, cname, cver, consul_host=None):
    """Lists *not* running instances of a particular component for a given user

    This means that there are component instances that are sitting out there
    deployed but not successfully running.

    Returns
    -------
    List of strings where the strings are fully qualified instance names
    """
    # Inverts is_healthy so _get_component_instances keeps the unhealthy ones
    def is_not_healthy(consul_host, component):
        return not is_healthy(consul_host, component)

    consul_host = _choose_consul_host(consul_host)
    return _get_component_instances(is_not_healthy, user, cname, cver, consul_host)
+
+
def lookup_instance(consul_host, name):
    """Query Consul for service details of `name`; returns the raw catalog result list"""
    cons = Consul(consul_host)
    index, results = cons.catalog.service(name)
    return results
+
def parse_instance_lookup(results):
    """Parse the resultset from lookup_instance

    Returns:
    --------
    String in host form <address>:<port> built from the first result, or
    None when the resultset is empty/falsy
    """
    if not results:
        return None

    # Just grab first
    first = results[0]
    return "{address}:{port}".format(address=first["ServiceAddress"],
            port=first["ServicePort"])
+
+
+def _create_rels_key(config_key):
+ """Create rels key from config key
+
+ Assumes config_key is well-formed"""
+ return "{:}:rel".format(config_key)
+
+
+def _create_dmaap_key(config_key):
+ """Create dmaap key from config key
+
+ Assumes config_key is well-formed"""
+ return "{:}:dmaap".format(config_key)
+
+
+def _create_policies_key(config_key):
+ """Create policies key from config key
+
+ Assumes config_key is well-formed"""
+ return "{:}:policies/".format(config_key)
+
def clear_user_instances(user, host=None):
    '''Removes all Consul key:value entries for a given user

    Parameters
    ----------
    user : string
        User namespace whose keys are to be removed
    host : string, optional
        Consul host; when None a default host is chosen
    '''
    host = _choose_consul_host(host)
    cons = Consul(host)
    # recurse=True removes every key under the user's namespace prefix
    cons.kv.delete(user, recurse=True)
+
+
# User-facing warning/error message templates used by _get_downstream when
# resolving the downstream component for a config_key
_multiple_compat_msg = '''Component '{cname}' config_key '{ckey}' has multiple compatible downstream \
components: {compat}. The current infrastructure can only support interacing with a single component. \
Only downstream component '{chosen}' will be connected.'''

_no_compat_msg = "Component '{cname}' config_key '{ckey}' has no compatible downstream components."

_no_inst_msg = '''Component '{cname}' config_key '{ckey}' is compatible with downstream component '{chosen}' \
however there are no instances available for connecting.'''
+
+
+def _cfmt(*args):
+ '''Returns a string formatted representation for a component and version'''
+ if len(args) == 1:
+ return ':'.join(args[0])
+ elif len(args) == 2:
+ return ':'.join(args)
+ else:
+ raise DiscoveryError('Input should be name, version or (name, version)')
+
+
+def _get_downstream(cname, cver, config_key, compat_comps, instance_map,
+ force=False):
+ '''
+ Returns a component type and its instances to use for a given config key
+
+ Parameters
+ ----------
+ cname : string
+ Name of the upstream component
+ cver : string
+ Version of the upstream component
+ config_key : string
+ Mainly used for populating warnings meaningfully
+ compat_comps : dict
+ A list of component (name, version) tuples
+ instance_map : dict
+ A dict whose keys are component (name, version) tuples and values are a list of instance names
+ '''
+ if not compat_comps:
+ conn_comp = ('', '')
+ logger.warning(_no_compat_msg.format(cname=_cfmt(cname, cver), ckey=config_key))
+ else:
+ conn_comp = six.next(iter(compat_comps))
+ if len(compat_comps) > 1:
+ logger.warning(_multiple_compat_msg.format(cname=_cfmt(cname, cver), ckey=config_key,
+ compat=list(map(_cfmt, compat_comps)), chosen=_cfmt(conn_comp)))
+ if all(conn_comp):
+ instances = instance_map.get(conn_comp, tuple())
+ if not instances:
+ if force:
+ logger.warning(_no_inst_msg.format(cname=_cfmt(cname, cver), \
+ ckey=config_key, chosen=_cfmt(conn_comp)))
+ else:
+ logger.error(_no_inst_msg.format(cname=_cfmt(cname, cver), \
+ ckey=config_key, chosen=_cfmt(conn_comp)))
+ raise DiscoveryNoDownstreamComponentError("No compatible downstream component found.")
+ else:
+ instances = tuple()
+
+ return conn_comp, instances
+
+
def create_config(user, cname, cver, params, interface_map, instance_map, dmaap_map,
                  instance_prefix=None, force=False):
    '''
    Creates a config and corresponding rels entries in Consul. Returns the Consul the keys and entries.

    Parameters
    ----------
    user : string
        The user namespace to create the config and rels under. E.g. user.foo.bar...
    cname : string
        Name of the upstream component
    cver : string
        Version of the upstream component
    params : dict
        Parameters of the component, taken directly from the component specification
    interface_map : dict
        A dict mapping the config_key of published streams and/or called services to a list of compatible
        component types and versions
    instance_map : dict
        A dict mapping component types and versions to a list of instances currently running
    dmaap_map : dict
        A dict that contains config key to dmaap information. This map is checked
        first before checking the instance_map which means before checking for
        direct http components.
    instance_prefix : string, optional
        The unique prefix to associate with the component instance whose config is being created
    force: string, optional
        Config will continue to be created even if there are no downstream compatible
        component when this flag is set to True. Default is False.

    Returns
    -------
    Tuple (conf_key, conf, rels_key, rels, dmaap_key, dmaap_map_just_info)
    '''
    inst_pref = str(uuid4()) if instance_prefix is None else instance_prefix
    conf_key = "{:}.{:}.{:}.{:}".format(user, inst_pref, replace_dots(cver), replace_dots(cname))
    rels_key = _create_rels_key(conf_key)
    dmaap_key = _create_dmaap_key(conf_key)

    conf = params.copy()
    rels = list()

    # NOTE: The dmaap_map entries are broken up between the templetized config
    # and the dmaap json in Consul
    for config_key in dmaap_map:
        conf[config_key] = deepcopy(dmaap_map[config_key])
        # Here comes the magic. << >> signifies dmaap to downstream config
        # binding service.
        conf[config_key]["dmaap_info"] = "<<{:}>>".format(config_key)

    # NOTE: The interface_map may not contain *all* possible interfaces
    # that may be connected with because the catalog.get_discovery call filters
    # based upon neighbors. Essentailly the interface_map is being pre-filtered
    # which is probably a latent bug.

    for config_key, compat_types in interface_map.items():
        # Don't clobber config keys that have been set from above
        if config_key not in conf:
            conn_comp, instances = _get_downstream(cname, cver, config_key,
                    compat_types, instance_map, force=force)
            conn_name, conn_ver = conn_comp
            middle = ''

            if conn_name and conn_ver:
                middle = "{:}.{:}".format(replace_dots(conn_ver), replace_dots(conn_name))
            else:
                if not force:
                    raise DiscoveryNoDownstreamComponentError("No compatible downstream component found.")

            # {{<middle>}} is a template placeholder later resolved to the
            # downstream component's address
            config_val = '{{' + middle + '}}'
            conf[config_key] = config_val
            rels.extend(instances)

    dmaap_map_just_info = { config_key: v["dmaap_info"]
                            for config_key, v in dmaap_map.items() }
    return conf_key, conf, rels_key, rels, dmaap_key, dmaap_map_just_info
+
+
def get_docker_logins(host=None):
    """Get Docker logins from Consul

    Parameters
    ----------
    host : string, optional
        Consul host; when None a default host is chosen

    Returns
    -------
    List of objects where the objects must be of the form
        {"registry": .., "username":.., "password":.. }
    Empty list when the key has no value
    """
    key = get_docker_logins_key()
    host = _choose_consul_host(host)
    (index, val) = Consul(host).kv.get(key)

    if val:
        # Consul returns the stored value as raw bytes
        return json.loads(val['Value'].decode("utf-8"))
    else:
        return []
+
+
def push_config(conf_key, conf, rels_key, rels, dmaap_key, dmaap_map, host=None):
    '''Uploads the config and rels to Consul

    Writes three KV pairs — the config itself, its ":rel" relationships list
    and its ":dmaap" map — each serialized as JSON, then logs the
    SERVICE_NAME export needed before a later "component reconfig".
    '''
    host = _choose_consul_host(host)
    cons = Consul(host)
    for k, v in ((conf_key, conf), (rels_key, rels), (dmaap_key, dmaap_map)):
        cons.kv.put(k, json.dumps(v))

    logger.info("* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *")
    logger.info("* If you run a 'component reconfig' command, you must first execute the following")
    logger.info("* export SERVICE_NAME={:}".format(conf_key))
    logger.info("* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *")
+
+
def remove_config(config_key, host=None):
    """Deletes a config from Consul

    Returns
    -------
    True when all artifacts have been successfully deleted else False
    """
    # "recurse=True" deletes the SERVICE_NAME KV and all other KVs with
    # suffixes (:rel, :dmaap, :policies)
    consul = Consul(_choose_consul_host(host))
    return consul.kv.delete(config_key, recurse=True)
+
+
+def _group_config(config, config_key_map):
+ """Groups config by streams_publishes, streams_subscribes, services_calls"""
+ # Copy non streams and services first
+ grouped_conf = { k: v for k,v in six.iteritems(config)
+ if k not in config_key_map }
+
+ def group(group_name):
+ grouped_conf[group_name] = { k: v for k,v in six.iteritems(config)
+ if k in config_key_map and config_key_map[k]["group"] == group_name }
+
+ # Copy and group the streams and services
+ # Map returns iterator so must force running its course
+ list(map(group, ["streams_publishes", "streams_subscribes", "services_calls"]))
+ return grouped_conf
+
+
def _apply_inputs(config, inputs_map):
    """Update configuration with inputs

    This method updates the values of the configuration parameters using values
    from the inputs map.

    Mutates and returns the passed-in config dict; inputs_map values win on
    key collisions.
    """
    config.update(inputs_map)
    return config
+
+
@contextlib.contextmanager
def config_context(user, cname, cver, params, interface_map, instance_map,
        config_key_map, dmaap_map=None, inputs_map=None, instance_prefix=None,
        host=None, always_cleanup=True, force_config=False):
    '''Convenience utility for creating configs and cleaning them up

    Args
    ----
    always_cleanup: (boolean)
        This context manager will cleanup the produced config
        context always if this is True. When False, cleanup will only occur upon any
        exception getting thrown in the context manager block. Default is True.
    force_config: (boolean)
        Config will continue to be created even if there are no downstream compatible
        component when this flag is set to True. Default is False.
    '''
    # None sentinels instead of mutable default arguments so a shared dict is
    # never accidentally reused across calls (behavior otherwise unchanged)
    dmaap_map = {} if dmaap_map is None else dmaap_map
    inputs_map = {} if inputs_map is None else inputs_map
    host = _choose_consul_host(host)

    try:
        conf_key, conf, rels_key, rels, dmaap_key, dmaap_map = create_config(
                user, cname, cver, params, interface_map, instance_map, dmaap_map,
                instance_prefix, force=force_config)

        conf = _apply_inputs(conf, inputs_map)
        conf = _group_config(conf, config_key_map)

        push_config(conf_key, conf, rels_key, rels, dmaap_key, dmaap_map, host)
        yield (conf_key, conf)
    except Exception as e:
        if not always_cleanup:
            # conf_key may be unbound when create_config itself raised; only
            # remove the config when it was actually created
            try:
                conf_key, rels_key, host
            except UnboundLocalError:
                pass
            else:
                remove_config(conf_key, host)

        raise e
    finally:
        if always_cleanup:
            try:
                conf_key, rels_key, host
            except UnboundLocalError:
                pass
            else:
                remove_config(conf_key, host)
+
+
def policy_update(policy_change_file, consul_host):
    """Apply a policy change file to the policies stored in Consul

    Handles either a full replacement ("policies" section present) or
    incremental "updated_policies"/"removed_policies" changes, then records a
    policy event. Returns True on success, False on any Consul failure.
    """
    # Determine which sections the user supplied in the policy change file
    has_all_policies = "policies" in policy_change_file
    has_updated = "updated_policies" in policy_change_file
    has_removed = "removed_policies" in policy_change_file

    cons = Consul(consul_host)
    service_name = os.environ["SERVICE_NAME"]
    policy_folder = service_name + ":policies/items/"
    event_folder = service_name + ":policies/event"

    if has_all_policies:
        # User specified ALL "policies" in the Policy File. Ignore
        # "updated_policies"/"removed_policies"
        logger.warning("The 'policies' specified in the 'policy-file' will replace all policies in Consul.")
        if not update_all_policies(cons, policy_folder, policy_change_file['policies']):
            return False
    else:
        # If 'removed_policies', delete the Policy from the Component KV pair
        if has_removed:
            if not remove_policies(cons, policy_folder, policy_change_file['removed_policies']):
                return False

        # If 'updated_policies', update the Component KV pair
        if has_updated:
            if not update_specified_policies(cons, policy_folder, policy_change_file['updated_policies']):
                return False

    return create_policy_event(cons, event_folder, policy_folder)
+
+
def create_policy_event(cons, event_folder, policy_folder):
    """ Create a Policy 'event' KV pair in Consul

    Records a "gathered" event carrying a UTC timestamp, a unique update id
    and the count of policies currently stored under policy_folder.

    Returns True when the event KV was written, False otherwise.
    """
    timestamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    update_id = str(uuid4())

    # Count the policies directly from the recursive KV fetch rather than
    # counting "'Key':" occurrences in the stringified response
    _index, policy_entries = cons.kv.get(policy_folder, recurse=True)
    policies_count = len(policy_entries) if policy_entries else 0

    event = json.dumps({"action": "gathered", "timestamp": timestamp,
                        "update_id": update_id, "policies_count": policies_count})
    if not cons.kv.put(event_folder, event):
        logger.error("Policy 'Event' creation of ({:}) in Consul failed".format(event_folder))
        return False

    return True
+
+
def update_all_policies(cons, policy_folder, allPolicies):
    """ Delete all policies from Consul, then add the policies the user specified in the 'policies' section of the policy-file """

    # Deletes all Policies under the /policies/items folder
    if not cons.kv.delete(policy_folder, recurse=True):
        logger.error("Policy delete of ({:}) in Consul failed".format(policy_folder))
        return False

    # Re-add exactly the supplied policies; the result is this call's result
    return update_specified_policies(cons, policy_folder, allPolicies)
+
def update_specified_policies(cons, policy_folder, policyUpdates):
    """ Replace the policies the user specified in the 'updated_policies' (or 'policies') section of the policy-file """

    for policy in policyUpdates:
        folder_id = extract_policy_id(policy_folder, policy)
        # A malformed policyName aborts the whole update
        if not folder_id:
            return False
        if not cons.kv.put(folder_id, json.dumps(policy)):
            logger.error("Policy update of ({:}) in Consul failed".format(folder_id))
            return False

    return True
+
+
def remove_policies(cons, policy_folder, policyDeletes):
    """ Delete the policies that the user specified in the 'removed_policies' section of the policy-file """

    for policy in policyDeletes:
        folder_id = extract_policy_id(policy_folder, policy)
        # A malformed policyName aborts the whole removal
        if not folder_id:
            return False
        if not cons.kv.delete(folder_id):
            logger.error("Policy delete of ({:}) in Consul failed".format(folder_id))
            return False

    return True
+
def extract_policy_id(policy_folder, policy):
    """ Extract the Policy ID from the policyName.
    Return the Consul key (Policy Folder with Policy ID) """

    # Extract the policy Id "Consul Key" from the policy name; names must end
    # in ".<digits>.<letters>", e.g. "my.policy.3.xml"
    policyName = policy['policyName']
    match = re.match(r"(.*)\.\d+\.[a-zA-Z]+$", policyName)

    if match is None:
        logger.error("policyName ({:}) needs to end in '.#.xml' in order to extract the Policy ID".format(policyName))
        return None

    return policy_folder + match.group(1)
+
+
def build_policy_command(policy_reconfig_path, policy_change_file, consul_host):
    """ Build command to execute the Policy Reconfig script in the Docker container

    Parameters
    ----------
    policy_reconfig_path : string
        Path of the reconfig script inside the container
    policy_change_file : dict
        Parsed policy change file; may contain "updated_policies" and/or
        "removed_policies" sections
    consul_host : string
        Consul host used to fetch the full current policy set

    Returns
    -------
    List of 3 items (1 Command and 2 ARGs): the script path, the literal
    "policies" argument, and a JSON string holding "updated_policies",
    "removed_policies" and the full "policies" set from Consul
    """
    # Create the Reconfig Script command (3 parts: Command and 2 ARGs)
    command = [policy_reconfig_path, "policies"]

    # 'updated'/'removed' policies come from the --policy-file; default to
    # empty lists when a section is absent
    policies = {
        "updated_policies": policy_change_file.get('updated_policies', []),
        "removed_policies": policy_change_file.get('removed_policies', []),
    }

    # ALL 'policies' - policies come from Consul
    cons = Consul(consul_host)
    service_name = os.environ["SERVICE_NAME"]
    policy_folder = service_name + ":policies/items/"

    # _index avoids shadowing the builtin "id" with Consul's blocking index
    _index, consul_policies = cons.kv.get(policy_folder, recurse=True)

    policies["policies"] = [json.loads(policy['Value'])
                            for policy in (consul_policies or [])]

    # Add the policies to the Docker "command" as a JSON string
    command.append(json.dumps(policies))

    return command
diff --git a/mod/onboardingapi/dcae_cli/util/dmaap.py b/mod/onboardingapi/dcae_cli/util/dmaap.py
new file mode 100644
index 0000000..138e909
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/dmaap.py
@@ -0,0 +1,358 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""
+Functions for DMaaP integration
+"""
+import six
+import logging
+from jsonschema import validate, ValidationError
+from dcae_cli.util import reraise_with_msg
+from dcae_cli.util.logger import get_logger
+from dcae_cli.catalog.mock.schema import apply_defaults
+
+
+logger = get_logger('Dmaap')
+
# JSON schema used to validate each entry of a user-supplied dmaap map.
# Each entry must match exactly one ("oneOf") of: message_router,
# data_router_publisher, data_router_subscriber. The "default" values are
# consumed by apply_defaults_dmaap_map to fill in missing optional fields.
# NOTE(review): data_router_subscriber's dmaap_info has no "required" list
# (unlike the publisher's "publish_url") — presumably because delivery_url
# can be filled in later by update_delivery_urls; confirm before tightening.
_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Schema for dmaap inputs",
    "type": "object",
    "oneOf": [
        { "$ref": "#/definitions/message_router" },
        { "$ref": "#/definitions/data_router_publisher" },
        { "$ref": "#/definitions/data_router_subscriber" }
    ],
    "definitions": {
        "message_router": {
            "type": "object",
            "properties": {
                "type": {
                    "type": "string",
                    "enum": ["message_router"]
                },
                "aaf_username": {
                    "type": "string",
                    "default": None
                },
                "aaf_password": {
                    "type": "string",
                    "default": None
                },
                "dmaap_info": {
                    "type": "object",
                    "properties": {
                        "client_role": {
                            "type": "string",
                            "default": None
                        },
                        "client_id": {
                            "type": "string",
                            "default": None
                        },
                        "location": {
                            "type": "string",
                            "default": None
                        },
                        "topic_url": {
                            "type": "string"
                        }
                    },
                    "required": [
                        "topic_url"
                    ],
                    "additionalProperties": False
                }
            },
            "required": [
                "type",
                "dmaap_info"
            ],
            "additionalProperties": False
        },
        "data_router_publisher": {
            "type": "object",
            "properties": {
                "type": {
                    "type": "string",
                    "enum": ["data_router"]
                },
                "dmaap_info": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "default": None,
                            "description": "the DCAE location for the publisher, used to set up routing"
                        },
                        "publish_url": {
                            "type": "string",
                            "description": "the URL to which the publisher makes Data Router publish requests"
                        },
                        "log_url": {
                            "type": "string",
                            "default": None,
                            "description": "the URL from which log data for the feed can be obtained"
                        },
                        "username": {
                            "type": "string",
                            "default": None,
                            "description": "the username the publisher uses to authenticate to Data Router"
                        },
                        "password": {
                            "type": "string",
                            "default": None,
                            "description": "the password the publisher uses to authenticate to Data Router"
                        },
                        "publisher_id": {
                            "type": "string",
                            "default": ""
                        }
                    },
                    "required": [
                        "publish_url"
                    ],
                    "additionalProperties": False
                }
            },
            "required": [
                "type",
                "dmaap_info"
            ],
            "additionalProperties": False
        },
        "data_router_subscriber": {
            "type": "object",
            "properties": {
                "type": {
                    "type": "string",
                    "enum": ["data_router"]
                },
                "dmaap_info": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "default": None,
                            "description": "the DCAE location for the publisher, used to set up routing"
                        },
                        "delivery_url": {
                            "type": "string",
                            "description": "the URL to which the Data Router should deliver files"
                        },
                        "username": {
                            "type": "string",
                            "default": None,
                            "description": "the username Data Router uses to authenticate to the subscriber when delivering files"
                        },
                        "password": {
                            "type": "string",
                            "default": None,
                            "description": "the username Data Router uses to authenticate to the subscriber when delivering file"
                        },
                        "subscriber_id": {
                            "type": "string",
                            "default": ""
                        }
                    },
                    "additionalProperties": False
                }
            },
            "required": [
                "type",
                "dmaap_info"
            ],
            "additionalProperties": False
        }
    }
}


# Help text logged by validate_dmaap_map_schema when an entry fails schema
# validation; documents the three accepted client object shapes
_validation_msg = """
Is your DMaaP client object a valid json?
Does your DMaaP client object follow this format?

Message router:

    {
        "aaf_username": <string, optional>,
        "aaf_password": <string, optional>,
        "type": "message_router",
        "dmaap_info": {
            "client_role": <string, optional>,
            "client_id": <string, optional>,
            "location": <string, optional>,
            "topic_url": <string, required>
        }
    }

Data router (publisher):

    {
        "type": "data_router",
        "dmaap_info": {
            "location": <string, optional>,
            "publish_url": <string, required>,
            "log_url": <string, optional>,
            "username": <string, optional>,
            "password": <string, optional>,
            "publisher_id": <string, optional>
        }
    }

Data router (subscriber):

    {
        "type": "data_router",
        "dmaap_info": {
            "location": <string, optional>,
            "delivery_url": <string, optional>,
            "username": <string, optional>,
            "password": <string, optional>,
            "subscriber_id": <string, optional>
        }
    }

"""
+
def validate_dmaap_map_schema(dmaap_map):
    """Validate the dmaap map schema

    Validates each config_key's client object against _SCHEMA. On failure,
    logs guidance for the offending key and re-raises via reraise_with_msg.
    """
    for k, v in dmaap_map.items():
        try:
            validate(v, _SCHEMA)
        except ValidationError as e:
            logger.error("DMaaP validation issue with \"{k}\"".format(k=k))
            logger.error(_validation_msg)
            reraise_with_msg(e, as_dcae=True)
+
+
class DMaaPValidationError(RuntimeError):
    """Raised when a dmaap map entry matches none of the known schema definitions"""
    pass
+
def _find_matching_definition(instance):
    """Find and return matching definition given an instance

    Tries each _SCHEMA definition in turn and returns the first one that the
    instance validates against.

    Raises
    ------
    DMaaPValidationError when the instance matches no definition
    """
    for subsection in ["message_router", "data_router_publisher",
            "data_router_subscriber"]:
        try:
            validate(instance, _SCHEMA["definitions"][subsection])
            return _SCHEMA["definitions"][subsection]
        except ValidationError:
            pass

    # You should never get here but just in case..
    logger.error("No matching definition: {0}".format(instance))
    raise DMaaPValidationError("No matching definition")
+
def apply_defaults_dmaap_map(dmaap_map):
    """Apply the defaults to the dmaap map

    For each entry, looks up the schema definition it matches and fills in
    missing optional fields using the schema "default" values.

    Returns the new dmaap map with defaults applied.
    """
    def grab_properties(instance):
        return _find_matching_definition(instance)["properties"]

    return { k: apply_defaults(grab_properties(v), v)
             for k, v in dmaap_map.items() }
+
+
def validate_dmaap_map_entries(dmaap_map, mr_config_keys, dr_config_keys):
    """Validate dmaap map entries

    Validate dmaap map to make sure all config keys are there and that there's
    no additional config keys because this map is used in generating the
    configuration json.

    Returns:
    --------
    True when dmaap_map is ok and False when it is not
    """
    # Catch when there is no dmaap_map when there should be
    if len(mr_config_keys) + len(dr_config_keys) > 0 and len(dmaap_map) == 0:
        logger.error("You have dmaap streams defined in your specification")
        logger.error("You must provide a dmaap json to resolve those dmaap streams.")
        logger.error("Please use the \"--dmaap-file\" option")
        return False

    config_keys = dr_config_keys + mr_config_keys
    # Look for missing keys
    missing_keys = [config_key for config_key in config_keys
                    if config_key not in dmaap_map]

    if missing_keys:
        logger.error("Missing config keys in dmaap json: {0}".format(
            ",".join(missing_keys)))
        logger.error("Re-edit your dmaap json")
        return False

    # Look for unexpected keys
    unexpected_keys = [config_key for config_key in dmaap_map
                       if config_key not in config_keys]

    if unexpected_keys:
        # NOTE: Changed this to a non-error in order to support the feature of
        # developer having a master dmaap map. logger.warning replaces the
        # deprecated logger.warn.
        logger.warning("Unexpected config keys in dmaap json: {0}".format(
            ",".join(unexpected_keys)))

    return True
+
+
def update_delivery_urls(get_route_func, target_base_url, dmaap_map):
    """Update delivery urls for dmaap map

    This method picks out all the data router connections for subscribers and
    updates the delivery urls with the supplied base target url concatentated
    with the user specified route (or path).

    Args:
    -----
    get_route_func (func): Function that takes a config_key and returns the route
        used for the data router subscriber
    target_base_url (string): "{http|https}://<hostname>:<port>"
    dmaap_map (dict): DMaaP map is map of inputs that is config_key to provisioned
        data router feed or message router topic connection details

    Returns:
    --------
    Returns the updated DMaaP map
    """
    def update_delivery_url(config_key, dm):
        route = get_route_func(config_key)
        # startswith handles the empty route safely where indexing route[0]
        # would raise IndexError
        tween = "" if route.startswith("/") else "/"
        dm["dmaap_info"]["delivery_url"] = "{base}{tween}{path}".format(
                base=target_base_url, tween=tween, path=route)
        return dm

    def is_dr_subscriber(dm):
        # Subscribers are the data_router entries without a publish_url
        return dm["type"] == "data_router" and "publish_url" not in dm["dmaap_info"]

    updated_map = { config_key: update_delivery_url(config_key, dm)
            for config_key, dm in dmaap_map.items() if is_dr_subscriber(dm) }
    dmaap_map.update(updated_map)

    return dmaap_map
+
+
def list_delivery_urls(dmaap_map):
    """List delivery urls

    Returns:
    --------
    List of tuples (config_key, delivery_url) for every entry whose
    dmaap_info carries a delivery_url
    """
    return [(config_key, dm["dmaap_info"]["delivery_url"])
            for config_key, dm in dmaap_map.items()
            if "delivery_url" in dm["dmaap_info"]]
diff --git a/mod/onboardingapi/dcae_cli/util/docker_util.py b/mod/onboardingapi/dcae_cli/util/docker_util.py
new file mode 100644
index 0000000..90a6811
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/docker_util.py
@@ -0,0 +1,226 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides utilities for Docker components
+"""
+import socket
+from sys import platform
+
+import docker
+import six
+
+import dockering as doc
+from dcae_cli.util.logger import get_logger
+from dcae_cli.util.exc import DcaeException
+
+dlog = get_logger('Docker')
+
# Registrator image and command template used by _run_registrator; "{:}" is
# filled with the host's external IP
_reg_img = 'gliderlabs/registrator:latest'
# TODO: Source this from app's configuration [ONAP URL TBD]
_reg_cmd = '-ip {:} consul://make-me-valid:8500'

class DockerError(DcaeException):
    """Raised when talking to the Docker engine fails"""
    pass

class DockerConstructionError(DcaeException):
    """Raised when the container deployment configuration cannot be built"""
    pass
+
+
+# Functions to provide envs to pass into Docker containers
+
+def _convert_profile_to_docker_envs(profile):
+ """Convert a profile object to Docker environment variables
+
+ Parameters
+ ----------
+ profile: namedtuple
+
+ Returns
+ -------
+ dict of environemnt variables to be used by docker-py
+ """
+ profile = profile._asdict()
+ return dict([(key.upper(), value) for key, value in six.iteritems(profile)])
+
+
def build_envs(profile, docker_config, instance_name):
    """Build the full set of environment variables for a component container

    Combines profile-derived variables, the health check variables derived
    from docker_config, and the instance name via dockering's create_envs.
    """
    profile_envs = _convert_profile_to_docker_envs(profile)
    health_envs = doc.create_envs_healthcheck(docker_config)
    return doc.create_envs(instance_name, profile_envs, health_envs)
+
+
+# Methods to call Docker engine
+
+# TODO: Consolidate these two docker client methods. Need ability to invoke local
+# vs remote Docker engine
+
def get_docker_client(profile, logins=[]):
    """Create a Docker client connected to the profile's docker host

    Parameters
    ----------
    profile: namedtuple
        Must carry docker_host in "hostname:port" form
    logins: list, optional
        Docker login objects {"registry": .., "username": .., "password": ..}

    Returns
    -------
    A pinged (connection-verified) Docker client

    Raises
    ------
    DockerError when the Docker daemon cannot be reached
    """
    hostname, port = profile.docker_host.split(":")
    try:
        client = doc.create_client(hostname, port, logins=logins)
        client.ping()
        return client
    # Catch Exception rather than using a bare except so SystemExit and
    # KeyboardInterrupt still propagate
    except Exception:
        raise DockerError('Could not connect to the Docker daemon. Is it running?')
+
+
def image_exists(image):
    '''Returns True if the image exists locally'''
    client = docker.APIClient(version="auto", **docker.utils.kwargs_from_env())
    # images() returns a (possibly empty) list; coerce to a boolean
    return bool(client.images(image))
+
+
def _infer_ip():
    '''Infers the IP address of the host running this tool

    Only supported on linux; other platforms must use --external-ip.

    Raises
    ------
    DockerError on non-linux platforms
    '''
    if not platform.startswith('linux'):
        raise DockerError('Non-linux environment detected. Use the --external-ip flag when running Docker components.')
    # NOTE(review): gethostbyname(gethostname()) may resolve to a loopback
    # address depending on /etc/hosts — hence the --external-ip escape hatch
    ip = socket.gethostbyname(socket.gethostname())
    dlog.info("Docker host external IP address inferred to be {:}. If this is incorrect, use the --external-ip flag.".format(ip))
    return ip
+
+
def _run_container(client, config, name=None, wait=False):
    '''Runs a container

    When name is given and a container of that name already exists, a running
    one is reused (its inspect info is returned) while a stopped one is
    removed first. With wait=False, returns the inspect info of the started
    container. With wait=True, streams the container logs until it exits or
    the user interrupts; on interrupt the container is killed and removed
    (the wait=True paths return None).
    '''
    if name is not None:
        # Anchored regex filter gives an exact name match
        info = six.next(iter(client.containers(all=True, filters={'name': "^/{:}$".format(name)})), None)
        if info is not None:
            if info['State'] == 'running':
                dlog.info("Container '{:}' was detected as already running.".format(name))
                return info
            else:
                client.remove_container(info['Id'])

    cont = doc.create_container_using_config(client, name, config)
    client.start(cont)
    info = client.inspect_container(cont)
    name = info['Name'][1:] # remove '/' prefix
    image = config['Image']
    dlog.info("Running image '{:}' as '{:}'".format(image, name))

    if not wait:
        return info

    cont_log = dlog.getChild(name)
    # NOTE(review): for/else with no break in the loop body — the else clause
    # always runs once the log stream ends, i.e. when the container exits
    try:
        for msg in client.logs(cont, stream=True):
            cont_log.info(msg.decode())
        else:
            dlog.info("Container '{:}' exitted suddenly.".format(name))
    except (KeyboardInterrupt, SystemExit):
        dlog.info("Stopping container '{:}' and cleaning up...".format(name))
        client.kill(cont)
        client.remove_container(cont)
+
+
def _run_registrator(client, external_ip=None):
    '''Ensures that Registrator is running

    Starts (or reuses) a host-networked "registrator" container pointed at
    the inferred or supplied external IP. NOTE(review): _reg_cmd still
    contains the "make-me-valid" consul placeholder (see TODO at its
    definition), so this cannot register against a real Consul until that is
    sourced from configuration.
    '''
    ip = _infer_ip() if external_ip is None else external_ip
    cmd = _reg_cmd.format(ip).split()

    # Registrator needs the host's Docker socket and host networking to
    # observe and register containers
    binds={'/var/run/docker.sock': {'bind': '/tmp/docker.sock'}}
    hconf = client.create_host_config(binds=binds, network_mode='host')
    conf = client.create_container_config(image=_reg_img, command=cmd, host_config=hconf)

    _run_container(client, conf, name='registrator', wait=False)
+
+
+# TODO: Need to revisit and reimplement _run_registrator(client, external_ip)
+
+#
+# High level calls
+#
+
def deploy_component(profile, image, instance_name, docker_config, should_wait=False,
        logins=[]):
    """Deploy Docker component

    This calls runs a Docker container detached. The assumption is that the Docker
    host already has registrator running.

    TODO: Split out the wait functionality

    Args
    ----
    profile: namedtuple carrying docker_host ("hostname:port")
    image (string): Docker image to run
    instance_name (string): name for the container instance
    docker_config (dict): component docker config; "ports" and "volumes"
        entries are used here, health check info is used via build_envs
    should_wait (boolean): when True, block streaming container logs until
        the container exits (see _run_container)
    logins (list): List of objects where the objects are each a docker login of
        the form:

        {"registry": .., "username":.., "password":.. }

    Returns
    -------
    Dict that is the result from a Docker inspect call

    Raises
    ------
    DockerConstructionError when the docker host name cannot be resolved
    """
    ports = docker_config.get("ports", None)
    hcp = doc.add_host_config_params_ports(ports=ports)
    volumes = docker_config.get("volumes", None)
    hcp = doc.add_host_config_params_volumes(volumes=volumes, host_config_params=hcp)
    # Thankfully passing in an IP will return back an IP
    dh = profile.docker_host.split(":")[0]
    _, _, dhips = socket.gethostbyname_ex(dh)

    if dhips:
        # Use the docker host's IP as the container's DNS server
        hcp = doc.add_host_config_params_dns(dhips[0], hcp)
    else:
        raise DockerConstructionError("Could not resolve the docker hostname:{0}".format(dh))

    envs = build_envs(profile, docker_config, instance_name)
    client = get_docker_client(profile, logins=logins)

    config = doc.create_container_config(client, image, envs, hcp)

    return _run_container(client, config, name=instance_name, wait=should_wait)
+
+
def undeploy_component(client, image, instance_name):
    """Undeploy Docker component

    TODO: Handle error scenarios. Look into:
    * no container found error - docker.errors.NotFound
    * failure to remove image - docker.errors.APIError: 409 Client Error
    * retry, check for still running container

    Returns
    -------
    True if the container and associated image has been removed False otherwise
    """
    try:
        client.remove_container(instance_name, force=True)
        client.remove_image(image)
    except Exception as err:
        # Deliberately best-effort: log and report failure to the caller
        dlog.error("Error while undeploying Docker container/image: {0}".format(err))
        return False
    return True
+
def reconfigure(client, instance_name, command):
    """ Execute the Reconfig script in the Docker container

    Parameters
    ----------
    client: Docker client connected to the engine hosting the container
    instance_name: name of the running container
    command: list passed to docker exec
    """

    # 'command' has 3 parts in a list (1 Command and 2 ARGs)
    exec_Id = client.exec_create(container=instance_name, cmd=command)

    exec_start_resp = client.exec_start(exec_Id, stream=True)

    # Using a 'single' generator response to solve issue of 'start_exec' returning control after 6 minutes
    # Only the first streamed chunk is consumed before closing the stream;
    # any remaining output is intentionally discarded
    for response in exec_start_resp:
        dlog.info("Reconfig Script execution response: {:}".format(response))
        exec_start_resp.close()
        break
diff --git a/mod/onboardingapi/dcae_cli/util/exc.py b/mod/onboardingapi/dcae_cli/util/exc.py
new file mode 100644
index 0000000..7f41e0b
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/exc.py
@@ -0,0 +1,35 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides base exceptions
+"""
+import click
+
+
class DcaeException(click.ClickException):
    '''Base exception for dcae_util; subclasses click.ClickException so the
    CLI framework renders the message instead of a traceback.'''
+
+
# Python 2/3 compatibility shim: FileNotFoundError exists only in Python 3.
# The self-assignment deliberately creates a module-level attribute in both
# cases so callers can `from ...exc import FileNotFoundError`; on Python 2
# the NameError branch falls back to IOError.
try:
    FileNotFoundError = FileNotFoundError
except NameError:
    FileNotFoundError = IOError
diff --git a/mod/onboardingapi/dcae_cli/util/inputs.py b/mod/onboardingapi/dcae_cli/util/inputs.py
new file mode 100644
index 0000000..4b212e2
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/inputs.py
@@ -0,0 +1,40 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""
+Functions for handling inputs
+"""
+
class InputsValidationError(RuntimeError):
    """Raised when the deployment-time inputs map fails validation
    (e.g. required sourced-at-deployment parameters are missing)."""
    pass
+
def filter_entries(inputs_map, spec):
    """Filter inputs entries that are not in the spec.

    Keeps only the parameters the component spec flags as
    "sourced_at_deployment".

    Args:
        inputs_map (dict): config_key to value provided at deployment time
        spec (dict): component specification containing a "parameters" list

    Returns:
        dict: subset of inputs_map restricted to sourced-at-deployment params

    Raises:
        InputsValidationError: if inputs_map lacks any required parameter
    """
    # p.get(...) treats an absent flag the same as a falsy one, matching the
    # original `"k" in p and p["k"]` check more idiomatically.
    param_names = [p["name"] for p in spec["parameters"]
                   if p.get("sourced_at_deployment")]

    # Identify any missing parameters from inputs_map
    missing = [pn for pn in param_names if pn not in inputs_map]

    if missing:
        raise InputsValidationError(
            "Inputs map is missing keys: {0}".format(missing))

    return {pn: inputs_map[pn] for pn in param_names}
diff --git a/mod/onboardingapi/dcae_cli/util/logger.py b/mod/onboardingapi/dcae_cli/util/logger.py
new file mode 100644
index 0000000..e8f21ce
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/logger.py
@@ -0,0 +1,56 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides logger utilities
+"""
+import logging
+
+import click
+
+
class ClickHandler(logging.StreamHandler):
    """Logging handler that routes formatted records through click.echo so
    log output honors click's redirection/paging behavior."""

    def emit(self, record):
        # Format and emit in one step; click handles the actual writing.
        click.echo(self.format(record))
+
+
# Wire a single click-backed handler onto the shared 'DCAE' root logger.
_clihandler = ClickHandler()
_formatter = logging.Formatter('%(name)s | %(levelname)s | %(message)s')
_clihandler.setFormatter(_formatter)

_root = logging.getLogger('DCAE')
_root.setLevel(logging.WARNING)
# Replace (not append to) any existing handlers so every message goes
# through click exactly once; propagate=False keeps the stdlib root logger
# from duplicating output.
_root.handlers = [_clihandler, ]
_root.propagate = False
+
+
def get_logger(name=None):
    """Return the shared DCAE root logger, or a named child of it."""
    if name is None:
        return _root
    return _root.getChild(name)
+
+
def set_verbose():
    """Lower the shared logger threshold to INFO (verbose CLI output)."""
    _root.setLevel(logging.INFO)
+
+
def set_quiet():
    """Restore the shared logger threshold to WARNING (the default)."""
    _root.setLevel(logging.WARNING)
diff --git a/mod/onboardingapi/dcae_cli/util/policy.py b/mod/onboardingapi/dcae_cli/util/policy.py
new file mode 100644
index 0000000..2da9f0b
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/policy.py
@@ -0,0 +1,64 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""
+Function for Policy schema validation
+"""
+
+from jsonschema import validate, ValidationError
+from dcae_cli.util.logger import get_logger
+from dcae_cli.util import reraise_with_msg
+
+logger = get_logger('policy')
+
# JSON Schema (draft-04) for policy-change documents. additionalProperties
# is False, so any key other than the three arrays below is rejected.
_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Schema for policy changes",
    "type": "object",
    "properties": {
        "updated_policies": {"type": "array"},
        "removed_policies": {"type": "array"},
        "policies": {"type": "array"}
    },
    "additionalProperties": False
}

# Guidance echoed to the user when a policy file fails schema validation.
_validation_msg = """
Is your Policy file a valid json?
Does your Policy file follow this format?

{
    "updated_policies": [{},{},...],
    "removed_policies": [{},{},...],
    "policies": [{},{},...]
}
"""
+
+
def validate_against_policy_schema(policy_file):
    """Validate the policy file against the schema.

    Args:
        policy_file: parsed policy document (dict) to check against _SCHEMA.

    Raises:
        On validation failure, logs guidance and re-raises via
        reraise_with_msg; as_dcae=True presumably wraps the error as a
        DcaeException -- confirm in dcae_cli.util.reraise_with_msg.
    """
    try:
        validate(policy_file, _SCHEMA)
    except ValidationError as e:
        logger.error("Policy file validation issue")
        logger.error(_validation_msg)
        reraise_with_msg(e, as_dcae=True)
+ \ No newline at end of file
diff --git a/mod/onboardingapi/dcae_cli/util/profiles.py b/mod/onboardingapi/dcae_cli/util/profiles.py
new file mode 100644
index 0000000..83ff6b5
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/profiles.py
@@ -0,0 +1,238 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Provides dcae cli profile variables
+"""
+import os
+from collections import namedtuple
+
+import six
+import click
+
+from dcae_cli import util
+from dcae_cli.util import get_app_dir, get_pref, write_pref
+from dcae_cli.util import config
+from dcae_cli.util.config import get_config, update_config
+from dcae_cli.util.exc import DcaeException
+from dcae_cli.util.logger import get_logger
+
+
+logger = get_logger('Profile')
+
+
+# reserved profile names
+ACTIVE = 'active'
+_reserved_names = {ACTIVE}
+
+
+# create enums for profile keys so that they can be imported for testing, instead of using literals
+CONSUL_HOST = 'consul_host'
+CONFIG_BINDING_SERVICE = 'config_binding_service'
+CDAP_BROKER = 'cdap_broker'
+DOCKER_HOST = 'docker_host'
+
+# TODO: Should probably lift this strict list of allowed keys and repurpose to be
+# keys that are required.
+_allowed_keys = set([CONSUL_HOST, CONFIG_BINDING_SERVICE, CDAP_BROKER, DOCKER_HOST])
+Profile = namedtuple('Profile', _allowed_keys)
+
+
def _create_stub_profile():
    """Return a fresh profile dict with every allowed key set to ''."""
    return dict.fromkeys(_allowed_keys, "")
+
+
+def _fmt_seq(seq):
+ '''Returns a sorted string formatted list'''
+ return list(sorted(map(str, seq)))
+
+
def get_profiles_path():
    '''Returns the absolute path to the profiles file'''
    app_dir = get_app_dir()
    return os.path.join(app_dir, 'profiles.json')
+
+
def get_active_name():
    '''Returns the active profile name stored in the dcae-cli config'''
    return config.get_active_profile()
+
+
def _set_active_name(name):
    '''Sets the active profile name in the config (persists via update_config)'''
    update_config(active_profile=name)
+
+
class ProfilesInitError(RuntimeError):
    """Raised when the profiles file cannot be (re)initialized."""
    pass
+
def reinit_profiles():
    """Reinitialize profiles

    Grab the remote profiles and merge with the local profiles if there is one.

    Returns:
    --------
    Dict of complete new profiles

    Raises:
    -------
    ProfilesInitError: when the remote seed is unreachable and the user
        declines the empty default.
    """
    # Grab the remote profiles and merge it in
    try:
        server_url = config.get_server_url()
        new_profiles = util.fetch_file_from_web(server_url, "/dcae-cli/profiles.json")
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
        # not swallowed. Failing to pull seed profiles from remote server is
        # not considered a problem. Just continue and give user the option to
        # use an empty default.
        if click.confirm("Could not download initial profiles from remote server. Set empty default?"):
            new_profiles = {"default": { "consul_host": "",
                                         "config_binding_service": "config_binding_service",
                                         "cdap_broker": "cdap_broker", "docker_host": ""}}
        else:
            raise ProfilesInitError("Could not setup dcae-cli profiles")

    profiles_path = get_profiles_path()

    if util.pref_exists(profiles_path):
        existing_profiles = get_profiles(include_active=False)
        # Make sure to clobber existing values and not other way
        existing_profiles.update(new_profiles)
        new_profiles = existing_profiles

    write_pref(new_profiles, profiles_path)
    return new_profiles
+
+
def get_profiles(user_only=False, include_active=True):
    '''Returns a dict containing all available profiles

    Args:
        user_only (bool): when True return only user-defined profiles,
            skipping the injected reserved "active" alias entirely.
        include_active (bool): when True (and user_only is False), adds an
            extra ACTIVE key aliasing the currently active profile's dict.

    Example of the returned dict:
    {
        "profile-foo": {
            "some_variable_A": "some_value_A",
            "some_variable_B": "some_value_B",
            "some_variable_C": "some_value_C"
        }
    }
    '''
    try:
        # reinit_profiles is the fallback initializer if the pref is missing
        profiles = get_pref(get_profiles_path(), reinit_profiles)
    except ProfilesInitError as e:
        raise DcaeException("Failed to initialize profiles: {0}".format(e))

    if user_only:
        return profiles

    if include_active:
        active_name = get_active_name()
        if active_name not in profiles:
            raise DcaeException("Active profile '{}' does not exist. How did this happen?".format(active_name))
        # NOTE: ACTIVE aliases (not copies) the active profile's dict
        profiles[ACTIVE] = profiles[active_name]

    return profiles
+
+
def get_profile(name=ACTIVE):
    '''Returns a `Profile` namedtuple for the given profile name

    Args:
        name (str): profile name; defaults to the reserved active alias.

    Raises:
        DcaeException: if the profile does not exist or contains keys that
            do not match the Profile namedtuple fields.
    '''
    profiles = get_profiles()

    if name not in profiles:
        raise DcaeException("Specified profile '{}' does not exist.".format(name))

    try:
        profile = Profile(**profiles[name])
    except TypeError as e:
        # Chain the TypeError so the mismatched/missing keys are not lost
        # (the original bound `e` but discarded it).
        raise DcaeException("Specified profile '{}' is malformed.".format(name)) from e

    return profile
+
+
def create_profile(name, **kwargs):
    '''Creates a new profile seeded from the allowed keys plus kwargs'''
    _assert_not_reserved(name)

    existing = get_profiles(user_only=True)
    if name in existing:
        raise DcaeException("Profile '{}' already exists.".format(name))

    # Start from an all-empty stub so every allowed key is present
    new_profile = _create_stub_profile()
    new_profile.update(kwargs)
    _assert_valid_profile(new_profile)

    existing[name] = new_profile
    _write_profiles(existing)
+
+
def delete_profile(name):
    '''Deletes a profile

    Returns False (with a warning) if the profile is currently active,
    True once it has been removed and persisted.
    '''
    _assert_not_reserved(name)
    all_profiles = get_profiles(user_only=True)
    if name not in all_profiles:
        raise DcaeException("Profile '{}' does not exist.".format(name))
    if name == get_active_name():
        logger.warning("Profile '{}' is currently active. Activate another profile first."
                       .format(name))
        return False
    all_profiles.pop(name)
    _write_profiles(all_profiles)
    return True
+
+
def update_profile(name, **kwargs):
    '''Updates an existing profile with the given key-value pairs

    Raises DcaeException if the profile does not already exist.
    '''
    _assert_not_reserved(name)
    _assert_valid_profile(kwargs)

    all_profiles = get_profiles(user_only=True)
    if name not in all_profiles:
        raise DcaeException("Profile '{}' does not exist.".format(name))

    all_profiles[name].update(kwargs)
    _write_profiles(all_profiles)
+
+
def _assert_valid_profile(params):
    '''Raises DcaeException if the profile parameter dict is invalid'''
    if not params:
        raise DcaeException('No update key-value pairs were provided.')
    # Any key outside the allowed set invalidates the whole update
    invalid_keys = set(params) - _allowed_keys
    if invalid_keys:
        raise DcaeException("Invalid keys {} detected. Only keys {} are supported.".format(_fmt_seq(invalid_keys), _fmt_seq(_allowed_keys)))
+
+
def _assert_not_reserved(name):
    '''Raises DcaeException if the profile name is reserved (e.g. "active")'''
    if name in _reserved_names:
        raise DcaeException("Profile '{}' is reserved and cannot be modified.".format(name))
+
+
def _write_profiles(profiles):
    '''Writes the profiles dictionary to disk at the standard profiles path'''
    return write_pref(profiles, path=get_profiles_path())
+
+
def activate_profile(name):
    '''Modifies the config and sets a new active profile'''
    # The reserved "active" alias is not itself a selectable profile
    selectable = set(get_profiles()) - {ACTIVE}
    if name not in selectable:
        raise DcaeException("Profile name '{}' does not exist. Please select from {} or create a new profile.".format(name, _fmt_seq(selectable)))
    _set_active_name(name)
diff --git a/mod/onboardingapi/dcae_cli/util/run.py b/mod/onboardingapi/dcae_cli/util/run.py
new file mode 100644
index 0000000..293c725
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/run.py
@@ -0,0 +1,293 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides utilities for running components
+"""
+import time
+import six
+from functools import partial
+import click
+from dcae_cli.util import docker_util as du
+from dcae_cli.util import dmaap, inputs
+from dcae_cli.util.cdap_util import run_component as run_cdap_component
+from dcae_cli.util.exc import DcaeException
+from dcae_cli.util import discovery as dis
+from dcae_cli.util.discovery import get_user_instances, config_context, \
+ replace_dots
+import dcae_cli.util.profiles as profiles
+from dcae_cli.util.logger import get_logger
+from dcae_cli.catalog.mock.catalog import build_config_keys_map, \
+ get_data_router_subscriber_route
+# This seems to be an abstraction leak
+from dcae_cli.catalog.mock.schema import apply_defaults_docker_config
+
+
+log = get_logger('Run')
+
+
def _get_instances(user, additional_user=None):
    """Return the discovery instance map for *user*, optionally merged with
    another user's instances.

    Keys are tuples whose first element is the component name (with dots);
    for each entry a duplicate key with dots replaced by dashes is added
    (see the REVIEW note below). Values come from
    discovery.get_user_instances -- exact shape not visible here.
    """
    instance_map = get_user_instances(user)

    if additional_user:
        # Merge current user with another user's instance map to be available to
        # connect to
        instance_map_additional = get_user_instances(additional_user)
        log.info("#Components for {0}: {1}".format(additional_user,
            len(instance_map_additional)))
        instance_map.update(instance_map_additional)

    # REVIEW: Getting instances always returns back component names with dots
    # even though the component name could originally have dots or dashes.
    # To put this dot vs dash headache to rest, we have to understand what the
    # discovery abstraction should be. Should the discovery be aware of this type
    # of naming magic? If so then the discovery abstraction may need to be
    # enhanced to be catalog aware to do name verfication queries. If not then
    # the dot-to-dash transformation might not belong inside of the discovery
    # abstraction and the higher level should do that.
    #
    # Another possible fix is to map the dots to something that's less likely to
    # be used multiple dashes. This would help disambiguate between a forced
    # mapping vs component name with dashes.
    #
    # In the meantime, here is a fix to address the issue where a downstream component
    # can't be matched when the downstream component uses dashes. This affects
    # the subsequent calls:
    #
    # - catalog.get_discovery* query
    # - create_config
    #
    # The instance map will contain entries where the names will be with dots and
    # with dashes. There should be no harm because only one set should match. The
    # assumption is that people won't have the same name as dots and as dashes.
    instance_map_dashes = { (replace_dots(k[0]), k[1]): v
            for k, v in six.iteritems(instance_map) }
    instance_map.update(instance_map_dashes)

    return instance_map
+
+
def _update_delivery_urls(spec, target_host, dmaap_map):
    """Return dmaap_map with delivery urls updated for data router subscribers.

    The proper delivery url is only known post-deployment (needs ip/port).
    The component itself doesn't strictly need it, but surfacing it helps
    developers manually provision feeds.
    """
    route_lookup = partial(get_data_router_subscriber_route, spec)
    base_url = "http://{0}".format(target_host)
    return dmaap.update_delivery_urls(route_lookup, base_url, dmaap_map)
+
+
def _verify_component(name, max_wait, consul_host):
    """Verify that the component is healthy

    Polls the Consul health check once per second.

    Args:
    -----
    max_wait (integer): limit to how many checks to make, which translates to
        seconds because each sleep is one second. 0 means infinite.

    Return:
    -------
    True if component is healthy else returns False
    """
    failed_checks = 0

    while not dis.is_healthy(consul_host, name):
        failed_checks += 1
        # Stop once the allotted number of checks has been exhausted
        if 0 < max_wait <= failed_checks:
            return False
        time.sleep(1)

    return True
+
+
def run_component(user, cname, cver, catalog, additional_user, attached, force,
        dmaap_map, inputs_map, external_ip=None):
    '''Runs a component based on the component type (docker or cdap)

    Args
    ----
    user: (string) user name whose instances are sourced
    cname, cver: component name and version (verified against the catalog)
    catalog: catalog object used for all component lookups
    additional_user: (string) optional second user whose instances are merged in
    attached: (boolean) for docker, run attached (wait and clean up on exit)
    force: (boolean)
        Continue to run even when there are no valid downstream components,
        when this flag is set to True.
    dmaap_map: (dict) config_key to message router or data router connections.
        Used as a manual way to make available this information for the component.
    inputs_map: (dict) config_key to value that is intended to be provided at
        deployment time as an input
    external_ip: NOTE(review) -- accepted but never used in this body; confirm
        whether callers still pass it.
    '''
    cname, cver = catalog.verify_component(cname, cver)
    ctype = catalog.get_component_type(cname, cver)
    profile = profiles.get_profile()

    instance_map = _get_instances(user, additional_user)
    neighbors = six.iterkeys(instance_map)


    dmaap_config_keys = catalog.get_discovery_for_dmaap(cname, cver)

    # Bail out early (silently) if the provided dmaap map doesn't cover the
    # component's declared dmaap config keys
    if not dmaap.validate_dmaap_map_entries(dmaap_map, *dmaap_config_keys):
        return

    if ctype == 'docker':
        params, interface_map = catalog.get_discovery_for_docker(cname, cver, neighbors)
        should_wait = attached

        spec = catalog.get_component_spec(cname, cver)
        config_key_map = build_config_keys_map(spec)
        inputs_map = inputs.filter_entries(inputs_map, spec)

        # Pre-deployment guess at delivery urls using the docker host name
        dmaap_map = _update_delivery_urls(spec, profile.docker_host.split(":")[0],
                dmaap_map)

        with config_context(user, cname, cver, params, interface_map,
                instance_map, config_key_map, dmaap_map=dmaap_map, inputs_map=inputs_map,
                always_cleanup=should_wait, force_config=force) as (instance_name, _):
            image = catalog.get_docker_image(cname, cver)
            docker_config = catalog.get_docker_config(cname, cver)

            docker_logins = dis.get_docker_logins()

            if should_wait:
                du.deploy_component(profile, image, instance_name, docker_config,
                        should_wait=True, logins=docker_logins)
            else:
                result = du.deploy_component(profile, image, instance_name, docker_config,
                        logins=docker_logins)
                log.debug(result)

                if result:
                    log.info("Deployed {0}. Verifying..".format(instance_name))

                    # TODO: Be smarter here but for now wait longer i.e. 5min
                    max_wait = 300 # 300s == 5min

                    if _verify_component(instance_name, max_wait,
                            dis.default_consul_host()):
                        log.info("Container is up and healthy")

                        # This block of code is used to construct the delivery
                        # urls for data router subscribers and to display it for
                        # users to help with manually provisioning feeds.
                        results = dis.lookup_instance(dis.default_consul_host(),
                                instance_name)
                        target_host = dis.parse_instance_lookup(results)

                        dmaap_map = _update_delivery_urls(spec, target_host, dmaap_map)
                        delivery_urls = dmaap.list_delivery_urls(dmaap_map)

                        if delivery_urls:
                            msg = "\n".join(["\t{k}: {url}".format(k=k, url=url)
                                for k, url in delivery_urls])
                            msg = "\n\n{0}\n".format(msg)
                            log.warn("Your component is a data router subscriber. Here are the delivery urls: {0}".format(msg))
                    else:
                        log.warn("Container never became healthy")
                else:
                    raise DcaeException("Failed to deploy docker component")

    elif ctype =='cdap':
        (jar, config, spec) = catalog.get_cdap(cname, cver)
        config_key_map = build_config_keys_map(spec)
        inputs_map = inputs.filter_entries(inputs_map, spec)

        params, interface_map = catalog.get_discovery_for_cdap(cname, cver, neighbors)

        with config_context(user, cname, cver, params, interface_map, instance_map,
                config_key_map, dmaap_map=dmaap_map, inputs_map=inputs_map, always_cleanup=False,
                force_config=force) as (instance_name, templated_conf):
            run_cdap_component(catalog, params, instance_name, profile, jar, config, spec, templated_conf)
    else:
        raise DcaeException("Unsupported component type for run")
+
+
def dev_component(user, catalog, specification, additional_user, force, dmaap_map,
        inputs_map):
    '''Sets up the discovery layer for in development component

    The passed-in component specification is
    * Validated it
    * Generates the corresponding application config
    * Pushes the application config and rels key into Consul

    This allows developers to play with their spec and the resulting configuration
    outside of being in the catalog and in a container.

    Args
    ----
    user: (string) user name
    catalog: (object) instance of MockCatalog
    specification: (dict) experimental component specification
    additional_user: (string) another user name used to source additional
        component instances
    force: (boolean)
        Continue to run even when there are no valid downstream components when
        this flag is set to True.
    dmaap_map: (dict) config_key to message router connections. Used as a
        manual way to make available this information for the component.
    inputs_map: (dict) config_key to value that is intended to be provided at
        deployment time as an input
    '''
    instance_map = _get_instances(user, additional_user)
    neighbors = six.iterkeys(instance_map)

    params, interface_map, dmaap_config_keys = catalog.get_discovery_from_spec(
        user, specification, neighbors)

    if not dmaap.validate_dmaap_map_entries(dmaap_map, *dmaap_config_keys):
        return

    cname = specification["self"]["name"]
    cver = specification["self"]["version"]
    config_key_map = build_config_keys_map(specification)
    inputs_map = inputs.filter_entries(inputs_map, specification)

    # Delivery host is localhost since the component runs on the dev machine
    dmaap_map = _update_delivery_urls(specification, "localhost", dmaap_map)

    # always_cleanup=True: config is removed from Consul when the dev session ends
    with config_context(user, cname, cver, params, interface_map, instance_map,
            config_key_map, dmaap_map, inputs_map=inputs_map, always_cleanup=True,
            force_config=force) \
        as (instance_name, templated_conf):

        click.echo("Ready for component development")

        if specification["self"]["component_type"] == "docker":
            # The env building is only for docker right now
            docker_config = apply_defaults_docker_config(specification["auxilary"])
            envs = du.build_envs(profiles.get_profile(), docker_config, instance_name)
            envs_message = "\n".join(["export {0}={1}".format(k, v) for k,v in envs.items()])
            envs_filename = "env_{0}".format(profiles.get_active_name())

            with open(envs_filename, "w") as f:
                f.write(envs_message)

            # NOTE(review): "varibles" typo below is in a user-facing string;
            # left as-is here (doc-only change).
            click.echo()
            click.echo("Setup these environment varibles. Run \"source {0}\":".format(envs_filename))
            click.echo()
            click.echo(envs_message)
            click.echo()
        else:
            click.echo("Set the following as your HOSTNAME:\n {0}".format(instance_name))

        # Block until the developer is done; exiting the with-block cleans up
        input("Press any key to stop and to clean up")
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_cdap_util.py b/mod/onboardingapi/dcae_cli/util/tests/test_cdap_util.py
new file mode 100644
index 0000000..9282691
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_cdap_util.py
@@ -0,0 +1,93 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+from dcae_cli.util.cdap_util import _merge_spec_config_into_broker_put, normalize_cdap_params
+
+
def test_normalize_cdap_params():
    """A spec with empty parameters normalizes to empty cdap defaults."""
    expected = {"app_preferences": {},
                "app_config": {},
                "program_preferences": []}
    assert normalize_cdap_params({"parameters": {}}) == expected
+
def test_cdap_util():
    """
    Tests both _merge_spec_config_into_broker_put and normalize_cdap_params
    """
    # Fixture: minimal jar url, broker config, and component spec
    jar = "bahphomet.com/nexus/doomsday.jar"
    config = {
        "artifact_name" : "testname",
        "artifact_version" : "6.6.6",
        "streamname" : "stream",
        "programs" : [{"program_type" : "flows", "program_id" : "flow_id"}],
        "namespace" : "underworld"
    }
    spec = {
        "self": {
            "version": "6.6.6",
            "description": "description",
            "component_type": "cdap",
            "name": "name"
        },
        "parameters" : {
            "app_preferences" : [{"name" : "he", "description" : "", "value" : "shall rise"}],
            "program_preferences" : [{"program_type" : "flows", "program_id" : "flow_id", "program_pref" : [{"name": "foo", "description" : "", "value" : "bar"}]}]
        },

        "streams": {
            "publishes": [],
            "subscribes" : []
        },
        "services": {
            "calls" : [],
            'provides': [
                {"request": {"format" : 'std.format_one', "version" : "1.0.0"},
                 "response" : {"format" : "std.format_two", "version" : "1.5.0"},
                 "service_name" : "baphomet",
                 "service_endpoint" : "rises",
                 "verb" : "GET"}
            ]
        },
    }
    parsed_parameters = normalize_cdap_params(spec)
    templated_conf = {"streams_publishes":{}, "streams_subscribes": {},
        "services_calls": {}} #TODO: Incorporate a test templated_conf
    broker_put = _merge_spec_config_into_broker_put(jar, config, spec, parsed_parameters, templated_conf)

    # Expected broker PUT body: note name/value pref lists are flattened to
    # plain dicts and "verb" becomes "endpoint_method"
    expected = {
        "app_config": {"services_calls" : {},
            "streams_publishes" : {},
            "streams_subscribes": {}
        },
        "app_preferences": {"he" : "shall rise"},
        "artifact_name" : "testname",
        "artifact_version" : "6.6.6",
        "jar_url": "bahphomet.com/nexus/doomsday.jar",
        "namespace": "underworld",
        "program_preferences" : [{"program_type" : "flows", "program_id" : "flow_id", "program_pref" : {"foo" : "bar"}}],
        "programs" : [{"program_type" : "flows", "program_id" : "flow_id"}],
        "service_component_type": "cdap",
        "services": [{"service_name" : "baphomet", "service_endpoint" : "rises", "endpoint_method" : "GET"}],
        "streamname": "stream",
        "cdap_application_type" : "program-flowlet"
    }

    assert broker_put == expected
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_config.py b/mod/onboardingapi/dcae_cli/util/tests/test_config.py
new file mode 100644
index 0000000..3b4cd6e
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_config.py
@@ -0,0 +1,137 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Tests the config functionality
+"""
+import os, json
+from functools import partial
+from mock import patch
+
+import pytest
+import click
+
+import dcae_cli
+from dcae_cli.util import config, write_pref
+from dcae_cli.util.config import get_app_dir, get_config, get_config_path
+
+
+def test_no_config(monkeypatch, tmpdir):
+    '''Tests that get_config() reads back a pre-existing config.json.
+
+    NOTE(review): despite the original "clean install" wording, this test
+    writes a config file first and only verifies it is loaded — it does not
+    exercise the first-run initialization path (see test_init_config).
+    '''
+    # Redirect the click app dir into a throwaway tmpdir so the real user
+    # configuration on disk is never touched.
+    monkeypatch.setattr(click, "get_app_dir", lambda app: str(tmpdir.realpath()))
+
+    mock_config = {'user': 'mock-user'}
+
+    config_file = tmpdir.join("config.json")
+    config_file.write(json.dumps(mock_config))
+
+    assert get_config() == mock_config
+
+
+def test_init_config_user(monkeypatch):
+    '''_init_config_user keeps prompting until a valid user id is entered.'''
+    good_case = "abc123"
+    # Consumed via list.pop() (last element first), so the three invalid
+    # entries ("j k l", "g*h*i", "d-e-f") are offered before the good one;
+    # the prompt loop must reject each of them and finally accept good_case.
+    values = [ good_case, "d-e-f", "g*h*i", "j k l" ]
+
+    def fake_input(values, message, type="red"):
+        # Stand-in for click.prompt; must tolerate the same call signature.
+        return values.pop()
+
+    monkeypatch.setattr(click, 'prompt', partial(fake_input, values))
+    assert config._init_config_user() == good_case
+
+
+def test_init_config(monkeypatch):
+    '''Exercises _init_config: happy path, db_url fallbacks, and fetch failure.'''
+    # Stub out the interactive prompts and the remote fetch so no user input
+    # or network access happens during the test.
+    monkeypatch.setattr(config, '_init_config_user', lambda: "bigmama")
+    monkeypatch.setattr(config, '_init_config_server_url',
+            lambda: "http://some-nexus-in-the-sky.com")
+    monkeypatch.setattr(dcae_cli.util, 'fetch_file_from_web',
+            lambda server_url, path: { "db_url": "conn" })
+    monkeypatch.setattr("dcae_cli._version.__version__", "2.X.X")
+
+    expected = {'cli_version': '2.X.X', 'user': 'bigmama', 'db_url': 'conn',
+            'server_url': 'http://some-nexus-in-the-sky.com',
+            'active_profile': 'default' }
+    assert expected == config._init_config()
+
+    # Test use of the db fallback: when the fetched config has an empty
+    # (or missing) db_url, _init_config must fall back to prompting via
+    # _init_config_db_url.
+
+    monkeypatch.setattr(dcae_cli.util, 'fetch_file_from_web',
+            lambda server_url, path: { "db_url": "" })
+
+    db_url = "postgresql://king:of@mountain:5432/dcae_onboarding_db"
+
+    def fake_init_config_db_url():
+        return db_url
+
+    monkeypatch.setattr(config, "_init_config_db_url",
+            fake_init_config_db_url)
+
+    # Fallback when db_url is present but empty
+    assert db_url == config._init_config()["db_url"]
+
+    monkeypatch.setattr(dcae_cli.util, 'fetch_file_from_web',
+            lambda server_url, path: {})
+
+    # Fallback when db_url is entirely absent from the fetched config
+    assert db_url == config._init_config()["db_url"]
+
+    # Simulate error trying to fetch
+
+    def fetch_simulate_error(server_url, path):
+        raise RuntimeError("Simulated error")
+
+    monkeypatch.setattr(dcae_cli.util, 'fetch_file_from_web',
+            fetch_simulate_error)
+    # Case when user opts out of manually setting up
+    monkeypatch.setattr(click, "confirm", lambda msg: False)
+
+    with pytest.raises(config.ConfigurationInitError):
+        config._init_config()
+
+
+def test_should_force_reinit():
+    '''Reinit is forced for missing or stale cli_version, not for current.'''
+    # No cli_version recorded at all -> must reinitialize
+    bad_config = {}
+    assert config.should_force_reinit(bad_config) == True
+
+    # Older major version -> must reinitialize
+    old_config = { "cli_version": "1.0.0" }
+    assert config.should_force_reinit(old_config) == True
+
+    # Current major version -> no reinit needed
+    uptodate_config = { "cli_version": "2.0.0" }
+    assert config.should_force_reinit(uptodate_config) == False
+
+
+def test_reinit_config(monkeypatch, tmpdir):
+    '''_reinit_config returns the new config, preserving extra keys from the
+    previously saved config (here the "hidden" key survives the reinit).'''
+    monkeypatch.setattr(click, "get_app_dir", lambda app: str(tmpdir.realpath()))
+
+    new_config = { "user": "ninny", "db_url": "some-db" }
+
+    def init():
+        # Stands in for the interactive _init_config routine.
+        return new_config
+
+    # No pre-existing config on disk: result is exactly what init() returns.
+    assert config._reinit_config(init) == new_config
+
+    # With an existing config on disk, keys unknown to the new config
+    # ("hidden") must be carried over into the reinitialized result.
+    old_config = { "user": "super", "db_url": "other-db", "hidden": "yo" }
+    write_pref(old_config, get_config_path())
+
+    new_config["hidden"] = "yo"
+    assert config._reinit_config(init) == new_config
+
+
+# Allow running this test module directly, without invoking the pytest CLI.
+if __name__ == '__main__':
+    '''Test area'''
+    pytest.main([__file__, ])
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_discovery.py b/mod/onboardingapi/dcae_cli/util/tests/test_discovery.py
new file mode 100644
index 0000000..2148ea3
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_discovery.py
@@ -0,0 +1,447 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+'''
+Provides tests for the discovery module
+'''
+import json
+from functools import partial
+from copy import deepcopy
+
+import pytest
+
+from dcae_cli.util import discovery as dis
+from dcae_cli.util.discovery import create_config, Consul, config_context, DiscoveryNoDownstreamComponentError
+
+
+# Shared fixture values used by the tests below.
+user = 'bob'                  # component owner
+cname = 'asimov.test_comp'    # component name
+cver = '0.0.0'                # component version
+inst_pref = 'abc123'          # instance prefix embedded in generated Consul keys
+params = {'param0': 12345}    # plain (non-interface) parameters
+
+
+def test_create_config():
+    '''
+    Test explanation:
+    1. param1 in the component spec has 2 compatible component types, comp1 and comp2. however infrastructure
+    support only allows for 1. thus comp2 shouldn't make it to the rels.
+    2. comp1 has two instances, so both should make it to the rels
+    3. param2 is compatible with comp3, but there are no comp3 instances. thus it's missing from rels.
+    '''
+    expected_ckey = 'bob.abc123.0-0-0.asimov-test_comp'
+    expected_conf = {'param1': '{{1-1-1.foo-bar-comp1}}', 'param0': 12345, 'param2': '{{3-3-3.foo-bar-comp3}}'}
+    expected_rkey = 'bob.abc123.0-0-0.asimov-test_comp:rel'
+    expected_rels = ['bob.aaa111.1-1-1.foo-bar-comp1.suffix',
+                     'bob.bbb222.1-1-1.foo-bar-comp1.suffix',
+                     'bob.ddd444.3-3-3.foo-bar-comp3.suffix']
+    expected_dmaap_key = 'bob.abc123.0-0-0.asimov-test_comp:dmaap'
+    expected_dmaap_map = {}
+
+    # interface_map: config param -> compatible (component, version) pairs
+    interface_map = {'param1': [('foo.bar.comp1', '1.1.1'),
+                                ('foo.bar.comp2', '2.2.2')],
+                     'param2': [('foo.bar.comp3', '3.3.3')]
+                     }
+    # instance_map: (component, version) -> running instance names
+    instance_map = {('foo.bar.comp1', '1.1.1'): ['bob.aaa111.1-1-1.foo-bar-comp1.suffix',
+                                                 'bob.bbb222.1-1-1.foo-bar-comp1.suffix'],
+                    ('foo.bar.comp2', '2.2.2'): ['bob.ccc333.2-2-2.foo-bar-comp2.suffix'],
+                    ('foo.bar.comp3', '3.3.3'): ['bob.ddd444.3-3-3.foo-bar-comp3.suffix']}
+
+    ckey, conf, rkey, rels, dmaap_key, dmaap_map = create_config(user, cname, cver,
+            params, interface_map, instance_map, expected_dmaap_map, inst_pref)
+
+    assert ckey == expected_ckey
+    assert conf == expected_conf
+    assert rkey == expected_rkey
+    assert sorted(rels) == sorted(expected_rels)
+    assert dmaap_key == expected_dmaap_key
+    assert dmaap_map == expected_dmaap_map
+
+    #
+    # Fail cases: When a downstream dependency does not exist
+    #
+
+    # (1) Case when there's no actual instance
+    instance_map_missing_3 = deepcopy(instance_map)
+    instance_map_missing_3[('foo.bar.comp3', '3.3.3')] = []
+
+    with pytest.raises(DiscoveryNoDownstreamComponentError):
+        create_config(user, cname, cver, params, interface_map, instance_map_missing_3,
+                expected_dmaap_map, inst_pref)
+
+    # (2) Case when there's no existence in instance_map
+    interface_map_extra = deepcopy(interface_map)
+    interface_map_extra["param_not_exist"] = []
+
+    with pytest.raises(DiscoveryNoDownstreamComponentError):
+        create_config(user, cname, cver, params, interface_map_extra, instance_map,
+                expected_dmaap_map, inst_pref)
+
+    #
+    # Force the fail cases to succeed
+    #
+
+    # (1) force=True tolerates the missing comp3 instance
+    ckey, conf, rkey, rels, dmaap_key, dmaap_map = create_config(user, cname, cver,
+            params, interface_map, instance_map_missing_3, expected_dmaap_map, inst_pref,
+            force=True)
+
+    assert ckey == expected_ckey
+    assert conf == expected_conf
+    assert rkey == expected_rkey
+    # Remove the foo.bar.comp3:3.3.3 instance because we are simulating when that
+    # instance does not exist
+    assert sorted(rels) == sorted(expected_rels[:2])
+    assert dmaap_key == expected_dmaap_key
+    assert dmaap_map == expected_dmaap_map
+
+    # (2) force=True renders an unresolvable param as an empty template
+    ckey, conf, rkey, rels, dmaap_key, dmaap_map = create_config(user, cname, cver,
+            params, interface_map_extra, instance_map, expected_dmaap_map, inst_pref,
+            force=True)
+
+    expected_conf["param_not_exist"] = "{{}}"
+
+    assert ckey == expected_ckey
+    assert conf == expected_conf
+    assert rkey == expected_rkey
+    assert sorted(rels) == sorted(expected_rels)
+    assert dmaap_key == expected_dmaap_key
+    assert dmaap_map == expected_dmaap_map
+
+    #
+    # Test different dashes scenario
+    #
+
+    # Component has been added with dashes but the instance comes back with dots
+    # because the discovery layer always brings back instances with dots
+    interface_map_dashes = {'param1': [('foo-bar-comp1', '1.1.1')]}
+    instance_map_dashes = {('foo.bar.comp1', '1.1.1'):
+            ['bob.aaa111.1-1-1.foo-bar-comp1.suffix']}
+
+    with pytest.raises(DiscoveryNoDownstreamComponentError):
+        create_config(user, cname, cver, params, interface_map_dashes, instance_map_dashes,
+                expected_dmaap_map, inst_pref)
+
+    # The fix in v2.3.2 was to have the caller to send in instances with dots and
+    # with dashes
+    instance_map_dashes = {
+            ('foo.bar.comp1', '1.1.1'): ['bob.aaa111.1-1-1.foo-bar-comp1.suffix'],
+            ('foo-bar-comp1', '1.1.1'): ['bob.aaa111.1-1-1.foo-bar-comp1.suffix'] }
+
+    ckey, conf, rkey, rels, dmaap_key, dmaap_map = create_config(user, cname, cver,
+            params, interface_map_dashes, instance_map_dashes, expected_dmaap_map, inst_pref)
+
+    # The expecteds have changed because the inputs have been narrowed to just
+    # one
+    assert ckey == expected_ckey
+    assert conf == {'param1': '{{1-1-1.foo-bar-comp1}}', 'param0': 12345}
+    assert rkey == expected_rkey
+    assert sorted(rels) == sorted(['bob.aaa111.1-1-1.foo-bar-comp1.suffix'])
+    assert dmaap_key == expected_dmaap_key
+    assert dmaap_map == expected_dmaap_map
+
+    # Pass in a non-empty dmaap map: the config entry's dmaap_info is replaced
+    # with a "<<key>>" placeholder and the returned dmaap_map is flattened to
+    # the raw dmaap_info payloads.
+    dmaap_map_input = { "some-config-key": { "type": "message_router",
+        "dmaap_info": {"topic_url": "http://some-topic-url.com/abc"} } }
+    del expected_conf["param_not_exist"]
+    expected_conf["some-config-key"] = { "type": "message_router",
+            "dmaap_info": "<<some-config-key>>" }
+
+    ckey, conf, rkey, rels, dmaap_key, dmaap_map = create_config(user, cname, cver,
+            params, interface_map, instance_map, dmaap_map_input, inst_pref)
+
+    assert ckey == expected_ckey
+    assert conf == expected_conf
+    assert rkey == expected_rkey
+    assert sorted(rels) == sorted(expected_rels)
+    assert dmaap_key == expected_dmaap_key
+    assert dmaap_map == {'some-config-key': {'topic_url': 'http://some-topic-url.com/abc'}}
+
+
+@pytest.mark.skip(reason="Not a pure unit test")
+def test_config_context(mock_cli_config):
+    '''Integration test: config_context writes the config and rels keys into a
+    live Consul KV store and removes them again on context exit.
+
+    Skipped by default because it needs a reachable Consul agent.
+    '''
+    interface_map = {'param1': [('foo.bar.comp1', '1.1.1'),
+                                ('foo.bar.comp2', '2.2.2')],
+                     'param2': [('foo.bar.comp3', '3.3.3')]
+                     }
+    instance_map = {('foo.bar.comp1', '1.1.1'): ['bob.aaa111.1-1-1.foo-bar-comp1.suffix',
+                                                 'bob.bbb222.1-1-1.foo-bar-comp1.suffix'],
+                    ('foo.bar.comp2', '2.2.2'): ['bob.ccc333.2-2-2.foo-bar-comp2.suffix'],
+                    ('foo.bar.comp3', '3.3.3'): ['bob.ddd444.3-3-3.foo-bar-comp3.suffix']}
+
+    config_key_map = {"param1": {"group": "streams_publishes", "type": "http"},
+            "param2": {"group": "services_calls", "type": "http"}}
+
+    ckey = 'bob.abc123.0-0-0.asimov-test_comp'
+    rkey = 'bob.abc123.0-0-0.asimov-test_comp:rel'
+    expected_conf = {"streams_publishes": {'param1': '{{1-1-1.foo-bar-comp1}}'},
+            'param0': 12345, "streams_subscribes": {},
+            "services_calls": {'param2': '{{3-3-3.foo-bar-comp3}}'}}
+    expected_rels = ['bob.aaa111.1-1-1.foo-bar-comp1.suffix',
+                     'bob.bbb222.1-1-1.foo-bar-comp1.suffix',
+                     'bob.ddd444.3-3-3.foo-bar-comp3.suffix']
+
+    c = Consul(dis.default_consul_host())
+    # Inside the context both keys must be present in Consul KV ...
+    with config_context(user, cname, cver, params, interface_map, instance_map,
+            config_key_map, instance_prefix=inst_pref) as (instance,_):
+        assert json.loads(c.kv.get(ckey)[1]['Value'].decode('utf-8')) == expected_conf
+        assert sorted(json.loads(c.kv.get(rkey)[1]['Value'].decode('utf-8'))) \
+                == sorted(expected_rels)
+        assert instance == ckey
+
+    # ... and cleaned up after the context exits.
+    assert c.kv.get(ckey)[1] is None
+    assert c.kv.get(rkey)[1] is None
+
+    # Fail case: When a downstream dependency does not exist
+    interface_map_extra = deepcopy(interface_map)
+    interface_map_extra["param_not_exist"] = []
+
+    with pytest.raises(DiscoveryNoDownstreamComponentError):
+        with config_context(user, cname, cver, params, interface_map_extra,
+                instance_map, config_key_map, instance_prefix=inst_pref) as (instance,_):
+            pass
+
+    # Force fail case to succeed
+    expected_conf["param_not_exist"] = "{{}}"
+
+    with config_context(user, cname, cver, params, interface_map_extra,
+            instance_map, config_key_map, instance_prefix=inst_pref,
+            force_config=True) as (instance,_):
+        assert json.loads(c.kv.get(ckey)[1]['Value'].decode('utf-8')) == expected_conf
+        assert sorted(json.loads(c.kv.get(rkey)[1]['Value'].decode('utf-8'))) \
+                == sorted(expected_rels)
+        assert instance == ckey
+
+
+def test_inst_regex():
+    '''The instance-name regex matches user.prefix.version.name keys,
+    including multi-digit version components.'''
+    ckey = 'bob.abc123.0-0-0.asimov-test_comp'
+    match = dis._inst_re.match(ckey)
+    assert match != None
+
+    # Big version case
+
+    ckey = 'bob.abc123.100-100-100.asimov-test_comp'
+    match = dis._inst_re.match(ckey)
+    assert match != None
+
+
+def test_is_healthy_pure():
+    '''_is_healthy_pure is True only when every Consul health check for the
+    component's service reports "passing"; no checks at all means unhealthy.'''
+    # A KV entry as returned by Consul for a deployed component instance.
+    component = { 'CreateIndex': 204546, 'Flags': 0,
+            'Key': 'mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber', 'LockIndex': 0, 'ModifyIndex': 204546,
+            'Value': b'{}' }
+
+    # Shaped like Consul's health.service() response: (index, [entries]).
+    component_health_good = ('262892',
+            [{'Checks': [{'CheckID': 'serfHealth',
+                          'CreateIndex': 3,
+                          'ModifyIndex': 3,
+                          'Name': 'Serf Health Status',
+                          'Node': 'agent-one',
+                          'Notes': '',
+                          'Output': 'Agent alive and reachable',
+                          'ServiceID': '',
+                          'ServiceName': '',
+                          'Status': 'passing'},
+                         {'CheckID': 'service:rework-central-swarm-master:mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber:8080',
+                          'CreateIndex': 204550,
+                          'ModifyIndex': 204551,
+                          'Name': 'Service '
+                                  "'mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber' "
+                                  'check',
+                          'Node': 'agent-one',
+                          'Notes': '',
+                          'Output': '',
+                          'ServiceID': 'rework-central-swarm-master:mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber:8080',
+                          'ServiceName': 'mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber',
+                          'Status': 'passing'}],
+              'Node': {'Address': '10.170.2.17',
+                       'CreateIndex': 3,
+                       'ModifyIndex': 262877,
+                       'Node': 'agent-one',
+                       'TaggedAddresses': {'wan': '10.170.2.17'}},
+              'Service': {'Address': '196.207.170.175',
+                          'CreateIndex': 204550,
+                          'EnableTagOverride': False,
+                          'ID': 'rework-central-swarm-master:mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber:8080',
+                          'ModifyIndex': 204551,
+                          'Port': 33064,
+                          'Service': 'mike.21fbcabd-fac1-4b9b-9d18-2f624bfa44a5.0-4-0.sandbox-platform-dummy_subscriber',
+                          'Tags': None}}])
+
+    assert True == dis._is_healthy_pure(lambda name: component_health_good, component)
+
+    # Case: Check is failing
+
+    component_health_bad = deepcopy(component_health_good)
+    # NOTE: The failed status here. Not sure if this is what Consul actually sends
+    # but at least its not "passing"
+    component_health_bad[1][0]["Checks"][0]["Status"] = "failing"
+
+    assert False == dis._is_healthy_pure(lambda name: component_health_bad, component)
+
+    # Case: No health for a component
+
+    component_health_nothing = ('262892', [])
+    assert False == dis._is_healthy_pure(lambda name: component_health_nothing, component)
+
+
+def test_get_instances_from_kv():
+    '''_get_instances_from_kv extracts instance names from KV keys, stripping
+    the ":rel" suffix and de-duplicating against the bare config key.'''
+
+    def get_from_kv_fake(result, user, recurse=True):
+        # Mimics Consul kv.get: returns (index, entries).
+        return "don't care about first arg", result
+
+    user = "jane"
+    # Empty KV -> no instances
+    kvs_nothing = []
+
+    assert dis._get_instances_from_kv(partial(get_from_kv_fake, kvs_nothing), user) == []
+
+    # Both the ":rel" key and the bare config key present -> one instance
+    kvs_success = [ { "Value": "some value", "Key": "jane.1344a03a-06a8-4b92-bfac-d8f89df0c0cd.1-0-0.dcae-controller-ves-collector:rel"
+        },
+        { "Value": "some value", "Key": "jane.1344a03a-06a8-4b92-bfac-d8f89df0c0cd.1-0-0.dcae-controller-ves-collector" } ]
+
+    assert dis._get_instances_from_kv(partial(get_from_kv_fake, kvs_success), user) == ["jane.1344a03a-06a8-4b92-bfac-d8f89df0c0cd.1-0-0.dcae-controller-ves-collector"]
+
+    # Only the ":rel" key present -> instance name still recovered
+    kvs_partial = [ { "Value": "some value", "Key": "jane.1344a03a-06a8-4b92-bfac-d8f89df0c0cd.1-0-0.dcae-controller-ves-collector:rel"
+        } ]
+
+    assert dis._get_instances_from_kv(partial(get_from_kv_fake, kvs_partial), user) == ["jane.1344a03a-06a8-4b92-bfac-d8f89df0c0cd.1-0-0.dcae-controller-ves-collector"]
+
+
+def test_get_instances_from_catalog():
+    '''_get_instances_from_catalog returns only services belonging to the
+    given user (keys prefixed with "<user>.").'''
+
+    def get_from_catalog_fake(result):
+        # Mimics Consul catalog.services: returns (index, {service: tags}).
+        return ("some Consul index", result)
+
+    user = "jane"
+    services_nothing = {}
+
+    assert dis._get_instances_from_catalog(
+            partial(get_from_catalog_fake, services_nothing), user) == []
+
+    # Services exist but none owned by "jane"
+    services_no_matching = { '4f09bb72-8578-4e82-a6a4-9b7d679bd711.cdap_app_hello_world.hello-world-cloudify-test': [],
+            '666.fake_testing_service.rework-central.com': [],
+            'Platform_Dockerhost_Solutioning_Test': [],
+            'jack.2271ec6b-9224-4f42-b0b0-bfa91b41218f.1-0-1.cdap-event-proc-map-app': [],
+            'jack.bca28c8c-a352-41f1-81bc-63ff46db2582.1-0-1.cdap-event-proc-supplement-app':
+            [] }
+
+    assert dis._get_instances_from_catalog(
+            partial(get_from_catalog_fake, services_no_matching), user) == []
+
+    # Exactly one service owned by "jane"
+    services_success = { '4f09bb72-8578-4e82-a6a4-9b7d679bd711.cdap_app_hello_world.hello-world-cloudify-test': [],
+            '666.fake_testing_service.rework-central.com': [],
+            'Platform_Dockerhost_Solutioning_Test': [],
+            'jack.2271ec6b-9224-4f42-b0b0-bfa91b41218f.1-0-1.cdap-event-proc-map-app': [],
+            'jane.bca28c8c-a352-41f1-81bc-63ff46db2582.1-0-1.cdap-event-proc-supplement-app':
+            [] }
+
+    assert dis._get_instances_from_catalog(
+            partial(get_from_catalog_fake, services_success), user) == ['jane.bca28c8c-a352-41f1-81bc-63ff46db2582.1-0-1.cdap-event-proc-supplement-app']
+
+
+def test_merge_instances():
+    '''_merge_instances unions the results of multiple instance sources,
+    removing duplicates ("123" appears in two groups but only once merged).'''
+    user = "somebody"
+    group_one = [ "123", "456" ]
+    group_two = [ "123", "abc" ]
+    group_three = []
+
+    assert sorted(dis._merge_instances(user, lambda user: group_one, lambda user: group_two,
+            lambda user: group_three)) == sorted([ "123", "456", "abc" ])
+
+
+def test_make_instance_map():
+    '''_make_instances_map keys instances by (dotted name, dotted version)
+    parsed back out of the dashed instance-name format.'''
+    instances_latest_format = ["mike.112e4faa-2ac8-4b13-93e9-8924150538d5.0-5-0.sandbox-platform-laika"]
+
+    instances_map = dis._make_instances_map(instances_latest_format)
+    assert instances_map.get(("sandbox.platform.laika", "0.5.0")) == set(instances_latest_format)
+
+
+def test_get_component_instances(monkeypatch):
+    '''_get_component_instances resolves instances whether the component name
+    is given with dots or with dashes.'''
+    instances = [
+        'jane.b493b48b-5fdf-4c1d-bd2a-8ce747b918ba.1-0-0.dcae-controller-ves-collector',
+        'jane.2455ec5c-67e6-4d4d-8581-79037c7b5f8e.1-0-0.dcae-controller-ves-collector.rework-central.dcae.com',
+        'jane.bfbb1356-d703-4007-8799-759a9e1fc8c2.1-0-0.dcae-controller-ves-collector.rework-central.dcae.com',
+        'jane.89d82ff6-1482-4c01-8758-db9325aad085.1-0-0.dcae-controller-ves-collector'
+        ]
+
+    instances_map = { ('dcae.controller.ves.collector', '1.0.0'): set(instances) }
+
+    def get_user_instances_mock(user, consul_host=None, filter_instances_func=None):
+        # Bypass Consul entirely; serve the canned map above.
+        return instances_map
+
+    monkeypatch.setattr(dis, 'get_user_instances', get_user_instances_mock)
+
+    def always_true_filter(consul_host, instance):
+        # Keep every instance (no health filtering in this test).
+        return True
+
+    # Test base case
+
+    user = "jane"
+    cname = "dcae.controller.ves.collector"
+    cver = "1.0.0"
+    consul_host = "bogus"
+
+    assert sorted(dis._get_component_instances(always_true_filter, user, cname, cver,
+            consul_host)) == sorted(instances)
+
+    # Test for dashes
+
+    cname = "dcae-controller-ves-collector"
+
+    assert sorted(dis._get_component_instances(always_true_filter, user, cname, cver,
+            consul_host)) == sorted(instances)
+
+
+def test_group_config():
+    '''_group_config buckets config entries into streams_publishes /
+    streams_subscribes / services_calls per the key map; unmapped params
+    (e.g. "some-param") stay at the top level.'''
+    config_key_map = {'call1': {'group': 'services_calls'}, 'pub1': {'type': 'http', 'group': 'streams_publishes'}, 'sub2': {'type': 'message_router', 'group': 'streams_subscribes'}, 'pub2': {'type': 'message_router', 'group': 'streams_publishes'}}
+
+    config = { "call1": "{{yo}}", "pub1": "{{target}}", "some-param": 123,
+            "sub2": { "dmaap_info": "<<sub2>>" }, "pub2": { "dmaap_info": "<<pub2>>" } }
+
+    gc = dis._group_config(config, config_key_map)
+    expected = {'services_calls': {'call1': '{{yo}}'}, 'streams_publishes': {'pub2': {'dmaap_info': '<<pub2>>'}, 'pub1': '{{target}}'}, 'some-param': 123, 'streams_subscribes': {'sub2': {'dmaap_info': '<<sub2>>'}}}
+
+    assert gc == expected
+
+
+def test_parse_instance_lookup():
+    '''parse_instance_lookup returns the first result as "address:port".'''
+    results = [{"ServiceAddress": "192.168.1.100", "ServicePort": "8080"},
+            {"ServiceAddress": "10.100.1.100", "ServicePort": "8081"}]
+    assert dis.parse_instance_lookup(results) == "192.168.1.100:8080"
+
+
+def test_apply_inputs():
+    '''_apply_inputs overlays the inputs map onto the config.'''
+    updated_config = dis._apply_inputs({"foo": "bar"}, {"foo": "baz"})
+    assert updated_config == {"foo": "baz"}
+
+
+def test_choose_consul_host(monkeypatch):
+    '''_choose_consul_host falls back to the default only when None is given.'''
+    def fake_default_consul_host():
+        return "default-consul-host"
+
+    monkeypatch.setattr(dis, "default_consul_host", fake_default_consul_host)
+    assert "default-consul-host" == dis._choose_consul_host(None)
+    assert "provided-consul-host" == dis._choose_consul_host("provided-consul-host")
+
+
+# Allow running this test module directly, without invoking the pytest CLI.
+if __name__ == '__main__':
+    '''Test area'''
+    pytest.main([__file__, ])
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_dmaap.py b/mod/onboardingapi/dcae_cli/util/tests/test_dmaap.py
new file mode 100644
index 0000000..dabc737
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_dmaap.py
@@ -0,0 +1,259 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""
+Tests for dmaap module
+"""
+import pytest
+from dcae_cli.util import dmaap
+from dcae_cli.util.exc import DcaeException
+
+
+def test_validate_dmaap_map_schema_message_router():
+    '''Schema validation for message_router entries: full and minimal entries
+    pass; unknown top-level keys and a missing topic_url raise DcaeException.'''
+    def pack_her_up(entry):
+        # Wrap a single entry into the dmaap_map shape the validator expects.
+        return { "some-config-key": entry }
+
+    good = {
+        "type": "message_router",
+        "aaf_username": "foo3",
+        "aaf_password": "bar3",
+        "dmaap_info": {
+            "client_role":"com.dcae.member",
+            "client_id":"1500462518108",
+            "location":"mtc5",
+            "topic_url":"https://dcae-msrt-ftl2.com:3905/events/com.dcae.dmaap.FTL2.TommyTestTopic2"
+            }
+        }
+    dmaap.validate_dmaap_map_schema(pack_her_up(good))
+
+    # topic_url is the only required dmaap_info field
+    good_minimal = {
+        "type": "message_router",
+        "dmaap_info": {
+            "topic_url":"https://dcae-msrt-ftl2.com:3905/events/com.dcae.dmaap.FTL2.TommyTestTopic2"
+            }
+        }
+    dmaap.validate_dmaap_map_schema(pack_her_up(good_minimal))
+
+    # "something_else" is not allowed by the schema
+    bad_extra = {
+        "type": "message_router",
+        "aaf_username": "foo3",
+        "aaf_password": "bar3",
+        "something_else": "boo",
+        "dmaap_info": {
+            "client_role":"com.dcae.member",
+            "client_id":"1500462518108",
+            "location":"mtc5",
+            "topic_url":"https://dcae-msrt-ftl2.com:3905/events/com.dcae.dmaap.FTL2.TommyTestTopic2"
+            }
+        }
+    dm = { "some-config-key": bad_extra }
+
+
+    with pytest.raises(DcaeException):
+        dmaap.validate_dmaap_map_schema(dm)
+
+    # Missing the required topic_url
+    bad_missing = {
+        "type": "message_router",
+        "aaf_username": "foo3",
+        "aaf_password": "bar3",
+        "dmaap_info": {
+            "client_role":"com.dcae.member",
+            "client_id":"1500462518108",
+            "location":"mtc5"
+            }
+        }
+    dm = { "some-config-key": bad_missing }
+
+    with pytest.raises(DcaeException):
+        dmaap.validate_dmaap_map_schema(dm)
+
+
+def test_validate_dmaap_map_schema_data_router():
+    '''Schema validation for data_router entries, covering both publisher
+    (publish_url) and subscriber (delivery_url) shapes; unknown dmaap_info
+    keys raise DcaeException.'''
+    def pack_her_up(entry):
+        # Wrap a single entry into the dmaap_map shape the validator expects.
+        return { "some-config-key": entry }
+
+    # Publishers
+    good = {
+        "type": "data_router",
+        "dmaap_info": {
+            "location": "mtc5",
+            "publish_url": "http://some-publish-url/123",
+            "log_url": "http://some-log-url/456",
+            "username": "jane",
+            "password": "abc"
+            }
+        }
+    dmaap.validate_dmaap_map_schema(pack_her_up(good))
+
+    # publish_url is the only required publisher field
+    good_minimal = {
+        "type": "data_router",
+        "dmaap_info": {
+            "publish_url": "http://some-publish-url/123"
+            }
+        }
+    dmaap.validate_dmaap_map_schema(pack_her_up(good_minimal))
+
+    bad_extra = {
+        "type": "data_router",
+        "dmaap_info": {
+            "publish_url": "http://some-publish-url/123",
+            "unknown_key": "value"
+            }
+        }
+    with pytest.raises(DcaeException):
+        dmaap.validate_dmaap_map_schema(pack_her_up(bad_extra))
+
+    # Subscribers
+    good = {
+        "type": "data_router",
+        "dmaap_info": {
+            "username": "drdeliver",
+            "password": "1loveDataR0uter",
+            "location": "loc00",
+            "delivery_url": "https://example.com/whatever",
+            "subscriber_id": "1550"
+            }
+        }
+    dmaap.validate_dmaap_map_schema(pack_her_up(good))
+
+    # delivery_url is the only required subscriber field
+    good_minimal = {
+        "type": "data_router",
+        "dmaap_info": {
+            "delivery_url": "https://example.com/whatever"
+            }
+        }
+    dmaap.validate_dmaap_map_schema(pack_her_up(good_minimal))
+
+    bad_extra = {
+        "type": "data_router",
+        "dmaap_info": {
+            "delivery_url": "https://example.com/whatever",
+            "unknown_key": "value"
+            }
+        }
+    with pytest.raises(DcaeException):
+        dmaap.validate_dmaap_map_schema(pack_her_up(bad_extra))
+
+
+def test_validate_dmaap_map_entries():
+    '''validate_dmaap_map_entries: every expected config key must be present
+    in the dmaap_map; extra dmaap_map keys are tolerated.'''
+
+    # Success
+
+    dmaap_map = { "mr_pub_fun": { "foo": "bar" }, "mr_sub_fun": { "baz": "duh"} }
+    mr_config_keys = [ "mr_pub_fun", "mr_sub_fun" ]
+    dr_config_keys = []
+
+    assert dmaap.validate_dmaap_map_entries(dmaap_map, mr_config_keys, dr_config_keys) == True
+
+    # Not supposed to be empty
+
+    dmaap_map = {}
+
+    assert dmaap.validate_dmaap_map_entries(dmaap_map, mr_config_keys, dr_config_keys) == False
+
+    # Too many in dmaap map
+
+    # NOTE: This scenario has been changed to be a success case per Tommy who
+    # believes that having extra keys in the dmaap_map is harmless. People would
+    # want to have a master dmaap_map that has a superset of connections used
+    # across many components.
+
+    dmaap_map = { "mr_pub_fun": { "foo": "bar" }, "mr_sub_fun": { "baz": "duh"} }
+    mr_config_keys = [ "mr_pub_fun" ]
+    dr_config_keys = []
+
+    assert dmaap.validate_dmaap_map_entries(dmaap_map, mr_config_keys, dr_config_keys) == True
+
+    # Too little in dmaap map
+
+    dmaap_map = { "mr_pub_fun": { "foo": "bar" }, "mr_sub_fun": { "baz": "duh"} }
+    mr_config_keys = [ "mr_pub_fun", "mr_sub_fun", "mr_xxx" ]
+    dr_config_keys = []
+
+    assert dmaap.validate_dmaap_map_entries(dmaap_map, mr_config_keys, dr_config_keys) == False
+
+
+def test_apply_defaults_dmaap_map():
+    '''apply_defaults_dmaap_map fills missing optional fields with None and
+    leaves fully specified entries untouched.'''
+    good = {
+        "type": "message_router",
+        "aaf_username": "foo3",
+        "aaf_password": "bar3",
+        "dmaap_info": {
+            "client_role":"com.dcae.member",
+            "client_id":"1500462518108",
+            "location":"mtc5",
+            "topic_url":"https://dcae-msrt-ftl2.com:3905/events/com.dcae.dmaap.FTL2.TommyTestTopic2"
+            }
+        }
+    dm = { "some-config-key": good }
+
+    # Fully specified entry comes back unchanged
+    assert dmaap.apply_defaults_dmaap_map(dm) == dm
+
+    minimal = {
+        "type": "message_router",
+        "dmaap_info": {
+            "topic_url":"https://dcae-msrt-ftl2.com:3905/events/com.dcae.dmaap.FTL2.TommyTestTopic2"
+            }
+        }
+    dm = { "some-config-key": minimal }
+
+    # Missing optional fields are defaulted to None
+    result = dmaap.apply_defaults_dmaap_map(dm)
+    assert result == {'some-config-key': {'aaf_username': None,
+        'aaf_password': None, 'dmaap_info': {'client_role': None,
+            'topic_url': 'https://dcae-msrt-ftl2.com:3905/events/com.dcae.dmaap.FTL2.TommyTestTopic2', 'client_id': None, 'location': None},
+        'type': 'message_router'}}
+
+
+def test_update_delivery_urls():
+    '''update_delivery_urls rewrites each data_router delivery_url from the
+    target host plus the route (normalizing a missing leading slash);
+    publisher entries without delivery_url are left alone.'''
+    def get_route_with_slash(config_key):
+        return "/eden"
+
+    dmaap_map = {"spade-key": {"type": "data_router", "dmaap_info": {"delivery_url": "bleh","username": "dolittle"}},
+            "clover-key": {"type": "data_router", "dmaap_info": {"publish_url": "manyfoos",
+                "username": "chickenlittle"}}}
+
+    dmaap_map = dmaap.update_delivery_urls(get_route_with_slash, "http://some-host.io", dmaap_map)
+
+    expected = {'spade-key': {"type": "data_router", 'dmaap_info': {'delivery_url': 'http://some-host.io/eden',
+        'username': 'dolittle'}}, 'clover-key': {"type": "data_router", 'dmaap_info': {'publish_url': 'manyfoos',
+            'username': 'chickenlittle'}}}
+    assert expected == dmaap_map
+
+    def get_route_no_slash(config_key):
+        # Same route without a leading slash; result must be identical.
+        return "eden"
+
+    dmaap_map = dmaap.update_delivery_urls(get_route_no_slash, "http://some-host.io", dmaap_map)
+    assert expected == dmaap_map
+
+    # Case when there is nothing to update
+    dmaap_map = {"clover-key": {"type": "data_router", "dmaap_info": {"publish_url": "manyfoos",
+        "username": "chickenlittle"}}}
+
+    assert dmaap_map == dmaap.update_delivery_urls(get_route_no_slash, "http://some-host.io",
+            dmaap_map)
+
+
+def test_list_delivery_urls():
+    '''list_delivery_urls returns (config_key, delivery_url) pairs only for
+    entries that actually carry a delivery_url.'''
+    dmaap_map = {"spade-key": {"type": "data_router", "dmaap_info": {"delivery_url": "bleh","username": "dolittle"}},
+            "clover-key": {"type": "data_router", "dmaap_info": {"publish_url": "manyfoos",
+                "username": "chickenlittle"}}}
+
+    result = dmaap.list_delivery_urls(dmaap_map)
+    assert result == [('spade-key', 'bleh')]
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_docker_util.py b/mod/onboardingapi/dcae_cli/util/tests/test_docker_util.py
new file mode 100644
index 0000000..1860357
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_docker_util.py
@@ -0,0 +1,62 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+'''
+Provides tests for the docker_util module
+'''
+import pytest
+from dcae_cli.util.profiles import Profile, CONSUL_HOST, CONFIG_BINDING_SERVICE, CDAP_BROKER, DOCKER_HOST
+from dcae_cli.util import docker_util as du
+
+
+# TODO: formalize tests
+'''
+from dcae_cli.util.logger import set_verbose
+set_verbose()
+
+client = _get_docker_client()
+
+params = dict()
+interface_map = dict()
+instance_map = dict()
+# TODO: make-me-valid?
+external_ip ='196.207.143.209'
+
+# TODO: Need to replace the use of asimov
+_run_component('asimov-anomaly-viz:0.0.0',
+ 'bob', 'asimov.anomaly.viz', '1.0.0', params, interface_map, instance_map,
+ external_ip)
+'''
+
def test_convert_profile_to_docker_envs():
    """Profile fields should become docker env vars keyed by upper-cased names."""
    values = {
        CONSUL_HOST: "some.consul.somewhere",
        CONFIG_BINDING_SERVICE: "some.config_binding.somewhere",
        CDAP_BROKER: "broker",
        DOCKER_HOST: "some-docker-host",
    }
    # Expected env map is simply each profile field upper-cased
    expected = {key.upper(): value for key, value in values.items()}

    profile = Profile(**values)

    assert du._convert_profile_to_docker_envs(profile) == expected
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_inputs.py b/mod/onboardingapi/dcae_cli/util/tests/test_inputs.py
new file mode 100644
index 0000000..5271705
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_inputs.py
@@ -0,0 +1,37 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+"""
+Tests for inputs module
+"""
+import pytest
+from dcae_cli.util import inputs
+
+
def test_filter_entries():
    """filter_entries keeps only inputs declared sourced_at_deployment."""
    spec = {"parameters": [
        {"name": "foo"},
        {"name": "bar", "sourced_at_deployment": False},
        {"name": "baz", "sourced_at_deployment": True},
    ]}

    # Missing the required deployment-time input "baz" -> validation error
    with pytest.raises(inputs.InputsValidationError):
        inputs.filter_entries({}, spec)

    inputs_map = {"foo": "do not copy", "baz": "hello world", "extra": "do not copy"}

    # Only "baz" is declared sourced_at_deployment, so only it survives
    assert len(inputs.filter_entries(inputs_map, spec)) == 1
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_profiles.py b/mod/onboardingapi/dcae_cli/util/tests/test_profiles.py
new file mode 100644
index 0000000..969697a
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_profiles.py
@@ -0,0 +1,162 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Tests the profiles module
+"""
+import os, json, copy
+from functools import partial
+
+import click
+import pytest
+
+from dcae_cli import util
+from dcae_cli.util.exc import DcaeException
+from dcae_cli.util import profiles
+from dcae_cli.util.profiles import (get_active_name, get_profile, get_profiles, get_profiles_path,
+ create_profile, delete_profile, update_profile, ACTIVE,
+ activate_profile, CONSUL_HOST)
+from dcae_cli.util import config
+
+
def test_profiles(monkeypatch, tmpdir):
    '''Tests the creation and initialization of profiles on a clean install'''
    # Point the cli at a scratch app dir that holds our config + profiles files
    monkeypatch.setattr(click, "get_app_dir", lambda app: str(tmpdir.realpath()))

    # Seed the config with an active profile name
    tmpdir.join("config.json").write(json.dumps(
        {"active_profile": "fake-solutioning", "db_url": "some-db"}))

    # Seed a single known profile
    fake_profile = {"cdap_broker": "cdap_broker",
                    "config_binding_service": "config_binding_service",
                    "consul_host": "realsolcnsl00.dcae.solutioning.com",
                    "docker_host": "realsoldokr00.dcae.solutioning.com:2376"}
    profile_dict = {"fake-solutioning": fake_profile}
    tmpdir.join("profiles.json").write(json.dumps(profile_dict))

    assert get_active_name() == "fake-solutioning"
    assert get_profile() == profiles.Profile(**fake_profile)

    # Failures looking for unknown profile

    with pytest.raises(DcaeException):
        get_profile('foo')

    with pytest.raises(DcaeException):
        delete_profile('foo')

    with pytest.raises(DcaeException):
        update_profile('foo', **{})  # doesn't exist

    # Cannot delete active profile

    assert delete_profile(get_active_name()) == False

    # Do different get_profiles queries

    assert get_profiles(user_only=True) == profile_dict
    all_profiles = copy.deepcopy(profile_dict)
    all_profiles[ACTIVE] = fake_profile
    assert get_profiles(user_only=False) == all_profiles

    # Create and activate new profile

    create_profile('foo')
    activate_profile('foo')
    assert get_active_name() == 'foo'

    # Update new profile

    update_profile('foo', **{CONSUL_HOST: 'bar'})
    assert get_profiles()['foo'][CONSUL_HOST] == 'bar'
    assert get_profile()._asdict()[CONSUL_HOST] == 'bar'

    # A non-active profile can be deleted
    activate_profile("fake-solutioning")
    assert delete_profile('foo') == True
+
+
def test_reinit_via_get_profiles(monkeypatch, tmpdir):
    """A reinit failure inside get_profiles must surface as DcaeException."""
    monkeypatch.setattr(click, "get_app_dir", lambda app: str(tmpdir.realpath()))

    def always_fail_reinit():
        raise profiles.ProfilesInitError("Faked failure")

    monkeypatch.setattr(profiles, "reinit_profiles", always_fail_reinit)

    with pytest.raises(DcaeException):
        get_profiles()
+
+
def test_reinit_profiles(monkeypatch, tmpdir):
    """Cover initial fetch, merge-update, and fetch-failure paths of reinit_profiles."""
    monkeypatch.setattr(click, "get_app_dir", lambda app: str(tmpdir.realpath()))

    # Setup config (need this because the "active_profile" is needed)
    tmpdir.join("config.json").write(json.dumps(
        {"active_profile": "fake-solutioning", "db_url": "some-db"}))

    common_fields = {"cdap_broker": "cdap_broker",
                     "config_binding_service": "config_binding_service",
                     "consul_host": "realsolcnsl00.dcae.solutioning.com",
                     "docker_host": "realsoldokr00.dcae.solutioning.com:2376"}

    def stub_fetch(payload):
        # Make the web fetch return a canned profiles payload
        monkeypatch.setattr(util, "fetch_file_from_web",
                            lambda server_url, path: payload)

    # Start with empty profiles: the fetched set becomes the profiles
    profile_dict = {"fake-solutioning": dict(common_fields)}
    stub_fetch(profile_dict)
    profiles.reinit_profiles()
    assert profiles.get_profiles(include_active=False) == profile_dict

    # Test update: a later fetch merges new entries with existing ones
    stub_fetch({"fake-5g": dict(common_fields)})
    profiles.reinit_profiles()
    all_profiles = profiles.get_profiles(include_active=False)
    assert "fake-5g" in all_profiles
    assert "fake-solutioning" in all_profiles

    # Test fetch failure

    def fetch_failure(server_url, path):
        raise RuntimeError("Mysterious error")

    monkeypatch.setattr(util, "fetch_file_from_web", fetch_failure)
    # Case when user opts out of manually setting up
    monkeypatch.setattr(click, "confirm", lambda msg: False)

    with pytest.raises(profiles.ProfilesInitError):
        profiles.reinit_profiles()
+
+
if __name__ == '__main__':
    # Allow running this test module directly
    pytest.main([__file__])
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_remove.py b/mod/onboardingapi/dcae_cli/util/tests/test_remove.py
new file mode 100644
index 0000000..92b8ce9
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_remove.py
@@ -0,0 +1,24 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+'''
+TODO: Test removing components
+'''
diff --git a/mod/onboardingapi/dcae_cli/util/tests/test_undeploy.py b/mod/onboardingapi/dcae_cli/util/tests/test_undeploy.py
new file mode 100644
index 0000000..664c69c
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/tests/test_undeploy.py
@@ -0,0 +1,62 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+'''
+Provides tests for the undeploy module
+'''
+from dcae_cli.util.undeploy import _handler, _handler_report
+
def test_handler():
    """_handler must run every undeploy function against every instance."""
    instances = set(["some-instance-name", "another-instance-name"])

    def remove_config_ok(config_key):
        return True

    def undeploy_ok(config_key):
        return True

    def undeploy_fail(config_key):
        return False

    def undeploy_fail_for_some(config_key):
        # Fails for exactly one of the two instances
        return config_key != "some-instance-name"

    # Everything succeeds -> results for both instances, no failures
    failures, results = _handler([undeploy_ok, remove_config_ok], instances)
    assert len(results) == 2
    assert len(failures) == 0

    # One function always fails -> every instance is reported as a failure
    failures, results = _handler([undeploy_fail, remove_config_ok], instances)
    assert len(results) == 2
    assert len(failures) == 2

    # Failure on a single instance -> exactly one failure
    failures, results = _handler([undeploy_fail_for_some, remove_config_ok], instances)
    assert len(results) == 2
    assert len(failures) == 1

    # No instances -> nothing to do, nothing to fail
    failures, results = _handler([undeploy_ok, remove_config_ok], [])
    assert len(results) == 0
    assert len(failures) == 0
diff --git a/mod/onboardingapi/dcae_cli/util/undeploy.py b/mod/onboardingapi/dcae_cli/util/undeploy.py
new file mode 100644
index 0000000..1ce4d76
--- /dev/null
+++ b/mod/onboardingapi/dcae_cli/util/undeploy.py
@@ -0,0 +1,111 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# -*- coding: utf-8 -*-
+"""
+Provides utilities for undeploying components
+"""
+from functools import partial
+from dcae_cli.util.exc import DcaeException
+import dcae_cli.util.profiles as profiles
+from dcae_cli.util.cdap_util import undeploy_component as undeploy_cdap_component
+from dcae_cli.util.discovery import get_healthy_instances, get_defective_instances, \
+ remove_config
+from dcae_cli.util import docker_util as du
+from dcae_cli.util.logger import get_logger
+
+
+log = get_logger('Undeploy')
+
+
+def _handler(undeploy_funcs, instances):
+ """Handles the undeployment
+
+ Executes all undeployment functions for all instances and gathers up the
+ results. No short circuiting.
+
+ Args
+ ----
+ undeploy_funcs: List of functions that have the following signature `fn: string->boolean`
+ the input is a fully qualified instance name and the return is True upon
+ success and False for failures
+ instances: List of fully qualified instance names
+
+ Returns
+ -------
+ (failures, results) where each are a list of tuples. Each tuple has the
+ structure: `(<instance name>, result of func 1, result of func 2, ..)`.
+ """
+ if not instances:
+ return [], []
+
+ # Invoke all undeploy funcs for all instances
+ def invoke_undeploys(instance):
+ return tuple([ undeploy_func(instance) for undeploy_func in undeploy_funcs ])
+
+ results = [ (instance, ) + invoke_undeploys(instance) for instance in instances ]
+
+ # Determine failures
+ filter_failures_func = partial(filter, lambda result: not all(result[1:]))
+ failures = list(filter_failures_func(results))
+
+ return failures, results
+
+
def _handler_report(failures, results):
    """Reports the result of handling"""
    if failures:
        failed_names = ", ".join(entry[0] for entry in failures)
        log.warn("Could not completely undeploy: {0}".format(failed_names))

        # This message captures a case where you are seeing a false negative. If
        # you attempted to undeploy a component instance and it partially failed
        # the first time but "succeeded" the second time, the second undeploy
        # would get reported as a failure. The second undeploy would probably
        # also be partial undeploy because the undeploy operation that succeeded
        # the first time will fail the second time.
        log.warn("NOTE: This could be expected since we are attempting to undeploy a component in a bad partial state")
        return

    if not results:
        log.warn("No components found to undeploy")
        return

    # This seems like important info so set it to warning so that it shows up
    log.warn("Undeployed components: {0}".format(len(results)))
+
+
def undeploy_component(user, cname, cver, catalog):
    '''Undeploys a component based on the component type'''
    cname, cver = catalog.verify_component(cname, cver)
    ctype = catalog.get_component_type(cname, cver)
    profile = profiles.get_profile()

    # Collect every instance of the component: the healthy ones plus any
    # stuck in a bad partially-deployed state
    instances = get_healthy_instances(user, cname, cver) \
        + get_defective_instances(user, cname, cver)

    # Pick the type-specific undeploy operation
    if ctype == 'docker':
        docker_client = du.get_docker_client(profile)
        image = catalog.get_docker_image(cname, cver)
        undeploy = partial(du.undeploy_component, docker_client, image)
    elif ctype == 'cdap':
        undeploy = partial(undeploy_cdap_component, profile)
    else:
        raise DcaeException("Unsupported component type for undeploy")

    log.warn("Undeploying components: {0}".format(len(instances)))
    _handler_report(*_handler([undeploy, remove_config], instances))